public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-06 22:14 Mike Pagano
From: Mike Pagano @ 2023-09-06 22:14 UTC
  To: gentoo-commits

commit:     9a88bbc80d292195f44fd0c75e13123e4e8eb02a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep  6 22:14:09 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep  6 22:14:09 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a88bbc8

Linux patch 6.5.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1001_linux-6.5.2.patch | 1052 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1056 insertions(+)

diff --git a/0000_README b/0000_README
index f7da0ce2..465e90aa 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.5.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.1
 
+Patch:  1001_linux-6.5.2.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-6.5.2.patch b/1001_linux-6.5.2.patch
new file mode 100644
index 00000000..82cc18a9
--- /dev/null
+++ b/1001_linux-6.5.2.patch
@@ -0,0 +1,1052 @@
+diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+index 0fa8e3e43bf80..1a7e4bff0456f 100644
+--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
++++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+@@ -23,6 +23,9 @@ Optional properties:
+     1 = active low.
+ - irda-mode-ports: An array that lists the indices of the port that
+ 		   should operate in IrDA mode.
++- nxp,modem-control-line-ports: An array that lists the indices of the port that
++				should have shared GPIO lines configured as
++				modem control lines.
+ 
+ Example:
+         sc16is750: sc16is750@51 {
+@@ -35,6 +38,26 @@ Example:
+                 #gpio-cells = <2>;
+         };
+ 
++	sc16is752: sc16is752@53 {
++		compatible = "nxp,sc16is752";
++		reg = <0x53>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
++		gpio-controller; /* Port 0 as GPIOs */
++		#gpio-cells = <2>;
++	};
++
++	sc16is752: sc16is752@54 {
++		compatible = "nxp,sc16is752";
++		reg = <0x54>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
++	};
++
+ * spi as bus
+ 
+ Required properties:
+@@ -59,6 +82,9 @@ Optional properties:
+     1 = active low.
+ - irda-mode-ports: An array that lists the indices of the port that
+ 		   should operate in IrDA mode.
++- nxp,modem-control-line-ports: An array that lists the indices of the port that
++				should have shared GPIO lines configured as
++				modem control lines.
+ 
+ Example:
+ 	sc16is750: sc16is750@0 {
+@@ -70,3 +96,23 @@ Example:
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 	};
++
++	sc16is752: sc16is752@1 {
++		compatible = "nxp,sc16is752";
++		reg = <1>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
++		gpio-controller; /* Port 0 as GPIOs */
++		#gpio-cells = <2>;
++	};
++
++	sc16is752: sc16is752@2 {
++		compatible = "nxp,sc16is752";
++		reg = <2>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
++	};
+diff --git a/Makefile b/Makefile
+index 062b9694e0547..c47558bc00aa8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
+index d29bdcd5270e0..72fa2e3fd3531 100644
+--- a/arch/arm/mach-pxa/sharpsl_pm.c
++++ b/arch/arm/mach-pxa/sharpsl_pm.c
+@@ -216,8 +216,6 @@ void sharpsl_battery_kick(void)
+ {
+ 	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
+ }
+-EXPORT_SYMBOL(sharpsl_battery_kick);
+-
+ 
+ static void sharpsl_battery_thread(struct work_struct *private_)
+ {
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index d01ea54b0b782..cc691b199429c 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -9,7 +9,6 @@
+  */
+ 
+ #include <linux/kernel.h>
+-#include <linux/module.h>	/* symbol_get ; symbol_put */
+ #include <linux/platform_device.h>
+ #include <linux/delay.h>
+ #include <linux/gpio_keys.h>
+@@ -518,17 +517,6 @@ static struct gpiod_lookup_table spitz_ads7846_gpio_table = {
+ 	},
+ };
+ 
+-static void spitz_bl_kick_battery(void)
+-{
+-	void (*kick_batt)(void);
+-
+-	kick_batt = symbol_get(sharpsl_battery_kick);
+-	if (kick_batt) {
+-		kick_batt();
+-		symbol_put(sharpsl_battery_kick);
+-	}
+-}
+-
+ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ 	.dev_id = "spi2.1",
+ 	.table = {
+@@ -556,7 +544,7 @@ static struct corgi_lcd_platform_data spitz_lcdcon_info = {
+ 	.max_intensity		= 0x2f,
+ 	.default_intensity	= 0x1f,
+ 	.limit_mask		= 0x0b,
+-	.kick_battery		= spitz_bl_kick_battery,
++	.kick_battery		= sharpsl_battery_kick,
+ };
+ 
+ static struct spi_board_info spitz_spi_devices[] = {
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index 79d66faa84828..012da042d0a4f 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -14,7 +14,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/leds.h>
+ #include <linux/mmc/host.h>
+-#include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/spi/spi.h>
+@@ -167,12 +166,7 @@ static struct platform_device db1x00_audio_dev = {
+ 
+ static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	mmc_cd(ptr, msecs_to_jiffies(500));
+-	symbol_put(mmc_detect_change);
+-
++	mmc_detect_change(ptr, msecs_to_jiffies(500));
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index 1864eb935ca57..76080c71a2a7b 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -10,7 +10,6 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/init.h>
+-#include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/leds.h>
+@@ -340,14 +339,7 @@ static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
+ 
+ static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	if (mmc_cd) {
+-		mmc_cd(ptr, msecs_to_jiffies(200));
+-		symbol_put(mmc_detect_change);
+-	}
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == DB1200_SD0_INSERT_INT)
+@@ -431,14 +423,7 @@ static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
+ 
+ static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	if (mmc_cd) {
+-		mmc_cd(ptr, msecs_to_jiffies(200));
+-		symbol_put(mmc_detect_change);
+-	}
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == PB1200_SD1_INSERT_INT)
+diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
+index e70e529ddd914..ff61901329c62 100644
+--- a/arch/mips/alchemy/devboards/db1300.c
++++ b/arch/mips/alchemy/devboards/db1300.c
+@@ -17,7 +17,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/ata_platform.h>
+ #include <linux/mmc/host.h>
+-#include <linux/module.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/platnand.h>
+ #include <linux/platform_device.h>
+@@ -459,14 +458,7 @@ static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
+ 
+ static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m.  We can only be called once MMC core has
+-	 * initialized the controller, so symbol_get() should always succeed.
+-	 */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	mmc_cd(ptr, msecs_to_jiffies(200));
+-	symbol_put(mmc_detect_change);
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == DB1300_SD1_INSERT_INT)
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index 2d674126160fe..cab11af28c231 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -756,7 +756,7 @@ svc_create_memory_pool(struct platform_device *pdev,
+ 	paddr = begin;
+ 	size = end - begin;
+ 	va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+-	if (!va) {
++	if (IS_ERR(va)) {
+ 		dev_err(dev, "fail to remap shared memory\n");
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
+index 5f608ef8b53ca..cde281ec89d7b 100644
+--- a/drivers/fsi/fsi-master-ast-cf.c
++++ b/drivers/fsi/fsi-master-ast-cf.c
+@@ -1441,3 +1441,4 @@ static struct platform_driver fsi_master_acf = {
+ 
+ module_platform_driver(fsi_master_acf);
+ MODULE_LICENSE("GPL");
++MODULE_FIRMWARE(FW_FILE_NAME);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 0c8a479895761..c184f64342aa0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -109,9 +109,11 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
+ 				       struct amdgpu_irq_src *source,
+ 				       struct amdgpu_iv_entry *entry)
+ {
++	uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
++			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
++	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ 	bool retry_fault = !!(entry->src_data[1] & 0x80);
+ 	bool write_fault = !!(entry->src_data[1] & 0x20);
+-	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+ 	struct amdgpu_task_info task_info;
+ 	uint32_t status = 0;
+ 	u64 addr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index c571f0d959946..dd9744b583949 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -97,7 +97,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
+ 				       struct amdgpu_irq_src *source,
+ 				       struct amdgpu_iv_entry *entry)
+ {
+-	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
++	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
++			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
++	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ 	uint32_t status = 0;
+ 	u64 addr;
+ 
+diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
+index 4da50e19808ef..166a76c9bcad3 100644
+--- a/drivers/hid/wacom.h
++++ b/drivers/hid/wacom.h
+@@ -150,6 +150,7 @@ struct wacom_remote {
+ 		struct input_dev *input;
+ 		bool registered;
+ 		struct wacom_battery battery;
++		ktime_t active_time;
+ 	} remotes[WACOM_MAX_REMOTES];
+ };
+ 
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 76e5353aca0c7..eb833455abd50 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2523,6 +2523,18 @@ fail:
+ 	return;
+ }
+ 
++static void wacom_remote_destroy_battery(struct wacom *wacom, int index)
++{
++	struct wacom_remote *remote = wacom->remote;
++
++	if (remote->remotes[index].battery.battery) {
++		devres_release_group(&wacom->hdev->dev,
++				     &remote->remotes[index].battery.bat_desc);
++		remote->remotes[index].battery.battery = NULL;
++		remote->remotes[index].active_time = 0;
++	}
++}
++
+ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ {
+ 	struct wacom_remote *remote = wacom->remote;
+@@ -2537,9 +2549,7 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ 			remote->remotes[i].registered = false;
+ 			spin_unlock_irqrestore(&remote->remote_lock, flags);
+ 
+-			if (remote->remotes[i].battery.battery)
+-				devres_release_group(&wacom->hdev->dev,
+-						     &remote->remotes[i].battery.bat_desc);
++			wacom_remote_destroy_battery(wacom, i);
+ 
+ 			if (remote->remotes[i].group.name)
+ 				devres_release_group(&wacom->hdev->dev,
+@@ -2547,7 +2557,6 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ 
+ 			remote->remotes[i].serial = 0;
+ 			remote->remotes[i].group.name = NULL;
+-			remote->remotes[i].battery.battery = NULL;
+ 			wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ 		}
+ 	}
+@@ -2632,6 +2641,9 @@ static int wacom_remote_attach_battery(struct wacom *wacom, int index)
+ 	if (remote->remotes[index].battery.battery)
+ 		return 0;
+ 
++	if (!remote->remotes[index].active_time)
++		return 0;
++
+ 	if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN)
+ 		return 0;
+ 
+@@ -2647,6 +2659,7 @@ static void wacom_remote_work(struct work_struct *work)
+ {
+ 	struct wacom *wacom = container_of(work, struct wacom, remote_work);
+ 	struct wacom_remote *remote = wacom->remote;
++	ktime_t kt = ktime_get();
+ 	struct wacom_remote_data data;
+ 	unsigned long flags;
+ 	unsigned int count;
+@@ -2673,6 +2686,10 @@ static void wacom_remote_work(struct work_struct *work)
+ 		serial = data.remote[i].serial;
+ 		if (data.remote[i].connected) {
+ 
++			if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT
++			    && remote->remotes[i].active_time != 0)
++				wacom_remote_destroy_battery(wacom, i);
++
+ 			if (remote->remotes[i].serial == serial) {
+ 				wacom_remote_attach_battery(wacom, i);
+ 				continue;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 174bf03908d7c..6c056f8844e70 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1134,6 +1134,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+ 	if (index < 0 || !remote->remotes[index].registered)
+ 		goto out;
+ 
++	remote->remotes[i].active_time = ktime_get();
+ 	input = remote->remotes[index].input;
+ 
+ 	input_report_key(input, BTN_0, (data[9] & 0x01));
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index ee21bb260f22f..2e7cc5e7a0cb7 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -13,6 +13,7 @@
+ #define WACOM_NAME_MAX		64
+ #define WACOM_MAX_REMOTES	5
+ #define WACOM_STATUS_UNKNOWN	255
++#define WACOM_REMOTE_BATTERY_TIMEOUT	21000000000ll
+ 
+ /* packet length for individual models */
+ #define WACOM_PKGLEN_BBFUN	 9
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 159a3e9490aed..554e67103c1a1 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -526,11 +526,12 @@ config MMC_ALCOR
+ 	  of Alcor Micro PCI-E card reader
+ 
+ config MMC_AU1X
+-	tristate "Alchemy AU1XX0 MMC Card Interface support"
++	bool "Alchemy AU1XX0 MMC Card Interface support"
+ 	depends on MIPS_ALCHEMY
++	depends on MMC=y
+ 	help
+ 	  This selects the AMD Alchemy(R) Multimedia card interface.
+-	  If you have a Alchemy platform with a MMC slot, say Y or M here.
++	  If you have a Alchemy platform with a MMC slot, say Y here.
+ 
+ 	  If unsure, say N.
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+index 17c097cef7d45..5243fc0310589 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+@@ -8,7 +8,7 @@
+ #include "enetc.h"
+ 
+ int enetc_phc_index = -1;
+-EXPORT_SYMBOL(enetc_phc_index);
++EXPORT_SYMBOL_GPL(enetc_phc_index);
+ 
+ static struct ptp_clock_info enetc_ptp_caps = {
+ 	.owner		= THIS_MODULE,
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
+index a34833de7c676..b85a4a03b37ab 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
+@@ -344,7 +344,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ 
+ 	if (!skb_cb->vif) {
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 
+@@ -369,7 +369,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ 			   "dp_tx: failed to find the peer with peer_id %d\n",
+ 			    ts->peer_id);
+ 		spin_unlock_bh(&ab->base_lock);
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 	spin_unlock_bh(&ab->base_lock);
+@@ -566,12 +566,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ 
+ 	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 
+ 	if (unlikely(!skb_cb->vif)) {
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 
+@@ -624,7 +624,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ 			   "dp_tx: failed to find the peer with peer_id %d\n",
+ 			    ts->peer_id);
+ 		spin_unlock_bh(&ab->base_lock);
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 	arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index d39a3cc5e381f..be4d63db5f64a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -495,6 +495,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 				    BSS_CHANGED_BEACON_ENABLED));
+ 	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ 					 BSS_CHANGED_FILS_DISCOVERY));
++	bool amsdu_en = wcid->amsdu;
+ 
+ 	if (vif) {
+ 		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+@@ -554,12 +555,14 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 	txwi[4] = 0;
+ 
+ 	val = FIELD_PREP(MT_TXD5_PID, pid);
+-	if (pid >= MT_PACKET_ID_FIRST)
++	if (pid >= MT_PACKET_ID_FIRST) {
+ 		val |= MT_TXD5_TX_STATUS_HOST;
++		amsdu_en = amsdu_en && !is_mt7921(dev);
++	}
+ 
+ 	txwi[5] = cpu_to_le32(val);
+ 	txwi[6] = 0;
+-	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
++	txwi[7] = amsdu_en ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
+ 
+ 	if (is_8023)
+ 		mt76_connac2_mac_write_txwi_8023(txwi, skb, wcid);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 3b6adb29cbef1..0e3ada1e008cd 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1363,7 +1363,7 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+ 		return -EINVAL;
+ 
+ 	if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+-		tx_ant = BIT(ffs(tx_ant) - 1) - 1;
++		return -EINVAL;
+ 
+ 	mt7921_mutex_acquire(dev);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 4a57efdba97bb..875a61c9c80d4 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -844,7 +844,7 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 
+ 	ret = rtw_core_init(rtwdev);
+ 	if (ret)
+-		goto err_release_hw;
++		goto err_free_rx_bufs;
+ 
+ 	ret = rtw_usb_intf_init(rtwdev, intf);
+ 	if (ret) {
+@@ -890,6 +890,9 @@ err_destroy_usb:
+ err_deinit_core:
+ 	rtw_core_deinit(rtwdev);
+ 
++err_free_rx_bufs:
++	rtw_usb_free_rx_bufs(rtwusb);
++
+ err_release_hw:
+ 	ieee80211_free_hw(hw);
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 4dff656af3ad2..74241b2ff21e3 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -748,7 +748,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ 		break;
+ 
+ 	default:
+-		dev_err(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
++		dev_dbg(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
+ 			param);
+ 		return -ENOTSUPP;
+ 	}
+@@ -798,7 +798,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			break;
+ 
+ 		default:
+-			dev_err(&gpio_dev->pdev->dev,
++			dev_dbg(&gpio_dev->pdev->dev,
+ 				"Invalid config param %04x\n", param);
+ 			ret = -ENOTSUPP;
+ 		}
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 0f707be0eb87f..04dbf35cf3b70 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -1432,7 +1432,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
+ 		unreachable();
+ 	}
+ }
+-EXPORT_SYMBOL(ds1685_rtc_poweroff);
++EXPORT_SYMBOL_GPL(ds1685_rtc_poweroff);
+ /* ----------------------------------------------------------------------- */
+ 
+ 
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index a2f3645be0cc8..b18e6d9c832b8 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -327,6 +327,7 @@ int r8712_init_drv_sw(struct _adapter *padapter)
+ 	mp871xinit(padapter);
+ 	init_default_value(padapter);
+ 	r8712_InitSwLeds(padapter);
++	mutex_init(&padapter->mutex_start);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index 37364d3101e21..df05213f922f4 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -567,7 +567,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ 	if (rtl871x_load_fw(padapter))
+ 		goto deinit_drv_sw;
+ 	init_completion(&padapter->rx_filter_ready);
+-	mutex_init(&padapter->mutex_start);
+ 	return 0;
+ 
+ deinit_drv_sw:
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index daaf2a64e7f1f..54b22cbc0fcef 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -126,6 +126,7 @@ struct qcom_geni_serial_port {
+ 	dma_addr_t rx_dma_addr;
+ 	bool setup;
+ 	unsigned int baud;
++	unsigned long clk_rate;
+ 	void *rx_buf;
+ 	u32 loopback;
+ 	bool brk;
+@@ -1249,6 +1250,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 			baud * sampling_rate, clk_rate, clk_div);
+ 
+ 	uport->uartclk = clk_rate;
++	port->clk_rate = clk_rate;
+ 	dev_pm_opp_set_rate(uport->dev, clk_rate);
+ 	ser_clk_cfg = SER_CLK_EN;
+ 	ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+@@ -1513,10 +1515,13 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
+ 
+ 	if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
+ 		geni_icc_enable(&port->se);
++		if (port->clk_rate)
++			dev_pm_opp_set_rate(uport->dev, port->clk_rate);
+ 		geni_se_resources_on(&port->se);
+ 	} else if (new_state == UART_PM_STATE_OFF &&
+ 			old_state == UART_PM_STATE_ON) {
+ 		geni_se_resources_off(&port->se);
++		dev_pm_opp_set_rate(uport->dev, 0);
+ 		geni_icc_disable(&port->se);
+ 	}
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 2e7e7c409cf2e..faeb3dc371c05 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1342,9 +1342,18 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
+ 		state |= BIT(offset);
+ 	else
+ 		state &= ~BIT(offset);
+-	sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
++
++	/*
++	 * If we write IOSTATE first, and then IODIR, the output value is not
++	 * transferred to the corresponding I/O pin.
++	 * The datasheet states that each register bit will be transferred to
++	 * the corresponding I/O pin programmed as output when writing to
++	 * IOSTATE. Therefore, configure direction first with IODIR, and then
++	 * set value after with IOSTATE.
++	 */
+ 	sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
+ 			      BIT(offset));
++	sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
+ 
+ 	return 0;
+ }
+@@ -1436,6 +1445,12 @@ static int sc16is7xx_probe(struct device *dev,
+ 		s->p[i].port.fifosize	= SC16IS7XX_FIFO_SIZE;
+ 		s->p[i].port.flags	= UPF_FIXED_TYPE | UPF_LOW_LATENCY;
+ 		s->p[i].port.iobase	= i;
++		/*
++		 * Use all ones as membase to make sure uart_configure_port() in
++		 * serial_core.c does not abort for SPI/I2C devices where the
++		 * membase address is not applicable.
++		 */
++		s->p[i].port.membase	= (void __iomem *)~0;
+ 		s->p[i].port.iotype	= UPIO_PORT;
+ 		s->p[i].port.uartclk	= freq;
+ 		s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 336ef6dd8e7d8..873539f9a2c0a 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -175,10 +175,12 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
+ 	if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
+ 		data->ulpi = 1;
+ 
+-	of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
+-			&data->emp_curr_control);
+-	of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
+-			&data->dc_vol_level_adjust);
++	if (of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
++			&data->emp_curr_control))
++		data->emp_curr_control = -1;
++	if (of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
++			&data->dc_vol_level_adjust))
++		data->dc_vol_level_adjust = -1;
+ 
+ 	return data;
+ }
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index 9ee9621e2ccca..1c7932f22218a 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -659,13 +659,15 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
+ 			usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ 		/* PHY tuning for signal quality */
+ 		reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
+-		if (data->emp_curr_control && data->emp_curr_control <=
++		if (data->emp_curr_control >= 0 &&
++			data->emp_curr_control <=
+ 			(TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) {
+ 			reg &= ~TXPREEMPAMPTUNE0_MASK;
+ 			reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT);
+ 		}
+ 
+-		if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <=
++		if (data->dc_vol_level_adjust >= 0 &&
++			data->dc_vol_level_adjust <=
+ 			(TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) {
+ 			reg &= ~TXVREFTUNE0_MASK;
+ 			reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT);
+diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
+index e99c7489dba02..2c07c038b584d 100644
+--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
+@@ -926,6 +926,12 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
+ 			return ret;
+ 	}
+ 
++	if (priv->drvdata->usb_post_init) {
++		ret = priv->drvdata->usb_post_init(priv);
++		if (ret)
++			return ret;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8ac98e60fff56..7994a4549a6c8 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -259,6 +259,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM05G			0x030a
+ #define QUECTEL_PRODUCT_EM060K			0x030b
+ #define QUECTEL_PRODUCT_EM05G_CS		0x030c
++#define QUECTEL_PRODUCT_EM05GV2			0x030e
+ #define QUECTEL_PRODUCT_EM05CN_SG		0x0310
+ #define QUECTEL_PRODUCT_EM05G_SG		0x0311
+ #define QUECTEL_PRODUCT_EM05CN			0x0312
+@@ -1188,6 +1189,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff),
++	  .driver_info = RSVD(4) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+@@ -2232,6 +2235,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff),			/* Foxconn T99W265 MBIM */
+ 	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff),			/* Foxconn T99W368 MBIM */
++	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff),			/* Foxconn T99W373 MBIM */
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index fc708c289a73a..0ee3e6e29bb17 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -602,6 +602,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
+ 	if (time_after(jiffies, timeout))
+ 		return -ETIMEDOUT;
+ 
++	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
++	if (ret < 0)
++		return ret;
++
+ 	/* Handle vendor init */
+ 	if (tcpci->data->init) {
+ 		ret = tcpci->data->init(tcpci, tcpci->data);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index cc1d839264977..bf97b81ff5b07 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2753,6 +2753,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			port->sink_cap_done = true;
+ 			tcpm_set_state(port, ready_state(port), 0);
+ 			break;
++		/*
++		 * Some port partners do not support GET_STATUS, avoid soft reset the link to
++		 * prevent redundant power re-negotiation
++		 */
++		case GET_STATUS_SEND:
++			tcpm_set_state(port, ready_state(port), 0);
++			break;
+ 		case SRC_READY:
+ 		case SNK_READY:
+ 			if (port->vdm_state > VDM_STATE_READY) {
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index de4f12152b62f..9c9350eb17040 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1038,6 +1038,8 @@ hitted:
+ 	cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
+ 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ 		zero_user_segment(page, cur, end);
++		++spiltted;
++		tight = false;
+ 		goto next_part;
+ 	}
+ 	if (map->m_flags & EROFS_MAP_FRAGMENT) {
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 6ce8617b562d5..7342de296ec3c 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -205,7 +205,8 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
+ 	int ret;
+ 
+ 	spin_lock(lock);
+-	if (prev->bh && blkoff == prev->blkoff) {
++	if (prev->bh && blkoff == prev->blkoff &&
++	    likely(buffer_uptodate(prev->bh))) {
+ 		get_bh(prev->bh);
+ 		*bhp = prev->bh;
+ 		spin_unlock(lock);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 35bc793053180..acf7a266f72f5 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1025,7 +1025,7 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
+ 	int err;
+ 
+ 	spin_lock(&nilfs->ns_inode_lock);
+-	if (ii->i_bh == NULL) {
++	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
+ 		spin_unlock(&nilfs->ns_inode_lock);
+ 		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
+ 						  inode->i_ino, pbh);
+@@ -1034,7 +1034,10 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
+ 		spin_lock(&nilfs->ns_inode_lock);
+ 		if (ii->i_bh == NULL)
+ 			ii->i_bh = *pbh;
+-		else {
++		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
++			__brelse(ii->i_bh);
++			ii->i_bh = *pbh;
++		} else {
+ 			brelse(*pbh);
+ 			*pbh = ii->i_bh;
+ 		}
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 5e5e120edcc22..15e5684e328c1 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -355,6 +355,9 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+ 		if (blob_len < (u64)sess_key_off + sess_key_len)
+ 			return -EINVAL;
+ 
++		if (sess_key_len > CIFS_KEY_SIZE)
++			return -EINVAL;
++
+ 		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+ 		if (!ctx_arc4)
+ 			return -ENOMEM;
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 844b303baf293..90edd8522d291 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1492,7 +1492,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag, i
+ 		    name_len < 4 ||
+ 		    name_off + name_len > cc_len ||
+ 		    (value_off & 0x7) != 0 ||
+-		    (value_off && (value_off < name_off + name_len)) ||
++		    (value_len && value_off < name_off + (name_len < 8 ? 8 : name_len)) ||
+ 		    ((u64)value_off + value_len > cc_len))
+ 			return ERR_PTR(-EINVAL);
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7cc1b0c47d0a2..687b750a35bf7 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4308,7 +4308,7 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ 			name_len -= XATTR_USER_PREFIX_LEN;
+ 
+-		ptr = (char *)(&eainfo->name + name_len + 1);
++		ptr = eainfo->name + name_len + 1;
+ 		buf_free_len -= (offsetof(struct smb2_ea_info, name) +
+ 				name_len + 1);
+ 		/* bailout if xattr can't fit in buf_free_len */
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index 2767c08a534a3..d12cfd3b09278 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -361,7 +361,7 @@ struct smb2_ea_info {
+ 	__u8   Flags;
+ 	__u8   EaNameLength;
+ 	__le16 EaValueLength;
+-	char name[1];
++	char name[];
+ 	/* optionally followed by value */
+ } __packed; /* level 15 Query */
+ 
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index c06efc020bd95..7578200f63b1d 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -1366,24 +1366,35 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ 	LIST_HEAD(msg_list);
+ 	char *desc_buf;
+ 	int credits_needed;
+-	unsigned int desc_buf_len;
+-	size_t total_length = 0;
++	unsigned int desc_buf_len, desc_num = 0;
+ 
+ 	if (t->status != SMB_DIRECT_CS_CONNECTED)
+ 		return -ENOTCONN;
+ 
++	if (buf_len > t->max_rdma_rw_size)
++		return -EINVAL;
++
+ 	/* calculate needed credits */
+ 	credits_needed = 0;
+ 	desc_buf = buf;
+ 	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++		if (!buf_len)
++			break;
++
+ 		desc_buf_len = le32_to_cpu(desc[i].length);
++		if (!desc_buf_len)
++			return -EINVAL;
++
++		if (desc_buf_len > buf_len) {
++			desc_buf_len = buf_len;
++			desc[i].length = cpu_to_le32(desc_buf_len);
++			buf_len = 0;
++		}
+ 
+ 		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+ 		desc_buf += desc_buf_len;
+-		total_length += desc_buf_len;
+-		if (desc_buf_len == 0 || total_length > buf_len ||
+-		    total_length > t->max_rdma_rw_size)
+-			return -EINVAL;
++		buf_len -= desc_buf_len;
++		desc_num++;
+ 	}
+ 
+ 	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+@@ -1395,7 +1406,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ 
+ 	/* build rdma_rw_ctx for each descriptor */
+ 	desc_buf = buf;
+-	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++	for (i = 0; i < desc_num; i++) {
+ 		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+ 			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+ 		if (!msg) {
+diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
+index 85e95a3251d34..83376473ac765 100644
+--- a/include/linux/usb/tcpci.h
++++ b/include/linux/usb/tcpci.h
+@@ -103,6 +103,7 @@
+ #define TCPC_POWER_STATUS_SINKING_VBUS	BIT(0)
+ 
+ #define TCPC_FAULT_STATUS		0x1f
++#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+ 
+ #define TCPC_ALERT_EXTENDED		0x21
+ 
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index ff7cc4e292990..98fedfdb8db52 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -1295,12 +1295,20 @@ void *__symbol_get(const char *symbol)
+ 	};
+ 
+ 	preempt_disable();
+-	if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) {
+-		preempt_enable();
+-		return NULL;
++	if (!find_symbol(&fsa))
++		goto fail;
++	if (fsa.license != GPL_ONLY) {
++		pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
++			symbol);
++		goto fail;
+ 	}
++	if (strong_try_module_get(fsa.owner))
++		goto fail;
+ 	preempt_enable();
+ 	return (void *)kernel_symbol_value(fsa.sym);
++fail:
++	preempt_enable();
++	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(__symbol_get);
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8e64aaad53619..2656ca3b9b39c 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9486,7 +9486,7 @@ static struct trace_array *trace_array_create(const char *name)
+ 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+ 		goto out_free_tr;
+ 
+-	if (!alloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
+ 		goto out_free_tr;
+ 
+ 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
+@@ -10431,7 +10431,7 @@ __init static int tracer_alloc_buffers(void)
+ 	if (trace_create_savedcmd() < 0)
+ 		goto out_free_temp_buffer;
+ 
+-	if (!alloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
+ 		goto out_free_savedcmd;
+ 
+ 	/* TODO: make the number of buffers hot pluggable with CPUS */
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f10f4e6d3fb85..3d4add94e367d 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1093,6 +1093,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ 	int i, altno, err, stream;
+ 	struct audioformat *fp = NULL;
+ 	struct snd_usb_power_domain *pd = NULL;
++	bool set_iface_first;
+ 	int num, protocol;
+ 
+ 	dev = chip->dev;
+@@ -1223,11 +1224,19 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ 				return err;
+ 		}
+ 
++		set_iface_first = false;
++		if (protocol == UAC_VERSION_1 ||
++		    (chip->quirk_flags & QUIRK_FLAG_SET_IFACE_FIRST))
++			set_iface_first = true;
++
+ 		/* try to set the interface... */
+ 		usb_set_interface(chip->dev, iface_no, 0);
++		if (set_iface_first)
++			usb_set_interface(chip->dev, iface_no, altno);
+ 		snd_usb_init_pitch(chip, fp);
+ 		snd_usb_init_sample_rate(chip, fp, fp->rate_max);
+-		usb_set_interface(chip->dev, iface_no, altno);
++		if (!set_iface_first)
++			usb_set_interface(chip->dev, iface_no, altno);
+ 	}
+ 	return 0;
+ }


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-12-01 10:33 Mike Pagano
From: Mike Pagano @ 2023-12-01 10:33 UTC
  To: gentoo-commits

commit:     e798906aa4e1b565f6ff8c3e0dce71454da0dabd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  1 10:32:44 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  1 10:32:44 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e798906a

neighbour: Fix __randomize_layout crash in struct neighbour

Bug: https://bugs.gentoo.org/918128

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...ix_randomize_layout_crash_in_struct_neigh.patch | 44 ++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/0000_README b/0000_README
index 12ba051a..fc63225f 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2010_Fix_randomize_layout_crash_in_struct_neigh.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/commit/?id=45b3fae4675d
+Desc:   neighbour: Fix __randomize_layout crash in struct neighbour
+
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requies REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2010_Fix_randomize_layout_crash_in_struct_neigh.patch b/2010_Fix_randomize_layout_crash_in_struct_neigh.patch
new file mode 100644
index 00000000..8ee50b2f
--- /dev/null
+++ b/2010_Fix_randomize_layout_crash_in_struct_neigh.patch
@@ -0,0 +1,44 @@
+From 45b3fae4675dc1d4ee2d7aefa19d85ee4f891377 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Date: Sat, 25 Nov 2023 15:33:58 -0600
+Subject: neighbour: Fix __randomize_layout crash in struct neighbour
+
+Previously, one-element and zero-length arrays were treated as true
+flexible arrays, even though they are actually "fake" flex arrays.
+The __randomize_layout would leave them untouched at the end of the
+struct, similarly to proper C99 flex-array members.
+
+However, this approach changed with commit 1ee60356c2dc ("gcc-plugins:
+randstruct: Only warn about true flexible arrays"). Now, only C99
+flexible-array members will remain untouched at the end of the struct,
+while one-element and zero-length arrays will be subject to randomization.
+
+Fix a `__randomize_layout` crash in `struct neighbour` by transforming
+zero-length array `primary_key` into a proper C99 flexible-array member.
+
+Fixes: 1ee60356c2dc ("gcc-plugins: randstruct: Only warn about true flexible arrays")
+Closes: https://lore.kernel.org/linux-hardening/20231124102458.GB1503258@e124191.cambridge.arm.com/
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Tested-by: Joey Gouly <joey.gouly@arm.com>
+Link: https://lore.kernel.org/r/ZWJoRsJGnCPdJ3+2@work
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+ include/net/neighbour.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 07022bb0d44d4b..0d28172193fa63 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -162,7 +162,7 @@ struct neighbour {
+ 	struct rcu_head		rcu;
+ 	struct net_device	*dev;
+ 	netdevice_tracker	dev_tracker;
+-	u8			primary_key[0];
++	u8			primary_key[];
+ } __randomize_layout;
+ 
+ struct neigh_ops {
+-- 
+cgit 


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-11-28 17:50 Mike Pagano
From: Mike Pagano @ 2023-11-28 17:50 UTC
  To: gentoo-commits

commit:     35b0948c626964e560310987774c01a06d3282f9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 28 17:50:28 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 28 17:50:28 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=35b0948c

Linux patch 6.5.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1012_linux-6.5.13.patch | 20729 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 20733 insertions(+)

diff --git a/0000_README b/0000_README
index fd46c9a5..12ba051a 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-6.5.12.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.12
 
+Patch:  1012_linux-6.5.13.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-6.5.13.patch b/1012_linux-6.5.13.patch
new file mode 100644
index 00000000..f79c48fd
--- /dev/null
+++ b/1012_linux-6.5.13.patch
@@ -0,0 +1,20729 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 23ebe34ff901e..5711129686d10 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2938,6 +2938,10 @@
+ 	locktorture.torture_type= [KNL]
+ 			Specify the locking implementation to test.
+ 
++	locktorture.writer_fifo= [KNL]
++			Run the write-side locktorture kthreads at
++			sched_set_fifo() real-time priority.
++
+ 	locktorture.verbose= [KNL]
+ 			Enable additional printk() statements.
+ 
+@@ -5781,6 +5785,13 @@
+ 			This feature may be more efficiently disabled
+ 			using the csdlock_debug- kernel parameter.
+ 
++	smp.panic_on_ipistall= [KNL]
++			If a csd_lock_timeout extends for more than
++			the specified number of milliseconds, panic the
++			system.  By default, let CSD-lock acquisition
++			take as long as they take.  Specifying 300,000
++			for this value provides a 5-minute timeout.
++
+ 	smsc-ircc2.nopnp	[HW] Don't use PNP to discover SMC devices
+ 	smsc-ircc2.ircc_cfg=	[HW] Device configuration I/O port
+ 	smsc-ircc2.ircc_sir=	[HW] SIR base I/O port
+diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+index 083fda530b484..828650d4c4b09 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+@@ -27,6 +27,27 @@ properties:
+ 
+   vdd3-supply: true
+ 
++  qcom,tune-usb2-disc-thres:
++    $ref: /schemas/types.yaml#/definitions/uint8
++    description: High-Speed disconnect threshold
++    minimum: 0
++    maximum: 7
++    default: 0
++
++  qcom,tune-usb2-amplitude:
++    $ref: /schemas/types.yaml#/definitions/uint8
++    description: High-Speed trasmit amplitude
++    minimum: 0
++    maximum: 15
++    default: 8
++
++  qcom,tune-usb2-preem:
++    $ref: /schemas/types.yaml#/definitions/uint8
++    description: High-Speed TX pre-emphasis tuning
++    minimum: 0
++    maximum: 7
++    default: 5
++
+ required:
+   - compatible
+   - reg
+diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
+index ea277560a5966..5727bd549deca 100644
+--- a/Documentation/devicetree/bindings/serial/serial.yaml
++++ b/Documentation/devicetree/bindings/serial/serial.yaml
+@@ -96,7 +96,7 @@ then:
+     rts-gpios: false
+ 
+ patternProperties:
+-  "^bluetooth|gnss|gps|mcu$":
++  "^(bluetooth|gnss|gps|mcu)$":
+     if:
+       type: object
+     then:
+diff --git a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+index bffdab0b01859..fbac40b958dde 100644
+--- a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
++++ b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+@@ -169,27 +169,27 @@ properties:
+       - const: tgib0
+       - const: tgic0
+       - const: tgid0
+-      - const: tgiv0
++      - const: tciv0
+       - const: tgie0
+       - const: tgif0
+       - const: tgia1
+       - const: tgib1
+-      - const: tgiv1
+-      - const: tgiu1
++      - const: tciv1
++      - const: tciu1
+       - const: tgia2
+       - const: tgib2
+-      - const: tgiv2
+-      - const: tgiu2
++      - const: tciv2
++      - const: tciu2
+       - const: tgia3
+       - const: tgib3
+       - const: tgic3
+       - const: tgid3
+-      - const: tgiv3
++      - const: tciv3
+       - const: tgia4
+       - const: tgib4
+       - const: tgic4
+       - const: tgid4
+-      - const: tgiv4
++      - const: tciv4
+       - const: tgiu5
+       - const: tgiv5
+       - const: tgiw5
+@@ -197,18 +197,18 @@ properties:
+       - const: tgib6
+       - const: tgic6
+       - const: tgid6
+-      - const: tgiv6
++      - const: tciv6
+       - const: tgia7
+       - const: tgib7
+       - const: tgic7
+       - const: tgid7
+-      - const: tgiv7
++      - const: tciv7
+       - const: tgia8
+       - const: tgib8
+       - const: tgic8
+       - const: tgid8
+-      - const: tgiv8
+-      - const: tgiu8
++      - const: tciv8
++      - const: tciu8
+ 
+   clocks:
+     maxItems: 1
+@@ -285,16 +285,16 @@ examples:
+                    <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+                    <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+                    <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+-      interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
++      interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
+                         "tgif0",
+-                        "tgia1", "tgib1", "tgiv1", "tgiu1",
+-                        "tgia2", "tgib2", "tgiv2", "tgiu2",
+-                        "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
+-                        "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
++                        "tgia1", "tgib1", "tciv1", "tciu1",
++                        "tgia2", "tgib2", "tciv2", "tciu2",
++                        "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
++                        "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
+                         "tgiu5", "tgiv5", "tgiw5",
+-                        "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
+-                        "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
+-                        "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
++                        "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
++                        "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
++                        "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
+       clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
+       power-domains = <&cpg>;
+       resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index e76e68ccf7182..10eced6c2e462 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -47,6 +47,7 @@ Supported adapters:
+   * Intel Alder Lake (PCH)
+   * Intel Raptor Lake (PCH)
+   * Intel Meteor Lake (SOC and PCH)
++  * Intel Birch Stream (SOC)
+ 
+    Datasheets: Publicly available at the Intel website
+ 
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+index a395df9c27513..008e560e12b58 100644
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
++++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+@@ -683,6 +683,12 @@ the software port.
+        time protocol.
+      - Error
+ 
++   * - `ptp_cq[i]_late_cqe`
++     - Number of times a CQE has been delivered on the PTP timestamping CQ when
++       the CQE was not expected since a certain amount of time had elapsed where
++       the device typically ensures not posting the CQE.
++     - Error
++
+ .. [#ring_global] The corresponding ring and global counters do not share the
+                   same name (i.e. do not follow the common naming scheme).
+ 
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
+deleted file mode 100644
+index a4edf908b707c..0000000000000
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
++++ /dev/null
+@@ -1,313 +0,0 @@
+-.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+-.. include:: <isonum.txt>
+-
+-=======
+-Devlink
+-=======
+-
+-:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+-
+-Contents
+-========
+-
+-- `Info`_
+-- `Parameters`_
+-- `Health reporters`_
+-
+-Info
+-====
+-
+-The devlink info reports the running and stored firmware versions on device.
+-It also prints the device PSID which represents the HCA board type ID.
+-
+-User command example::
+-
+-   $ devlink dev info pci/0000:00:06.0
+-      pci/0000:00:06.0:
+-      driver mlx5_core
+-      versions:
+-         fixed:
+-            fw.psid MT_0000000009
+-         running:
+-            fw.version 16.26.0100
+-         stored:
+-            fw.version 16.26.0100
+-
+-Parameters
+-==========
+-
+-flow_steering_mode: Device flow steering mode
+----------------------------------------------
+-The flow steering mode parameter controls the flow steering mode of the driver.
+-Two modes are supported:
+-
+-1. 'dmfs' - Device managed flow steering.
+-2. 'smfs' - Software/Driver managed flow steering.
+-
+-In DMFS mode, the HW steering entities are created and managed through the
+-Firmware.
+-In SMFS mode, the HW steering entities are created and managed though by
+-the driver directly into hardware without firmware intervention.
+-
+-SMFS mode is faster and provides better rule insertion rate compared to default DMFS mode.
+-
+-User command examples:
+-
+-- Set SMFS flow steering mode::
+-
+-    $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
+-
+-- Read device flow steering mode::
+-
+-    $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
+-      pci/0000:06:00.0:
+-      name flow_steering_mode type driver-specific
+-      values:
+-         cmode runtime value smfs
+-
+-enable_roce: RoCE enablement state
+-----------------------------------
+-If the device supports RoCE disablement, RoCE enablement state controls device
+-support for RoCE capability. Otherwise, the control occurs in the driver stack.
+-When RoCE is disabled at the driver level, only raw ethernet QPs are supported.
+-
+-To change RoCE enablement state, a user must change the driverinit cmode value
+-and run devlink reload.
+-
+-User command examples:
+-
+-- Disable RoCE::
+-
+-    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+-    $ devlink dev reload pci/0000:06:00.0
+-
+-- Read RoCE enablement state::
+-
+-    $ devlink dev param show pci/0000:06:00.0 name enable_roce
+-      pci/0000:06:00.0:
+-      name enable_roce type generic
+-      values:
+-         cmode driverinit value true
+-
+-esw_port_metadata: Eswitch port metadata state
+-----------------------------------------------
+-When applicable, disabling eswitch metadata can increase packet rate
+-up to 20% depending on the use case and packet sizes.
+-
+-Eswitch port metadata state controls whether to internally tag packets with
+-metadata. Metadata tagging must be enabled for multi-port RoCE, failover
+-between representors and stacked devices.
+-By default metadata is enabled on the supported devices in E-switch.
+-Metadata is applicable only for E-switch in switchdev mode and
+-users may disable it when NONE of the below use cases will be in use:
+-
+-1. HCA is in Dual/multi-port RoCE mode.
+-2. VF/SF representor bonding (Usually used for Live migration)
+-3. Stacked devices
+-
+-When metadata is disabled, the above use cases will fail to initialize if
+-users try to enable them.
+-
+-- Show eswitch port metadata::
+-
+-    $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
+-      pci/0000:06:00.0:
+-        name esw_port_metadata type driver-specific
+-          values:
+-            cmode runtime value true
+-
+-- Disable eswitch port metadata::
+-
+-    $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
+-
+-- Change eswitch mode to switchdev mode where after choosing the metadata value::
+-
+-    $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+-
+-hairpin_num_queues: Number of hairpin queues
+---------------------------------------------
+-We refer to a TC NIC rule that involves forwarding as "hairpin".
+-
+-Hairpin queues are mlx5 hardware specific implementation for hardware
+-forwarding of such packets.
+-
+-- Show the number of hairpin queues::
+-
+-    $ devlink dev param show pci/0000:06:00.0 name hairpin_num_queues
+-      pci/0000:06:00.0:
+-        name hairpin_num_queues type driver-specific
+-          values:
+-            cmode driverinit value 2
+-
+-- Change the number of hairpin queues::
+-
+-    $ devlink dev param set pci/0000:06:00.0 name hairpin_num_queues value 4 cmode driverinit
+-
+-hairpin_queue_size: Size of the hairpin queues
+-----------------------------------------------
+-Control the size of the hairpin queues.
+-
+-- Show the size of the hairpin queues::
+-
+-    $ devlink dev param show pci/0000:06:00.0 name hairpin_queue_size
+-      pci/0000:06:00.0:
+-        name hairpin_queue_size type driver-specific
+-          values:
+-            cmode driverinit value 1024
+-
+-- Change the size (in packets) of the hairpin queues::
+-
+-    $ devlink dev param set pci/0000:06:00.0 name hairpin_queue_size value 512 cmode driverinit
+-
+-Health reporters
+-================
+-
+-tx reporter
+------------
+-The tx reporter is responsible for reporting and recovering from the following two error scenarios:
+-
+-- tx timeout
+-    Report on kernel tx timeout detection.
+-    Recover by searching for lost interrupts.
+-- tx error completion
+-    Report on error tx completion.
+-    Recover by flushing the tx queue and resetting it.
+-
+-The tx reporter also supports an on-demand diagnose callback, through which
+-it provides real-time information on the status of its send queues.
+-
+-User commands examples:
+-
+-- Diagnose send queues status::
+-
+-    $ devlink health diagnose pci/0000:82:00.0 reporter tx
+-
+-.. note::
+-   This command has valid output only when the interface is up; otherwise the output is empty.
+-
+-- Show the number of tx errors indicated, the number of recovery flows that ended
+-  successfully, whether autorecover is enabled, and the grace period since the last recovery::
+-
+-    $ devlink health show pci/0000:82:00.0 reporter tx
+-
+-rx reporter
+------------
+-The rx reporter is responsible for reporting and recovering from the following two error scenarios:
+-
+-- rx queues' initialization (population) timeout
+-    Population of rx queues' descriptors on ring initialization is done
+-    in napi context via triggering an irq. If the minimum number of
+-    descriptors cannot be obtained, a timeout occurs, and the
+-    descriptors can be recovered by polling the EQ (Event Queue).
+-- rx completions with errors (reported by HW on interrupt context)
+-    Report on rx completion error.
+-    Recover (if needed) by flushing the related queue and resetting it.
+-
+-The rx reporter also supports an on-demand diagnose callback, through which
+-it provides real-time information on the status of its receive queues.
+-
+-- Diagnose rx queues' status and corresponding completion queue::
+-
+-    $ devlink health diagnose pci/0000:82:00.0 reporter rx
+-
+-NOTE: This command has valid output only when the interface is up. Otherwise, the output is empty.
+-
+-- Show the number of rx errors indicated, the number of recovery flows that ended
+-  successfully, whether autorecover is enabled, and the grace period since the last recovery::
+-
+-    $ devlink health show pci/0000:82:00.0 reporter rx
+-
+-fw reporter
+------------
+-The fw reporter implements `diagnose` and `dump` callbacks.
+-It follows symptoms of fw errors, such as a fw syndrome, by triggering a
+-fw core dump and storing it in the dump buffer.
+-The fw reporter diagnose command can be triggered at any time by the user to
+-check the current fw status.
+-
+-User commands examples:
+-
+-- Check fw health status::
+-
+-    $ devlink health diagnose pci/0000:82:00.0 reporter fw
+-
+-- Read the FW core dump if already stored, or trigger a new one::
+-
+-    $ devlink health dump show pci/0000:82:00.0 reporter fw
+-
+-.. note::
+-   This command can run only on the PF which has fw tracer ownership;
+-   running it on another PF or any VF will return "Operation not permitted".
+-
+-fw fatal reporter
+------------------
+-The fw fatal reporter implements `dump` and `recover` callbacks.
+-It follows fatal error indications with a CR-space dump and a recovery flow.
+-The CR-space dump uses the vsc interface, which remains valid even if the FW
+-command interface is not functional, as is the case in most FW fatal errors.
+-The recover function runs a recovery flow that reloads the driver and triggers
+-a fw reset if needed.
+-On firmware error, the health buffer is dumped into the dmesg. The log
+-level is derived from the error's severity (given in health buffer).
+-
+-User commands examples:
+-
+-- Run fw recover flow manually::
+-
+-    $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
+-
+-- Read the FW CR-space dump if already stored, or trigger a new one::
+-
+-    $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
+-
+-.. note::
+-   This command can run only on the PF.
+-
+-vnic reporter
+--------------
+-The vnic reporter implements only the `diagnose` callback.
+-It is responsible for querying the vnic diagnostic counters from fw and
+-displaying them in real time.
+-
+-Description of the vnic counters:
+-
+-- total_q_under_processor_handle
+-        number of queues in an error state due to
+-        an async error or errored command.
+-- send_queue_priority_update_flow
+-        number of QP/SQ priority/SL update events.
+-- cq_overrun
+-        number of times CQ entered an error state due to an overflow.
+-- async_eq_overrun
+-        number of times an EQ mapped to async events was overrun.
+-- comp_eq_overrun
+-        number of times an EQ mapped to completion events was overrun.
+-- quota_exceeded_command
+-        number of commands issued and failed due to quota exceeded.
+-- invalid_command
+-        number of commands issued and failed due to any reason other than quota
+-        exceeded.
+-- nic_receive_steering_discard
+-        number of packets that completed RX flow
+-        steering but were discarded due to a mismatch in the flow table.
+-- generated_pkt_steering_fail
+-	number of packets generated by the VNIC experiencing unexpected steering
+-	failure (at any point in steering flow).
+-- handled_pkt_steering_fail
+-	number of packets handled by the VNIC experiencing unexpected steering
+-	failure (at any point in steering flow owned by the VNIC, including the FDB
+-	for the eswitch owner).
+-
+-User commands examples:
+-
+-- Diagnose PF/VF vnic counters::
+-
+-        $ devlink health diagnose pci/0000:82:00.1 reporter vnic
+-
+-- Diagnose representor vnic counters (performed by supplying the devlink port of
+-  the representor, which can be obtained via the devlink port command)::
+-
+-        $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
+-
+-.. note::
+-   This command can run on all interfaces, such as PF/VF and representor ports.
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
+index 3fdcd6b61ccfa..581a91caa5795 100644
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
++++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
+@@ -13,7 +13,6 @@ Contents:
+    :maxdepth: 2
+ 
+    kconfig
+-   devlink
+    switchdev
+    tracepoints
+    counters
+diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
+index 202798d6501e7..702f204a3dbd3 100644
+--- a/Documentation/networking/devlink/mlx5.rst
++++ b/Documentation/networking/devlink/mlx5.rst
+@@ -18,6 +18,11 @@ Parameters
+    * - ``enable_roce``
+      - driverinit
+      - Type: Boolean
++
++       If the device supports RoCE disablement, RoCE enablement state controls
++       device support for RoCE capability. Otherwise, the control occurs in the
++       driver stack. When RoCE is disabled at the driver level, only raw
++       ethernet QPs are supported.
+    * - ``io_eq_size``
+      - driverinit
+      - The range is between 64 and 4096.
+@@ -48,6 +53,9 @@ parameters.
+        * ``smfs`` Software managed flow steering. In SMFS mode, the HW
+          steering entities are created and managed by the driver without
+          firmware intervention.
++
++       SMFS mode is faster and provides a better rule insertion rate than the
++       default DMFS mode.
+    * - ``fdb_large_groups``
+      - u32
+      - driverinit
+@@ -71,7 +79,24 @@ parameters.
+        deprecated.
+ 
+        Default: disabled
++   * - ``esw_port_metadata``
++     - Boolean
++     - runtime
++     - When applicable, disabling eswitch metadata can increase the packet rate
++       by up to 20%, depending on the use case and packet sizes.
++
++       Eswitch port metadata state controls whether to internally tag packets
++       with metadata. Metadata tagging must be enabled for multi-port RoCE,
++       failover between representors and stacked devices. By default, metadata
++       is enabled on the supported devices in E-switch. Metadata is applicable
++       only for E-switch in switchdev mode, and users may disable it when NONE
++       of the below use cases is in use:
++       1. HCA is in Dual/multi-port RoCE mode.
++       2. VF/SF representor bonding (Usually used for Live migration)
++       3. Stacked devices
+ 
++       When metadata is disabled, the above use cases will fail to initialize if
++       users try to enable them.
+    * - ``hairpin_num_queues``
+      - u32
+      - driverinit
+@@ -104,3 +129,160 @@ The ``mlx5`` driver reports the following versions
+    * - ``fw.version``
+      - stored, running
+      - Three digit major.minor.subminor firmware version number.
++
++Health reporters
++================
++
++tx reporter
++-----------
++The tx reporter is responsible for reporting and recovering from the following three error scenarios:
++
++- tx timeout
++    Report on kernel tx timeout detection.
++    Recover by searching for lost interrupts.
++- tx error completion
++    Report on error tx completion.
++    Recover by flushing the tx queue and resetting it.
++- tx PTP port timestamping CQ unhealthy
++    Report when too many CQEs were never delivered on the port ts CQ.
++    Recover by flushing and re-creating all PTP channels.
++
++The tx reporter also supports an on-demand diagnose callback, through which
++it provides real-time information on the status of its send queues.
++
++User commands examples:
++
++- Diagnose send queues status::
++
++    $ devlink health diagnose pci/0000:82:00.0 reporter tx
++
++.. note::
++   This command has valid output only when the interface is up; otherwise the output is empty.
++
++- Show the number of tx errors indicated, the number of recovery flows that ended
++  successfully, whether autorecover is enabled, and the grace period since the last recovery::
++
++    $ devlink health show pci/0000:82:00.0 reporter tx
++
++rx reporter
++-----------
++The rx reporter is responsible for reporting and recovering from the following two error scenarios:
++
++- rx queues' initialization (population) timeout
++    Population of rx queues' descriptors on ring initialization is done
++    in napi context via triggering an irq. If the minimum number of
++    descriptors cannot be obtained, a timeout occurs, and the
++    descriptors can be recovered by polling the EQ (Event Queue).
++- rx completions with errors (reported by HW on interrupt context)
++    Report on rx completion error.
++    Recover (if needed) by flushing the related queue and resetting it.
++
++The rx reporter also supports an on-demand diagnose callback, through which
++it provides real-time information on the status of its receive queues.
++
++- Diagnose rx queues' status and corresponding completion queue::
++
++    $ devlink health diagnose pci/0000:82:00.0 reporter rx
++
++.. note::
++   This command has valid output only when the interface is up. Otherwise, the output is empty.
++
++- Show the number of rx errors indicated, the number of recovery flows that ended
++  successfully, whether autorecover is enabled, and the grace period since the last recovery::
++
++    $ devlink health show pci/0000:82:00.0 reporter rx
++
++fw reporter
++-----------
++The fw reporter implements `diagnose` and `dump` callbacks.
++It follows symptoms of fw errors, such as a fw syndrome, by triggering a
++fw core dump and storing it in the dump buffer.
++The fw reporter diagnose command can be triggered at any time by the user to
++check the current fw status.
++
++User commands examples:
++
++- Check fw health status::
++
++    $ devlink health diagnose pci/0000:82:00.0 reporter fw
++
++- Read the FW core dump if already stored, or trigger a new one::
++
++    $ devlink health dump show pci/0000:82:00.0 reporter fw
++
++.. note::
++   This command can run only on the PF which has fw tracer ownership;
++   running it on another PF or any VF will return "Operation not permitted".
++
++fw fatal reporter
++-----------------
++The fw fatal reporter implements `dump` and `recover` callbacks.
++It follows fatal error indications with a CR-space dump and a recovery flow.
++The CR-space dump uses the vsc interface, which remains valid even if the FW
++command interface is not functional, as is the case in most FW fatal errors.
++The recover function runs a recovery flow that reloads the driver and triggers
++a fw reset if needed.
++On firmware error, the health buffer is dumped into the dmesg. The log
++level is derived from the error's severity (given in health buffer).
++
++User commands examples:
++
++- Run fw recover flow manually::
++
++    $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
++
++- Read the FW CR-space dump if already stored, or trigger a new one::
++
++    $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
++
++.. note::
++   This command can run only on the PF.
++
++vnic reporter
++-------------
++The vnic reporter implements only the `diagnose` callback.
++It is responsible for querying the vnic diagnostic counters from fw and
++displaying them in real time.
++
++Description of the vnic counters:
++
++- total_q_under_processor_handle
++        number of queues in an error state due to
++        an async error or errored command.
++- send_queue_priority_update_flow
++        number of QP/SQ priority/SL update events.
++- cq_overrun
++        number of times CQ entered an error state due to an overflow.
++- async_eq_overrun
++        number of times an EQ mapped to async events was overrun.
++- comp_eq_overrun
++        number of times an EQ mapped to completion events was overrun.
++- quota_exceeded_command
++        number of commands issued and failed due to quota exceeded.
++- invalid_command
++        number of commands issued and failed due to any reason other than quota
++        exceeded.
++- nic_receive_steering_discard
++        number of packets that completed RX flow
++        steering but were discarded due to a mismatch in the flow table.
++- generated_pkt_steering_fail
++	number of packets generated by the VNIC experiencing unexpected steering
++	failure (at any point in steering flow).
++- handled_pkt_steering_fail
++	number of packets handled by the VNIC experiencing unexpected steering
++	failure (at any point in steering flow owned by the VNIC, including the FDB
++	for the eswitch owner).
++
++User commands examples:
++
++- Diagnose PF/VF vnic counters::
++
++        $ devlink health diagnose pci/0000:82:00.1 reporter vnic
++
++- Diagnose representor vnic counters (performed by supplying the devlink port of
++  the representor, which can be obtained via the devlink port command)::
++
++        $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
++
++.. note::
++   This command can run on all interfaces, such as PF/VF and representor ports.
+diff --git a/Makefile b/Makefile
+index a6e152146028a..80e2b991dd0f3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
+index 58e039a851af0..3c82975d46db3 100644
+--- a/arch/arm/include/asm/exception.h
++++ b/arch/arm/include/asm/exception.h
+@@ -10,10 +10,6 @@
+ 
+ #include <linux/interrupt.h>
+ 
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry	__irq_entry
+-#else
+-#define __exception_irq_entry
+-#endif
+ 
+ #endif /* __ASM_ARM_EXCEPTION_H */
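For context, a minimal sketch (not from this patch) of what the now-unconditional
annotation does: __irq_entry places a function in the .irqentry.text section,
which the kernel consults when classifying stack traces and tracing interrupt
entry points, so the tag is useful beyond CONFIG_FUNCTION_GRAPH_TRACER. The
handler name below is illustrative::

    #include <linux/interrupt.h>
    #include <asm/exception.h>

    /* demo_handle_irq is a hypothetical low-level handler; the attribute
     * moves it into .irqentry.text so irq-entry detection keeps working. */
    static void __exception_irq_entry demo_handle_irq(struct pt_regs *regs)
    {
            /* dispatch into the generic IRQ layer here */
    }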
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 73085b30b3092..9e0a2453b87d6 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1369,6 +1369,8 @@ choice
+ config CPU_BIG_ENDIAN
+ 	bool "Build big-endian kernel"
+ 	depends on !LD_IS_LLD || LLD_VERSION >= 130000
++	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
++	depends on AS_IS_GNU || AS_VERSION >= 150000
+ 	help
+ 	  Say Y if you plan on running a kernel with a big-endian userspace.
+ 
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index d2f5345d05600..717288bbdb8b6 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -1186,26 +1186,34 @@
+ 			dma-coherent;
+ 		};
+ 
+-		usb0: usb@3100000 {
+-			status = "disabled";
+-			compatible = "snps,dwc3";
+-			reg = <0x0 0x3100000 0x0 0x10000>;
+-			interrupts = <0 80 0x4>; /* Level high type */
+-			dr_mode = "host";
+-			snps,quirk-frame-length-adjustment = <0x20>;
+-			snps,dis_rxdet_inp3_quirk;
+-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+-		};
++		bus: bus {
++			#address-cells = <2>;
++			#size-cells = <2>;
++			compatible = "simple-bus";
++			ranges;
++			dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++			usb0: usb@3100000 {
++				compatible = "snps,dwc3";
++				reg = <0x0 0x3100000 0x0 0x10000>;
++				interrupts = <0 80 0x4>; /* Level high type */
++				dr_mode = "host";
++				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,dis_rxdet_inp3_quirk;
++				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++				status = "disabled";
++			};
+ 
+-		usb1: usb@3110000 {
+-			status = "disabled";
+-			compatible = "snps,dwc3";
+-			reg = <0x0 0x3110000 0x0 0x10000>;
+-			interrupts = <0 81 0x4>; /* Level high type */
+-			dr_mode = "host";
+-			snps,quirk-frame-length-adjustment = <0x20>;
+-			snps,dis_rxdet_inp3_quirk;
+-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++			usb1: usb@3110000 {
++				compatible = "snps,dwc3";
++				reg = <0x0 0x3110000 0x0 0x10000>;
++				interrupts = <0 81 0x4>; /* Level high type */
++				dr_mode = "host";
++				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,dis_rxdet_inp3_quirk;
++				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++				status = "disabled";
++			};
+ 		};
+ 
+ 		ccn@4000000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+index 8bfc2db44624a..e40c55adff23d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+@@ -135,7 +135,7 @@
+ 			reg = <0x0 0x4a800000 0x0 0x100000>;
+ 			no-map;
+ 
+-			hwlocks = <&tcsr_mutex 0>;
++			hwlocks = <&tcsr_mutex 3>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 7355f266742aa..cdd7690132734 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -207,7 +207,7 @@
+ 	smem {
+ 		compatible = "qcom,smem";
+ 		memory-region = <&smem_region>;
+-		hwlocks = <&tcsr_mutex 0>;
++		hwlocks = <&tcsr_mutex 3>;
+ 	};
+ 
+ 	soc: soc@0 {
+@@ -389,7 +389,7 @@
+ 
+ 		tcsr_mutex: hwlock@1905000 {
+ 			compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
+-			reg = <0x0 0x01905000 0x0 0x1000>;
++			reg = <0x0 0x01905000 0x0 0x20000>;
+ 			#hwlock-cells = <1>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 00ed71936b472..92fd924bbdbe5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -101,7 +101,7 @@
+ 			reg = <0x0 0x4ab00000 0x0 0x100000>;
+ 			no-map;
+ 
+-			hwlocks = <&tcsr_mutex 0>;
++			hwlocks = <&tcsr_mutex 3>;
+ 		};
+ 
+ 		memory@4ac00000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index f120c7c523517..cbe07fd44788d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -174,7 +174,7 @@
+ 		smem@4aa00000 {
+ 			compatible = "qcom,smem";
+ 			reg = <0x0 0x4aa00000 0x0 0x100000>;
+-			hwlocks = <&tcsr_mutex 0>;
++			hwlocks = <&tcsr_mutex 3>;
+ 			no-map;
+ 		};
+ 	};
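The hwlocks cell selects which TCSR hardware mutex a consumer such as the qcom
smem driver requests; lock 3 is reportedly the lock the remote processors'
firmware expects for SMEM, hence the 0 to 3 change in the hunks above. A
consumer-side sketch, with the demo_* name being illustrative::

    #include <linux/hwspinlock.h>
    #include <linux/of.h>

    static int demo_take_smem_lock(struct device_node *np)
    {
            struct hwspinlock *lock;
            unsigned long flags;
            int id, ret;

            /* resolves the cell in hwlocks = <&tcsr_mutex 3> to lock id 3 */
            id = of_hwspin_lock_get_id(np, 0);
            if (id < 0)
                    return id;

            lock = hwspin_lock_request_specific(id);
            if (!lock)
                    return -EBUSY;

            ret = hwspin_lock_timeout_irqsave(lock, 1000, &flags);
            if (!ret)
                    hwspin_unlock_irqrestore(lock, &flags);
            return ret;
    }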
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index bd69a4e7cd605..79200f21e1239 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ 		switch (ELF64_R_TYPE(rela[i].r_info)) {
+ 		case R_AARCH64_JUMP26:
+ 		case R_AARCH64_CALL26:
+-			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+-				break;
+-
+ 			/*
+ 			 * We only have to consider branch targets that resolve
+ 			 * to symbols that are defined in a different section.
+@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
+ {
+ 	int i = 0, j = numrels - 1;
+ 
+-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+-		return 0;
+-
+ 	while (i < j) {
+ 		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
+ 			i++;
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index b9f567e660166..ed5da02b1cf6f 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
+ #define __my_cpu_offset __my_cpu_offset
+ 
+ #define PERCPU_OP(op, asm_op, c_op)					\
+-static inline unsigned long __percpu_##op(void *ptr,			\
++static __always_inline unsigned long __percpu_##op(void *ptr,		\
+ 			unsigned long val, int size)			\
+ {									\
+ 	unsigned long ret;						\
+@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
+ PERCPU_OP(or, or, |)
+ #undef PERCPU_OP
+ 
+-static inline unsigned long __percpu_read(void *ptr, int size)
++static __always_inline unsigned long __percpu_read(void *ptr, int size)
+ {
+ 	unsigned long ret;
+ 
+@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ 	return ret;
+ }
+ 
+-static inline void __percpu_write(void *ptr, unsigned long val, int size)
++static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ 	switch (size) {
+ 	case 1:
+@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ 	}
+ }
+ 
+-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+-						int size)
++static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
++						   int size)
+ {
+ 	switch (size) {
+ 	case 1:
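The reason these accessors are forced inline (a sketch of the shape, not the
kernel code): the size argument must resolve to a compile-time constant so
each switch arm reduces to a single access, which an out-of-line copy of the
function cannot guarantee. Assumes the kernel's BUILD_BUG() helper::

    static __always_inline unsigned long demo_percpu_read(void *ptr, int size)
    {
            switch (size) {
            case 4:
                    return *(volatile u32 *)ptr;    /* one load, size known */
            case 8:
                    return *(volatile u64 *)ptr;
            default:
                    BUILD_BUG();    /* non-constant or unsupported size */
                    return 0;
            }
    }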
+diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
+index 7a90070136e82..8e38a86996fc6 100644
+--- a/arch/parisc/include/uapi/asm/pdc.h
++++ b/arch/parisc/include/uapi/asm/pdc.h
+@@ -472,6 +472,7 @@ struct pdc_model {		/* for PDC_MODEL */
+ 	unsigned long arch_rev;
+ 	unsigned long pot_key;
+ 	unsigned long curr_key;
++	unsigned long width;	/* default of PSW_W bit (1=enabled) */
+ };
+ 
+ struct pdc_cache_cf {		/* for PDC_CACHE  (I/D-caches) */
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index ae03b8679696e..ab23e61a6f016 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -36,6 +36,24 @@
+ 	.level 2.0
+ #endif
+ 
++/*
++ * We need seven instructions after a TLB insert for it to take effect.
++ * The PA8800/PA8900 processors are an exception and need 12 instructions.
++ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
++ */
++#ifdef CONFIG_64BIT
++#define NUM_PIPELINE_INSNS    12
++#else
++#define NUM_PIPELINE_INSNS    7
++#endif
++
++	/* Insert num nops */
++	.macro	insert_nops num
++	.rept \num
++	nop
++	.endr
++	.endm
++
+ 	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
+ 	.macro  get_ptl reg
+ 	mfctl	%cr28,\reg
+@@ -415,24 +433,20 @@
+ 3:
+ 	.endm
+ 
+-	/* Release page_table_lock without reloading lock address.
+-	   We use an ordered store to ensure all prior accesses are
+-	   performed prior to releasing the lock. */
+-	.macro		ptl_unlock0	spc,tmp,tmp2
++	/* Release page_table_lock if for user space. We use an ordered
++	   store to ensure all prior accesses are performed prior to
++	   releasing the lock. Note stw may not be executed, so we
++	   provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
++	.macro		ptl_unlock	spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+-98:	ldi		__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
++98:	get_ptl		\tmp
++	ldi		__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+ 	or,COND(=)	%r0,\spc,%r0
+ 	stw,ma		\tmp2,0(\tmp)
+ 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+-	.endm
+-
+-	/* Release page_table_lock. */
+-	.macro		ptl_unlock1	spc,tmp,tmp2
+-#ifdef CONFIG_TLB_PTLOCK
+-98:	get_ptl		\tmp
+-	ptl_unlock0	\spc,\tmp,\tmp2
+-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++	insert_nops	NUM_PIPELINE_INSNS - 4
++#else
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ #endif
+ 	.endm
+ 
+@@ -461,13 +475,13 @@
+ 	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ 	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
+ 	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
++	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
+ 
+ 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ 	.macro		convert_for_tlb_insert20 pte,tmp
+ #ifdef CONFIG_HUGETLB_PAGE
+ 	copy		\pte,\tmp
+-	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++	extrd,u		\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
+ 
+ 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_SHIFT,\pte
+@@ -475,8 +489,7 @@
+ 	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+ #else /* Huge pages disabled */
+-	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++	extrd,u		\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
+ 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_SHIFT,\pte
+ #endif
+@@ -1124,7 +1137,7 @@ dtlb_miss_20w:
+ 	
+ 	idtlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:
+ 
+ 	idtlbt          pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1150,7 +1164,7 @@ nadtlb_miss_20w:
+ 
+ 	idtlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:
+ 
+ 	idtlbt          pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1184,7 +1199,7 @@ dtlb_miss_11:
+ 
+ 	mtsp		t1, %sr1	/* Restore sr1 */
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
+ 	idtlba          pte,(va)
+ 	idtlbp          prot,(va)
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1217,7 +1233,7 @@ nadtlb_miss_11:
+ 
+ 	mtsp		t1, %sr1	/* Restore sr1 */
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
+ 	idtlba          pte,(va)
+ 	idtlbp          prot,(va)
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1246,7 +1263,7 @@ dtlb_miss_20:
+ 
+ 	idtlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1255,6 +1272,7 @@ dtlb_check_alias_20:
+ 	
+ 	idtlbt          pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1274,7 +1292,7 @@ nadtlb_miss_20:
+ 	
+ 	idtlbt		pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:
+ 
+ 	idtlbt          pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1319,7 +1338,7 @@ itlb_miss_20w:
+ 	
+ 	iitlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1343,7 +1362,7 @@ naitlb_miss_20w:
+ 
+ 	iitlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:
+ 
+ 	iitlbt		pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1377,7 +1397,7 @@ itlb_miss_11:
+ 
+ 	mtsp		t1, %sr1	/* Restore sr1 */
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1401,7 +1421,7 @@ naitlb_miss_11:
+ 
+ 	mtsp		t1, %sr1	/* Restore sr1 */
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
+ 	iitlba          pte,(%sr0, va)
+ 	iitlbp          prot,(%sr0, va)
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1431,7 +1452,7 @@ itlb_miss_20:
+ 
+ 	iitlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1451,7 +1472,7 @@ naitlb_miss_20:
+ 
+ 	iitlbt          pte,prot
+ 
+-	ptl_unlock1	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1460,6 +1481,7 @@ naitlb_check_alias_20:
+ 
+ 	iitlbt          pte,prot
+ 
++	insert_nops	NUM_PIPELINE_INSNS - 1
+ 	rfir
+ 	nop
+ 
+@@ -1481,7 +1503,7 @@ dbit_trap_20w:
+ 		
+ 	idtlbt          pte,prot
+ 
+-	ptl_unlock0	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ #else
+@@ -1507,7 +1529,7 @@ dbit_trap_11:
+ 
+ 	mtsp            t1, %sr1     /* Restore sr1 */
+ 
+-	ptl_unlock0	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ 
+@@ -1527,7 +1549,7 @@ dbit_trap_20:
+ 	
+ 	idtlbt		pte,prot
+ 
+-	ptl_unlock0	spc,t0,t1
++	ptl_unlock	spc,t0,t1
+ 	rfir
+ 	nop
+ #endif
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fd15fd4bbb61b..5a7d43c0f469c 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -70,9 +70,8 @@ $bss_loop:
+ 	stw,ma          %arg2,4(%r1)
+ 	stw,ma          %arg3,4(%r1)
+ 
+-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+-	/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+-	 * and halt kernel if we detect a PA1.x CPU. */
++#if defined(CONFIG_PA20)
++	/* check for 64-bit capable CPU as required by current kernel */
+ 	ldi		32,%r10
+ 	mtctl		%r10,%cr11
+ 	.level 2.0
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 8c1f7def596e4..10b946e9c6e75 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ 		/*
+ 		 * Disable instruction sampling if it was enabled
+ 		 */
+-		if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+-			val &= ~MMCRA_SAMPLE_ENABLE;
++		val &= ~MMCRA_SAMPLE_ENABLE;
+ 
+ 		/* Disable BHRB via mmcra (BHRBRD) for p10 */
+ 		if (ppmu->flags & PPMU_ARCH_31)
+@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ 		 * instruction sampling or BHRB.
+ 		 */
+ 		if (val != mmcra) {
+-			mtspr(SPRN_MMCRA, mmcra);
++			mtspr(SPRN_MMCRA, val);
+ 			mb();
+ 			isync();
+ 		}
+diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
+index 113bdb151f687..40e26e9f318fd 100644
+--- a/arch/powerpc/platforms/powernv/opal-prd.c
++++ b/arch/powerpc/platforms/powernv/opal-prd.c
+@@ -24,13 +24,20 @@
+ #include <linux/uaccess.h>
+ 
+ 
++struct opal_prd_msg {
++	union {
++		struct opal_prd_msg_header header;
++		DECLARE_FLEX_ARRAY(u8, data);
++	};
++};
++
+ /*
+  * The msg member must be at the end of the struct, as it's followed by the
+  * message data.
+  */
+ struct opal_prd_msg_queue_item {
+-	struct list_head		list;
+-	struct opal_prd_msg_header	msg;
++	struct list_head	list;
++	struct opal_prd_msg	msg;
+ };
+ 
+ static struct device_node *prd_node;
+@@ -156,7 +163,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ 	int rc;
+ 
+ 	/* we need at least a header's worth of data */
+-	if (count < sizeof(item->msg))
++	if (count < sizeof(item->msg.header))
+ 		return -EINVAL;
+ 
+ 	if (*ppos)
+@@ -186,7 +193,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ 			return -EINTR;
+ 	}
+ 
+-	size = be16_to_cpu(item->msg.size);
++	size = be16_to_cpu(item->msg.header.size);
+ 	if (size > count) {
+ 		err = -EINVAL;
+ 		goto err_requeue;
+@@ -352,7 +359,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb,
+ 	if (!item)
+ 		return -ENOMEM;
+ 
+-	memcpy(&item->msg, msg->params, msg_size);
++	memcpy(&item->msg.data, msg->params, msg_size);
+ 
+ 	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ 	list_add_tail(&item->list, &opal_prd_msg_queue);
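The union-plus-DECLARE_FLEX_ARRAY() pattern above lets the same storage be
addressed either as the fixed header or as a raw byte stream, so copying a
header plus payload no longer writes past a fixed-size member. A minimal
sketch of the idiom, with demo_* names being illustrative::

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct demo_hdr {
            __be16 type;
            __be16 size;
    };

    struct demo_msg {
            union {
                    struct demo_hdr header;         /* typed view */
                    DECLARE_FLEX_ARRAY(u8, data);   /* raw byte view */
            };
    };

    /* memcpy(&msg->data, src, len) now legally covers header + payload. */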
+diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
+index 61ba8ed43d8fe..36b955c762ba0 100644
+--- a/arch/riscv/include/asm/asm-prototypes.h
++++ b/arch/riscv/include/asm/asm-prototypes.h
+@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+ DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+ DECLARE_DO_ERROR_INFO(do_trap_break);
+ 
+-asmlinkage unsigned long get_overflow_stack(void);
+ asmlinkage void handle_bad_stack(struct pt_regs *regs);
+ asmlinkage void do_page_fault(struct pt_regs *regs);
+ asmlinkage void do_irq(struct pt_regs *regs);
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 114bbadaef41e..bfb4c26f113c4 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -82,6 +82,28 @@
+ 	.endr
+ .endm
+ 
++#ifdef CONFIG_SMP
++#ifdef CONFIG_32BIT
++#define PER_CPU_OFFSET_SHIFT 2
++#else
++#define PER_CPU_OFFSET_SHIFT 3
++#endif
++
++.macro asm_per_cpu dst sym tmp
++	REG_L \tmp, TASK_TI_CPU_NUM(tp)
++	slli  \tmp, \tmp, PER_CPU_OFFSET_SHIFT
++	la    \dst, __per_cpu_offset
++	add   \dst, \dst, \tmp
++	REG_L \tmp, 0(\dst)
++	la    \dst, \sym
++	add   \dst, \dst, \tmp
++.endm
++#else /* CONFIG_SMP */
++.macro asm_per_cpu dst sym tmp
++	la    \dst, \sym
++.endm
++#endif /* CONFIG_SMP */
++
+ 	/* save all GPs except x1 ~ x5 */
+ 	.macro save_from_x6_to_x31
+ 	REG_S x6,  PT_T1(sp)
+diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
+index 78936f4ff5133..7cad513538d8d 100644
+--- a/arch/riscv/include/asm/hwprobe.h
++++ b/arch/riscv/include/asm/hwprobe.h
+@@ -10,4 +10,9 @@
+ 
+ #define RISCV_HWPROBE_MAX_KEY 5
+ 
++static inline bool riscv_hwprobe_key_is_valid(__s64 key)
++{
++	return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
++}
++
+ #endif
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index b55ba20903ecc..53c00164c0421 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -33,8 +33,8 @@
+ #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+ #endif
+ /*
+- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+- * define the PAGE_OFFSET value for SV39.
++ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
++ * define the PAGE_OFFSET value for SV48 and SV39.
+  */
+ #define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
+ #define PAGE_OFFSET_L3		_AC(0xffffffd800000000, UL)
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 1833beb00489c..d18ce0113ca1f 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -34,9 +34,6 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
+-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+-extern unsigned long spin_shadow_stack;
+-
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+ 
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 14f5d27783b85..96b65a5396dfc 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
+ 	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ #endif
+ 
+-#ifdef __riscv_zihintpause
++#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ 	/*
+ 	 * Reduce instruction retirement.
+ 	 * This assumes the PC changes.
+diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
+index d6a75aac1d27a..9f535d5de33f9 100644
+--- a/arch/riscv/kernel/asm-offsets.c
++++ b/arch/riscv/kernel/asm-offsets.c
+@@ -39,6 +39,7 @@ void asm_offsets(void)
+ 	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ 	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+ 
++	OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+ 	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
+ 	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
+ 	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 143a2bb3e6976..278d01d2911fd 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -10,9 +10,13 @@
+ #include <asm/asm.h>
+ #include <asm/csr.h>
+ #include <asm/unistd.h>
++#include <asm/page.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/errata_list.h>
++#include <linux/sizes.h>
++
++	.section .irqentry.text, "ax"
+ 
+ SYM_CODE_START(handle_exception)
+ 	/*
+@@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)
+ 
+ #ifdef CONFIG_VMAP_STACK
+ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
+-	/*
+-	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
+-	 * harts are concurrently overflowing their kernel stacks.  We could
+-	 * store any value here, but since we're overflowing the kernel stack
+-	 * already we only have SP to use as a scratch register.  So we just
+-	 * swap in the address of the spinlock, as that's definitely non-zero.
+-	 *
+-	 * Pairs with a store_release in handle_bad_stack().
+-	 */
+-1:	la sp, spin_shadow_stack
+-	REG_AMOSWAP_AQ sp, sp, (sp)
+-	bnez sp, 1b
+-
+-	la sp, shadow_stack
+-	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+-
+-	//save caller register to shadow stack
+-	addi sp, sp, -(PT_SIZE_ON_STACK)
+-	REG_S x1,  PT_RA(sp)
+-	REG_S x5,  PT_T0(sp)
+-	REG_S x6,  PT_T1(sp)
+-	REG_S x7,  PT_T2(sp)
+-	REG_S x10, PT_A0(sp)
+-	REG_S x11, PT_A1(sp)
+-	REG_S x12, PT_A2(sp)
+-	REG_S x13, PT_A3(sp)
+-	REG_S x14, PT_A4(sp)
+-	REG_S x15, PT_A5(sp)
+-	REG_S x16, PT_A6(sp)
+-	REG_S x17, PT_A7(sp)
+-	REG_S x28, PT_T3(sp)
+-	REG_S x29, PT_T4(sp)
+-	REG_S x30, PT_T5(sp)
+-	REG_S x31, PT_T6(sp)
+-
+-	la ra, restore_caller_reg
+-	tail get_overflow_stack
+-
+-restore_caller_reg:
+-	//save per-cpu overflow stack
+-	REG_S a0, -8(sp)
+-	//restore caller register from shadow_stack
+-	REG_L x1,  PT_RA(sp)
+-	REG_L x5,  PT_T0(sp)
+-	REG_L x6,  PT_T1(sp)
+-	REG_L x7,  PT_T2(sp)
+-	REG_L x10, PT_A0(sp)
+-	REG_L x11, PT_A1(sp)
+-	REG_L x12, PT_A2(sp)
+-	REG_L x13, PT_A3(sp)
+-	REG_L x14, PT_A4(sp)
+-	REG_L x15, PT_A5(sp)
+-	REG_L x16, PT_A6(sp)
+-	REG_L x17, PT_A7(sp)
+-	REG_L x28, PT_T3(sp)
+-	REG_L x29, PT_T4(sp)
+-	REG_L x30, PT_T5(sp)
+-	REG_L x31, PT_T6(sp)
++	/* we reach here from kernel context, sscratch must be 0 */
++	csrrw x31, CSR_SCRATCH, x31
++	asm_per_cpu sp, overflow_stack, x31
++	li x31, OVERFLOW_STACK_SIZE
++	add sp, sp, x31
++	/* zero out x31 again and restore x31 */
++	xor x31, x31, x31
++	csrrw x31, CSR_SCRATCH, x31
+ 
+-	//load per-cpu overflow stack
+-	REG_L sp, -8(sp)
+ 	addi sp, sp, -(PT_SIZE_ON_STACK)
+ 
+ 	//save context to overflow stack
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index 7441ac8a68436..8aaebe720e267 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ 				       unsigned long val)
+ {
+ 	if (index == 0)
+-		return false;
++		return true;
+ 	else if (index <= 31)
+ 		*((unsigned long *)regs + index) = val;
+ 	else
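The one-line change above flips the result for register index 0: on RISC-V,
x0 is hardwired to zero, so an emulated write to it must be treated as a
successful no-op rather than an error, or simulating instructions that
legitimately target x0 would spuriously fail. A sketch of the corrected
helper, with the name being illustrative::

    static bool demo_reg_set_val(unsigned long *regs, u32 index,
                                 unsigned long val)
    {
            if (index == 0)         /* writes to x0 are discarded, not errors */
                    return true;
            if (index <= 31) {
                    regs[index] = val;
                    return true;
            }
            return false;           /* no such GPR */
    }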
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 194f166b2cc40..4b3dc8beaf77d 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -3,6 +3,7 @@
+ #include <linux/highmem.h>
+ #include <linux/ptrace.h>
+ #include <linux/uprobes.h>
++#include <asm/insn.h>
+ 
+ #include "decode-insn.h"
+ 
+@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
+ #endif
+ }
+ 
++bool is_trap_insn(uprobe_opcode_t *insn)
++{
++	return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
++}
++
+ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ {
+ 	return instruction_pointer(regs);
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index cd6f10c73a163..061117b8a3438 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -408,48 +408,14 @@ int is_valid_bugaddr(unsigned long pc)
+ #endif /* CONFIG_GENERIC_BUG */
+ 
+ #ifdef CONFIG_VMAP_STACK
+-/*
+- * Extra stack space that allows us to provide panic messages when the kernel
+- * has overflowed its stack.
+- */
+-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
++DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ 		overflow_stack)__aligned(16);
+-/*
+- * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
+- * we can call into C code to get the per-hart overflow stack.  Usage of this
+- * stack must be protected by spin_shadow_stack.
+- */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+-
+-/*
+- * A pseudo spinlock to protect the shadow stack from being used by multiple
+- * harts concurrently.  This isn't a real spinlock because the lock side must
+- * be taken without a valid stack and only a single register, it's only taken
+- * while in the process of panicking anyway so the performance and error
+- * checking a proper spinlock gives us doesn't matter.
+- */
+-unsigned long spin_shadow_stack;
+-
+-asmlinkage unsigned long get_overflow_stack(void)
+-{
+-	return (unsigned long)this_cpu_ptr(overflow_stack) +
+-		OVERFLOW_STACK_SIZE;
+-}
+ 
+ asmlinkage void handle_bad_stack(struct pt_regs *regs)
+ {
+ 	unsigned long tsk_stk = (unsigned long)current->stack;
+ 	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+ 
+-	/*
+-	 * We're done with the shadow stack by this point, as we're on the
+-	 * overflow stack.  Tell any other concurrent overflowing harts that
+- * they can proceed with panicking by releasing the pseudo-spinlock.
+-	 *
+-	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+-	 */
+-	smp_store_release(&spin_shadow_stack, 0);
+-
+ 	console_verbose();
+ 
+ 	pr_emerg("Insufficient stack space to handle exception!\n");
+diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
+index d40bec6ac0786..cadf725ef7983 100644
+--- a/arch/riscv/kernel/vdso/hwprobe.c
++++ b/arch/riscv/kernel/vdso/hwprobe.c
+@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ 
+ 	/* This is something we can handle, fill out the pairs. */
+ 	while (p < end) {
+-		if (p->key <= RISCV_HWPROBE_MAX_KEY) {
++		if (riscv_hwprobe_key_is_valid(p->key)) {
+ 			p->value = avd->all_cpu_hwprobe_values[p->key];
+ 
+ 		} else {
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 20a9f991a6d74..e9090b38f8117 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
+ 
+ 	kernel_ptd_info.base_addr = KERN_VIRT_START;
+ 
++	pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
++	pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
++
+ 	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ 		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ 			pg_level[i].mask |= pte_bits[j].mask;
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 1e2ea706aa228..00e7b0876dc50 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+ 			continue;
+ 		if (!pud_folded(*pud)) {
+ 			page = phys_to_page(pud_val(*pud));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_pmd(pud, addr, next);
+@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+ 			continue;
+ 		if (!p4d_folded(*p4d)) {
+ 			page = phys_to_page(p4d_val(*p4d));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_pud(p4d, addr, next);
+@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
+ 			continue;
+ 		if (!pgd_folded(*pgd)) {
+ 			page = phys_to_page(pgd_val(*pgd));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_p4d(pgd, addr, next);
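The loop bound changes because a region/segment table (CRST) on s390 is an
order-2, 16 KiB allocation, i.e. four 4 KiB pages, and every one of its
struct pages needs the PG_arch_1 tag; iterating to 3 left the last page
unmarked. Expressed in terms of the allocation order (a sketch)::

    /* CRST_ALLOC_ORDER is 2, so this visits all four pages of the table */
    for (i = 0; i < (1 << CRST_ALLOC_ORDER); i++)
            set_bit(PG_arch_1, &page[i].flags);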
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 24a66670f5c3a..8e5a9f3e7404c 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -13,6 +13,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
+ #include <linux/sort.h>
++#include <asm/page-states.h>
+ #include <asm/cacheflush.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/pgalloc.h>
+@@ -46,8 +47,11 @@ void *vmem_crst_alloc(unsigned long val)
+ 	unsigned long *table;
+ 
+ 	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+-	if (table)
+-		crst_table_init(table, val);
++	if (!table)
++		return NULL;
++	crst_table_init(table, val);
++	if (slab_is_available())
++		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+ 	return table;
+ }
+ 
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 44340a1139e0b..959afa705e95c 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -24,8 +24,17 @@
+ #include <linux/types.h>
+ #include <crypto/sha1.h>
+ #include <crypto/sha1_base.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+ 
++static const struct x86_cpu_id module_cpu_ids[] = {
++	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++	{}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+ 			     unsigned int len, sha1_block_fn *sha1_xform)
+ {
+@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
+ 
+ static int __init sha1_ssse3_mod_init(void)
+ {
++	if (!x86_match_cpu(module_cpu_ids))
++		return -ENODEV;
++
+ 	if (register_sha1_ssse3())
+ 		goto fail;
+ 
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 3a5f6be7dbba4..d25235f0ccafc 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -38,11 +38,20 @@
+ #include <crypto/sha2.h>
+ #include <crypto/sha256_base.h>
+ #include <linux/string.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+ 
+ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ 				       const u8 *data, int blocks);
+ 
++static const struct x86_cpu_id module_cpu_ids[] = {
++	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++	{}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int _sha256_update(struct shash_desc *desc, const u8 *data,
+ 			  unsigned int len, sha256_block_fn *sha256_xform)
+ {
+@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
+ 
+ static int __init sha256_ssse3_mod_init(void)
+ {
++	if (!x86_match_cpu(module_cpu_ids))
++		return -ENODEV;
++
+ 	if (register_sha256_ssse3())
+ 		goto fail;
+ 
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index e3054e3e46d52..9b419f0de713c 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f72b30d2238a6..9bdbb1cc03d38 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1690,6 +1690,7 @@ struct kvm_x86_ops {
+ 	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ 			      uint32_t guest_irq, bool set);
+ 	void (*pi_start_assignment)(struct kvm *kvm);
++	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+ 
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index b37abb55e948b..389f9594746ef 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -553,6 +553,7 @@
+ #define MSR_AMD64_CPUID_FN_1		0xc0011004
+ #define MSR_AMD64_LS_CFG		0xc0011020
+ #define MSR_AMD64_DC_CFG		0xc0011022
++#define MSR_AMD64_TW_CFG		0xc0011023
+ 
+ #define MSR_AMD64_DE_CFG		0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	 1
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index e3bae2b60a0db..ef2844d691735 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -12,13 +12,6 @@
+ 
+ #define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)
+ 
+-/*
+- * Too small node sizes may confuse the VM badly. Usually they
+- * result from BIOS bugs. So dont recognize nodes as standalone
+- * NUMA entities that have less than this amount of RAM listed:
+- */
+-#define NODE_MIN_SIZE (4*1024*1024)
+-
+ extern int numa_off;
+ 
+ /*
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 35d5b8fb18efe..81f2a9ebea6f7 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ 	 * caused by the non-atomic update of the address/data pair.
+ 	 *
+ 	 * Direct update is possible when:
+-	 * - The MSI is maskable (remapped MSI does not use this code path)).
+-	 *   The quirk bit is not set in this case.
++	 * - The MSI is maskable (remapped MSI does not use this code path).
++	 *   The reservation mode bit is set in this case.
+ 	 * - The new vector is the same as the old vector
+ 	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ 	 * - The interrupt is not yet started up
+ 	 * - The new destination CPU is the same as the old destination CPU
+ 	 */
+-	if (!irqd_msi_nomask_quirk(irqd) ||
++	if (!irqd_can_reserve(irqd) ||
+ 	    cfg->vector == old_cfg.vector ||
+ 	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ 	    !irqd_is_started(irqd) ||
+@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ 		if (WARN_ON_ONCE(domain != real_parent))
+ 			return false;
+ 		info->chip->irq_set_affinity = msi_set_affinity;
+-		/* See msi_set_affinity() for the gory details */
+-		info->flags |= MSI_FLAG_NOMASK_QUIRK;
+ 		break;
+ 	case DOMAIN_BUS_DMAR:
+ 	case DOMAIN_BUS_AMDVI:
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index 5a2962c492d31..046f88066c9b4 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -86,8 +86,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ 		if (!err)
+ 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ 
+-		/* Socket ID is ApicId[6] for these processors. */
+-		c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++		/*
++		 * Socket ID is ApicId[6] for the processors with model <= 0x3
++		 * when running on host.
++		 */
++		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
++			c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+ 
+ 		cacheinfo_hygon_init_llc_id(c, cpu);
+ 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index b28fd020066f6..b4990c851ade3 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+ 
+ 	stimer_cleanup(stimer);
+ 	stimer->count = count;
+-	if (stimer->count == 0)
+-		stimer->config.enable = 0;
+-	else if (stimer->config.auto_enable)
+-		stimer->config.enable = 1;
++	if (!host) {
++		if (stimer->count == 0)
++			stimer->config.enable = 0;
++		else if (stimer->config.auto_enable)
++			stimer->config.enable = 1;
++	}
+ 
+ 	if (stimer->config.enable)
+ 		stimer_mark_pending(stimer, false);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index e74e223f46aa3..90ad9cb6bd808 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2423,22 +2423,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ {
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+-	u64 val;
+ 
+ 	/*
+-	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
+-	 * xAPIC, ICR writes need to go down the common (slightly slower) path
+-	 * to get the upper half from ICR2.
++	 * ICR is a single 64-bit register when x2APIC is enabled; all other
++	 * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
++	 * go down the common path to get the upper half from ICR2.
++	 *
++	 * Note, using the write helpers may incur an unnecessary write to the
++	 * virtual APIC state, but KVM needs to conditionally modify the value
++	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
++	 * conditional branches is likely a wash relative to the cost of the
++	 * maybe-unnecessary write, and both are in the noise anyway.
+ 	 */
+-	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
+-		val = kvm_lapic_get_reg64(apic, APIC_ICR);
+-		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+-		trace_kvm_apic_write(APIC_ICR, val);
+-	} else {
+-		/* TODO: optimize to just emulate side effect w/o one more write */
+-		val = kvm_lapic_get_reg(apic, offset);
+-		kvm_lapic_reg_write(apic, offset, (u32)val);
+-	}
++	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
++		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
++	else
++		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+ 
+@@ -2649,6 +2649,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 	u64 msr_val;
+ 	int i;
+ 
++	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ 	if (!init_event) {
+ 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ 		if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2960,6 +2962,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	int r;
+ 
++	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ 	/* set SPIV separately to get count of SW disabled APICs right */
+ 	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
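The new call sites use static_call_cond() because apicv_pre_state_restore is
declared as an optional op: the call patches to a direct call once a vendor
module installs a handler and to a NOP while it is NULL. A rough sketch of
the idiom under that assumption, with demo_* names being illustrative::

    #include <linux/static_call.h>

    static void demo_hook(struct kvm_vcpu *vcpu);   /* optional, may stay unset */

    DEFINE_STATIC_CALL_NULL(demo_pre_restore, demo_hook);

    static void demo_restore_path(struct kvm_vcpu *vcpu)
    {
            /* safe even if no implementation was ever registered */
            static_call_cond(demo_pre_restore)(vcpu);
    }

    /* a provider would enable it with:
     *      static_call_update(demo_pre_restore, vmx_apicv_pre_state_restore);
     */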
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index bc6f0fea48b43..52af279f793db 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6909,7 +6909,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+ 
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+@@ -8275,7 +8275,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ 	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ 	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
+-	.apicv_post_state_restore = vmx_apicv_post_state_restore,
++	.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ 	.hwapic_irr_update = vmx_hwapic_irr_update,
+ 	.hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7bcf1a76a6abc..a5c8a01f7e7eb 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3643,6 +3643,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_AMD64_PATCH_LOADER:
+ 	case MSR_AMD64_BU_CFG2:
+ 	case MSR_AMD64_DC_CFG:
++	case MSR_AMD64_TW_CFG:
+ 	case MSR_F15H_EX_CFG:
+ 		break;
+ 
+@@ -4067,6 +4068,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_AMD64_BU_CFG2:
+ 	case MSR_IA32_PERF_CTL:
+ 	case MSR_AMD64_DC_CFG:
++	case MSR_AMD64_TW_CFG:
+ 	case MSR_F15H_EX_CFG:
+ 	/*
+ 	 * Intel Sandy Bridge CPUs must support the RAPL (running average power
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index c01c5506fd4ae..aa39d678fe81d 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ 		if (start >= end)
+ 			continue;
+ 
+-		/*
+-		 * Don't confuse VM with a node that doesn't have the
+-		 * minimum amount of memory:
+-		 */
+-		if (end && (end - start) < NODE_MIN_SIZE)
+-			continue;
+-
+ 		alloc_node_data(nid);
+ 	}
+ 
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e3ec02e6ac9fe..f347c20247d30 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -3,9 +3,11 @@
+  * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
++#include <linux/suspend.h>
+ #include <linux/vgaarb.h>
+ #include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+@@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
++
++#ifdef CONFIG_SUSPEND
++/*
++ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
++ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
++ * Root Ports don't generate wakeup interrupts for USB devices.
++ *
++ * When suspending, remove D3hot and D3cold from the PME_Support advertised
++ * by the Root Port so we don't use those states if we're expecting wakeup
++ * interrupts.  Restore the advertised PME_Support when resuming.
++ */
++static void amd_rp_pme_suspend(struct pci_dev *dev)
++{
++	struct pci_dev *rp;
++
++	/*
++	 * PM_SUSPEND_ON means we're doing runtime suspend, which means
++	 * amd-pmc will not be involved, so PMEs during D3 work as advertised.
++	 *
++	 * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
++	 * sleep state, but we assume amd-pmc is always present.
++	 */
++	if (pm_suspend_target_state == PM_SUSPEND_ON)
++		return;
++
++	rp = pcie_find_root_port(dev);
++	if (!rp->pm_cap)
++		return;
++
++	rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
++				    PCI_PM_CAP_PME_SHIFT);
++	dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
++}
++
++static void amd_rp_pme_resume(struct pci_dev *dev)
++{
++	struct pci_dev *rp;
++	u16 pmc;
++
++	rp = pcie_find_root_port(dev);
++	if (!rp->pm_cap)
++		return;
++
++	pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
++	rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
++}
++/* Rembrandt (yellow_carp) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
++/* Phoenix (pink_sardine) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++#endif /* CONFIG_SUSPEND */
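A worked example of the quirk's mask arithmetic, with the constants as defined in include/uapi/linux/pci_regs.h:

/*
 *   PCI_PM_CAP_PME_D3hot  = 0x4000   (PMC bit 14)
 *   PCI_PM_CAP_PME_D3cold = 0x8000   (PMC bit 15)
 *   PCI_PM_CAP_PME_SHIFT  = 11
 *
 * (0x4000 | 0x8000) >> 11 == 0x18, so amd_rp_pme_suspend() clears bits
 * 3 and 4 (D3hot/D3cold) of the shifted pme_support bitmap while the
 * D0-D2 wakeup bits stay intact; amd_rp_pme_resume() then re-reads PMC
 * and restores the whole field with FIELD_GET(PCI_PM_CAP_PME_MASK, pmc).
 */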
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c21bc81a790ff..5fb31b9a16403 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2874,11 +2874,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ 	};
+ 	struct request *rq;
+ 
+-	if (unlikely(bio_queue_enter(bio)))
+-		return NULL;
+-
+ 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+-		goto queue_exit;
++		return NULL;
+ 
+ 	rq_qos_throttle(q, bio);
+ 
+@@ -2894,35 +2891,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ 	rq_qos_cleanup(q, bio);
+ 	if (bio->bi_opf & REQ_NOWAIT)
+ 		bio_wouldblock_error(bio);
+-queue_exit:
+-	blk_queue_exit(q);
+ 	return NULL;
+ }
+ 
+-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
++/* return true if this @rq can be used for @bio */
++static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
++		struct bio *bio)
+ {
+-	struct request *rq;
+-	enum hctx_type type, hctx_type;
++	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
++	enum hctx_type hctx_type = rq->mq_hctx->type;
+ 
+-	if (!plug)
+-		return NULL;
+-	rq = rq_list_peek(&plug->cached_rq);
+-	if (!rq || rq->q != q)
+-		return NULL;
++	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+ 
+-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+-		*bio = NULL;
+-		return NULL;
+-	}
+-
+-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+-	hctx_type = rq->mq_hctx->type;
+ 	if (type != hctx_type &&
+ 	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+-		return NULL;
+-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+-		return NULL;
++		return false;
++	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
++		return false;
+ 
+ 	/*
+ 	 * If any qos ->throttle() end up blocking, we will have flushed the
+@@ -2930,12 +2915,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ 	 * before we throttle.
+ 	 */
+ 	plug->cached_rq = rq_list_next(rq);
+-	rq_qos_throttle(q, *bio);
++	rq_qos_throttle(rq->q, bio);
+ 
+ 	blk_mq_rq_time_init(rq, 0);
+-	rq->cmd_flags = (*bio)->bi_opf;
++	rq->cmd_flags = bio->bi_opf;
+ 	INIT_LIST_HEAD(&rq->queuelist);
+-	return rq;
++	return true;
+ }
+ 
+ static void bio_set_ioprio(struct bio *bio)
+@@ -2965,7 +2950,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	struct blk_plug *plug = blk_mq_plug(bio);
+ 	const int is_sync = op_is_sync(bio->bi_opf);
+ 	struct blk_mq_hw_ctx *hctx;
+-	struct request *rq;
++	struct request *rq = NULL;
+ 	unsigned int nr_segs = 1;
+ 	blk_status_t ret;
+ 
+@@ -2976,20 +2961,36 @@ void blk_mq_submit_bio(struct bio *bio)
+ 			return;
+ 	}
+ 
+-	if (!bio_integrity_prep(bio))
+-		return;
+-
+ 	bio_set_ioprio(bio);
+ 
+-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+-	if (!rq) {
+-		if (!bio)
++	if (plug) {
++		rq = rq_list_peek(&plug->cached_rq);
++		if (rq && rq->q != q)
++			rq = NULL;
++	}
++	if (rq) {
++		if (!bio_integrity_prep(bio))
+ 			return;
+-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+-		if (unlikely(!rq))
++		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ 			return;
++		if (blk_mq_can_use_cached_rq(rq, plug, bio))
++			goto done;
++		percpu_ref_get(&q->q_usage_counter);
++	} else {
++		if (unlikely(bio_queue_enter(bio)))
++			return;
++		if (!bio_integrity_prep(bio))
++			goto fail;
++	}
++
++	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
++	if (unlikely(!rq)) {
++fail:
++		blk_queue_exit(q);
++		return;
+ 	}
+ 
++done:
+ 	trace_block_getrq(bio);
+ 
+ 	rq_qos_track(q, rq, bio);
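Because this hunk reshuffles several exit paths, a simplified restatement of the new blk_mq_submit_bio() fast path may help; this mirrors the code above rather than adding logic:

/*
 * rq = plug ? peek at plug->cached_rq : NULL;
 * if (rq && rq->q == q) {
 *	integrity prep, merge attempt;
 *	if (blk_mq_can_use_cached_rq(rq, plug, bio))
 *		goto done;			// reuse; ref already held
 *	percpu_ref_get(&q->q_usage_counter);	// pin queue for a new rq
 * } else {
 *	bio_queue_enter(bio);			// take the usage ref
 *	integrity prep;
 * }
 * rq = blk_mq_get_new_requests(...);		// failure: blk_queue_exit()
 */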
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 8c1d0ca412137..d0d954fe9d54f 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
++	if (err == -EBUSY)
++		return -EAGAIN;
+ 
+ 	return err;
+ }
+@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
++	if (err == -EBUSY)
++		return -EAGAIN;
+ 
+ 	return err;
+ }
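The translation matters because crypto waiters interpret a bare -EBUSY as "request backlogged, wait for completion", and here no completion ever arrives; -EAGAIN is an ordinary transient error instead. A hedged caller-side sketch, assuming req was set up through the usual AEAD API:

int ret = crypto_aead_encrypt(req);
if (ret == -EAGAIN)
	ret = crypto_aead_encrypt(req);	/* illustrative one-shot retry */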
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a2056c4c8cb70..271092f2700a1 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 		record_header = (void *)subtable_header + offset;
+ 		offset += record_header->length;
+ 
++		if (!record_header->length) {
++			pr_err(FW_BUG "Zero-length record found in FPTD.\n");
++			result = -EINVAL;
++			goto err;
++		}
++
+ 		switch (record_header->type) {
+ 		case RECORD_S3_RESUME:
+ 			if (subtable_type != SUBTABLE_S3PT) {
+ 				pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ 				     record_header->type, signature);
+-				return -EINVAL;
++				result = -EINVAL;
++				goto err;
+ 			}
+ 			if (record_resume) {
+ 				pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_resume = (struct resume_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 		case RECORD_S3_SUSPEND:
+ 			if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_suspend = (struct suspend_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 		case RECORD_BOOT:
+ 			if (subtable_type != SUBTABLE_FBPT) {
+ 				pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ 				     record_header->type, signature);
+-				return -EINVAL;
++				result = -EINVAL;
++				goto err;
+ 			}
+ 			if (record_boot) {
+ 				pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_boot = (struct boot_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 
+ 		default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 		}
+ 	}
+ 	return 0;
++
++err:
++	if (record_boot)
++		sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++	if (record_suspend)
++		sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++	if (record_resume)
++		sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++	return result;
+ }
+ 
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+ 	struct acpi_table_header *header;
+ 	struct fpdt_subtable_entry *subtable;
+ 	u32 offset = sizeof(*header);
++	int result;
+ 
+ 	status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+ 
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+ 
+ 	fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ 	if (!fpdt_kobj) {
+-		acpi_put_table(header);
+-		return -ENOMEM;
++		result = -ENOMEM;
++		goto err_nomem;
+ 	}
+ 
+ 	while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+ 		switch (subtable->type) {
+ 		case SUBTABLE_FBPT:
+ 		case SUBTABLE_S3PT:
+-			fpdt_process_subtable(subtable->address,
++			result = fpdt_process_subtable(subtable->address,
+ 					      subtable->type);
++			if (result)
++				goto err_subtable;
+ 			break;
+ 		default:
+ 			/* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+ 		offset += sizeof(*subtable);
+ 	}
+ 	return 0;
++err_subtable:
++	kobject_put(fpdt_kobj);
++
++err_nomem:
++	acpi_put_table(header);
++	return result;
+ }
+ 
+ fs_initcall(acpi_init_fpdt);
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index ef59d6ea16da0..63ad0541db381 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -209,6 +209,20 @@ err_pool_alloc:
+ 	return -ENOMEM;
+ }
+ 
++/**
++ * ghes_estatus_pool_region_free - free previously allocated memory
++ *				   from the ghes_estatus_pool.
++ * @addr: address of memory to free.
++ * @size: size of memory to free.
++ *
++ * Returns none.
++ */
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
++{
++	gen_pool_free(ghes_estatus_pool, addr, size);
++}
++EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
++
+ static int map_gen_v2(struct ghes *ghes)
+ {
+ 	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
+@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ 	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ 		unsigned int devfn;
+ 		int aer_severity;
++		u8 *aer_info;
+ 
+ 		devfn = PCI_DEVFN(pcie_err->device_id.device,
+ 				  pcie_err->device_id.function);
+@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ 		if (gdata->flags & CPER_SEC_RESET)
+ 			aer_severity = AER_FATAL;
+ 
++		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
++						  sizeof(struct aer_capability_regs));
++		if (!aer_info)
++			return;
++		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
++
+ 		aer_recover_queue(pcie_err->device_id.segment,
+ 				  pcie_err->device_id.bus,
+ 				  devfn, aer_severity,
+ 				  (struct aer_capability_regs *)
+-				  pcie_err->aer_info);
++				  aer_info);
+ 	}
+ #endif
+ }
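The memcpy() is the substance of this fix: pcie_err->aer_info points into the firmware-owned error status block, which is recycled once the handler returns, while aer_recover_queue() processes the record asynchronously. The ghes_estatus_pool_region_free() export added above is presumably paired on the consumer side roughly as follows (sketch, assuming the deferred AER recovery work frees what this path allocated):

/* After the recovery worker has consumed the copied registers: */
ghes_estatus_pool_region_free((unsigned long)aer_info,
			      sizeof(struct aer_capability_regs));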
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index c95d0edb0be9e..a59c11df73754 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ 		},
+ 	},
++	{
++		/*
++		 * HP 250 G7 Notebook PC
++		 */
++		.callback = ec_honor_dsdt_gpe,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
++		},
++	},
+ 	{
+ 		/*
+ 		 * Samsung hardware
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 297a88587031e..80fbd385e8b4f 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -495,6 +495,18 @@ static const struct dmi_system_id maingear_laptop[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ 		}
+ 	},
++	{
++		/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++		},
++	},
++	{
++		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++		},
++	},
+ 	{
+ 		.ident = "MAINGEAR Vector Pro 2 17",
+ 		.matches = {
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 3241486869530..9bba8f280a4d4 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
+ static int reset_sar(struct atm_dev *dev)  
+ {  
+ 	IADEV *iadev;  
+-	int i, error = 1;  
++	int i, error;
+ 	unsigned int pci[64];  
+ 	  
+ 	iadev = INPH_IA_DEV(dev);  
+-	for(i=0; i<64; i++)  
+-	  if ((error = pci_read_config_dword(iadev->pci,  
+-				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
+-  	      return error;  
++	for (i = 0; i < 64; i++) {
++		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
++		if (error != PCIBIOS_SUCCESSFUL)
++			return error;
++	}
+ 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
+-	for(i=0; i<64; i++)  
+-	  if ((error = pci_write_config_dword(iadev->pci,  
+-					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
+-	    return error;  
++	for (i = 0; i < 64; i++) {
++		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
++		if (error != PCIBIOS_SUCCESSFUL)
++			return error;
++	}
+ 	udelay(5);  
+ 	return 0;  
+ }  
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index a528cec24264a..0c3725c3eefa4 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 		if (dev->bus && dev->bus->dma_cleanup)
+ 			dev->bus->dma_cleanup(dev);
+ 
+-		device_links_driver_cleanup(dev);
+ 		device_unbind_cleanup(dev);
++		device_links_driver_cleanup(dev);
+ 
+ 		klist_remove(&dev->p->knode_driver);
+ 		device_pm_check_callbacks(dev);
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 7d3e47436056e..d525f99830664 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
+ 	return 0;
+ }
+ 
++static int rbtree_all(const void *key, const struct rb_node *node)
++{
++	return 0;
++}
++
+ /**
+  * regcache_sync - Sync the register cache with the hardware.
+  *
+@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
+ 	unsigned int i;
+ 	const char *name;
+ 	bool bypass;
++	struct rb_node *node;
+ 
+ 	if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ 		return -EINVAL;
+@@ -392,6 +398,30 @@ out:
+ 	/* Restore the bypass state */
+ 	map->cache_bypass = bypass;
+ 	map->no_sync_defaults = false;
++
++	/*
++	 * If we did any paging with the cache bypassed and a cached
++	 * paging register, then the register and cache state might have
++	 * gone out of sync; force writes of all the paging registers.
++	 */
++	rb_for_each(node, 0, &map->range_tree, rbtree_all) {
++		struct regmap_range_node *this =
++			rb_entry(node, struct regmap_range_node, node);
++
++		/* If there's nothing in the cache there's nothing to sync */
++		ret = regcache_read(map, this->selector_reg, &i);
++		if (ret != 0)
++			continue;
++
++		ret = _regmap_write(map, this->selector_reg, i);
++		if (ret != 0) {
++			dev_err(map->dev, "Failed to write %x = %x: %d\n",
++				this->selector_reg, i, ret);
++			break;
++		}
++	}
++
+ 	map->unlock(map->lock_arg);
+ 
+ 	regmap_async_complete(map);
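The always-zero rbtree_all() comparator is what makes the loop above visit every range: rb_for_each(node, key, tree, cmp) iterates the nodes for which cmp(key, node) reports a match, so a comparator that unconditionally returns 0 turns the keyed lookup into a full-tree walk:

rb_for_each(node, 0, &map->range_tree, rbtree_all) {
	/* each registered register range is visited exactly once */
}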
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 1fe011676d070..4a4b9bad551e8 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
+ 	u16 min_io_size;
+ 	u8 physical_block_exp, alignment_offset;
+ 	unsigned int queue_depth;
++	size_t max_dma_size;
+ 
+ 	if (!vdev->config->get) {
+ 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
+ 	/* No real sector limit. */
+ 	blk_queue_max_hw_sectors(q, UINT_MAX);
+ 
+-	max_size = virtio_max_dma_size(vdev);
++	max_dma_size = virtio_max_dma_size(vdev);
++	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
+ 
+ 	/* Host can optionally specify maximum segment size and number of
+ 	 * segments. */
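The new intermediate variable avoids an implicit narrowing: virtio_max_dma_size() returns a size_t that may exceed what the u32-based block-layer limits can hold. An equivalent one-liner, as a sketch:

max_size = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);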
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index ca9e2a210fff2..ea29469fe0cff 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2803,6 +2803,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ 		goto err_free_wc;
+ 	}
+ 
++	if (data->evt_skb == NULL)
++		goto err_free_wc;
++
+ 	/* Parse and handle the return WMT event */
+ 	wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ 	if (wmt_evt->whdr.op != hdr->op) {
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index c6f181702b9a7..edbc4d3381177 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -38,7 +38,7 @@ static struct _parisc_agp_info {
+ 
+ 	int lba_cap_offset;
+ 
+-	u64 *gatt;
++	__le64 *gatt;
+ 	u64 gatt_entries;
+ 
+ 	u64 gart_base;
+@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ 	int i;
+ 
+ 	for (i = 0; i < info->gatt_entries; i++) {
+-		info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
++		info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ 	}
+ 
+ 	return 0;
+@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+ 		for (k = 0;
+ 		     k < info->io_pages_per_kpage;
+ 		     k++, j++, paddr += info->io_page_size) {
+-			info->gatt[j] =
++			info->gatt[j] = cpu_to_le64(
+ 				parisc_agp_mask_memory(agp_bridge,
+-					paddr, type);
++					paddr, type));
+ 			asm_io_fdc(&info->gatt[j]);
+ 		}
+ 	}
+@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+ 	io_pg_start = info->io_pages_per_kpage * pg_start;
+ 	io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ 	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+-		info->gatt[i] = agp_bridge->scratch_page;
++		info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ 	}
+ 
+ 	agp_bridge->driver->tlb_flush(mem);
+@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ 	pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ 	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
+ 
+-	return cpu_to_le64(pa);
++	/* return native (big-endian) PDIR entry */
++	return pa;
+ }
+ 
+ static void
+@@ -251,7 +252,8 @@ static int __init
+ agp_ioc_init(void __iomem *ioc_regs)
+ {
+ 	struct _parisc_agp_info *info = &parisc_agp_info;
+-        u64 iova_base, *io_pdir, io_tlb_ps;
++        u64 iova_base, io_tlb_ps;
++	__le64 *io_pdir;
+         int io_tlb_shift;
+ 
+         printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 86b43175b0422..d05c589d52bf2 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -73,7 +73,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -87,7 +86,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -162,7 +160,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ 				&gpll6_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -193,7 +190,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 				&gpll4_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -244,7 +240,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 				&gpll2_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -275,7 +270,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ 				&nss_crypto_pll_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 6541d98c03483..6ed4eba00731a 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -76,7 +76,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -122,7 +121,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 				&gpll2_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -155,7 +153,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 				&gpll4_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -189,7 +186,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ 				&gpll6_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -202,7 +198,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
+ 				&gpll6_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -267,7 +262,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ 				&nss_crypto_pll_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
+index 75234e0783e1c..83fe4eb3133cb 100644
+--- a/drivers/clk/socfpga/stratix10-clk.h
++++ b/drivers/clk/socfpga/stratix10-clk.h
+@@ -7,8 +7,10 @@
+ #define	__STRATIX10_CLK_H
+ 
+ struct stratix10_clock_data {
+-	struct clk_hw_onecell_data	clk_data;
+ 	void __iomem		*base;
++
++	/* Must be last */
++	struct clk_hw_onecell_data	clk_data;
+ };
+ 
+ struct stratix10_pll_clock {
+diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
+index 01d07f1bf01b1..c4bd40676da4b 100644
+--- a/drivers/clk/visconti/pll.h
++++ b/drivers/clk/visconti/pll.h
+@@ -15,8 +15,10 @@
+ 
+ struct visconti_pll_provider {
+ 	void __iomem *reg_base;
+-	struct clk_hw_onecell_data clk_data;
+ 	struct device_node *node;
++
++	/* Must be last */
++	struct clk_hw_onecell_data clk_data;
+ };
+ 
+ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
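Both moves follow from clk_hw_onecell_data ending in a flexible array member: providers are allocated with the clk_hw pointers appended to the end of the struct, so any field placed after clk_data would be silently overwritten. Roughly (nr_hws is a placeholder count):

/* From include/linux/clk-provider.h: */
struct clk_hw_onecell_data {
	unsigned int num;
	struct clk_hw *hws[];	/* flexible array member */
};

/* The allocation pattern that requires clk_data to sit at the tail: */
ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_hws), GFP_KERNEL);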
+diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
+index 27af17c995900..2a90c92a9182a 100644
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+ 	writel(mck_divisor_idx			/* likely divide-by-8 */
+ 			| ATMEL_TC_WAVE
+ 			| ATMEL_TC_WAVESEL_UP		/* free-run */
++			| ATMEL_TC_ASWTRG_SET		/* TIOA0 rises at software trigger */
+ 			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
+ 			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
+ 			tcaddr + ATMEL_TC_REG(0, CMR));
+diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
+index 28ab4f1a7c713..6a878d227a13b 100644
+--- a/drivers/clocksource/timer-imx-gpt.c
++++ b/drivers/clocksource/timer-imx-gpt.c
+@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type t
+ 		return -ENOMEM;
+ 
+ 	imxtm->base = of_iomap(np, 0);
+-	if (!imxtm->base)
+-		return -ENXIO;
++	if (!imxtm->base) {
++		ret = -ENXIO;
++		goto err_kfree;
++	}
+ 
+ 	imxtm->irq = irq_of_parse_and_map(np, 0);
+-	if (imxtm->irq <= 0)
+-		return -EINVAL;
++	if (imxtm->irq <= 0) {
++		ret = -EINVAL;
++		goto err_kfree;
++	}
+ 
+ 	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
+ 
+@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type t
+ 
+ 	ret = _mxc_timer_init(imxtm);
+ 	if (ret)
+-		return ret;
++		goto err_kfree;
+ 
+ 	initialized = 1;
+ 
+ 	return 0;
++
++err_kfree:
++	kfree(imxtm);
++	return ret;
+ }
+ 
+ static int __init imx1_timer_init_dt(struct device_node *np)
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 55c7ffd37d1cc..4a7cce339fae8 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ 	len += sysfs_emit_at(buf, len, "   From  :    To\n");
+ 	len += sysfs_emit_at(buf, len, "         : ");
+ 	for (i = 0; i < stats->state_num; i++) {
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 		len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
+ 	}
+-	if (len >= PAGE_SIZE)
+-		return PAGE_SIZE;
++	if (len >= PAGE_SIZE - 1)
++		return PAGE_SIZE - 1;
+ 
+ 	len += sysfs_emit_at(buf, len, "\n");
+ 
+ 	for (i = 0; i < stats->state_num; i++) {
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 
+ 		len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
+ 
+ 		for (j = 0; j < stats->state_num; j++) {
+-			if (len >= PAGE_SIZE)
++			if (len >= PAGE_SIZE - 1)
+ 				break;
+ 
+ 			if (pending)
+@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ 
+ 			len += sysfs_emit_at(buf, len, "%9u ", count);
+ 		}
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 		len += sysfs_emit_at(buf, len, "\n");
+ 	}
+ 
+-	if (len >= PAGE_SIZE) {
++	if (len >= PAGE_SIZE - 1) {
+ 		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ 		return -EFBIG;
+ 	}
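The PAGE_SIZE - 1 bound reflects how sysfs_emit_at() works: it formats through vscnprintf(buf + at, PAGE_SIZE - at, ...), so once len reaches PAGE_SIZE - 1 only the terminating NUL fits and the table is already truncated, a case the old len >= PAGE_SIZE test could not detect:

/* "full" now means no byte beyond the NUL terminator is left */
if (len >= PAGE_SIZE - 1)
	return -EFBIG;	/* transition table no longer fits one page */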
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index ba4852744c052..2aec118ba6775 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -845,6 +845,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
+ 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
+ 		      qp->qp_status.cq_head, 0);
+ 		atomic_dec(&qp->qp_status.used);
++
++		cond_resched();
+ 	}
+ 
+ 	/* set c_flag */
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 2c6001592fe20..6a75a3cb601ec 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -1242,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
+ 	return NULL;
+ }
+ 
++static struct device *endpoint_host(struct cxl_port *endpoint)
++{
++	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
++
++	if (is_cxl_root(port))
++		return port->uport_dev;
++	return &port->dev;
++}
++
+ static void delete_endpoint(void *data)
+ {
+ 	struct cxl_memdev *cxlmd = data;
+ 	struct cxl_port *endpoint = cxlmd->endpoint;
+-	struct cxl_port *parent_port;
+-	struct device *parent;
+-
+-	parent_port = cxl_mem_find_port(cxlmd, NULL);
+-	if (!parent_port)
+-		goto out;
+-	parent = &parent_port->dev;
++	struct device *host = endpoint_host(endpoint);
+ 
+-	device_lock(parent);
+-	if (parent->driver && !endpoint->dead) {
+-		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
+-		devm_release_action(parent, cxl_unlink_uport, endpoint);
+-		devm_release_action(parent, unregister_port, endpoint);
++	device_lock(host);
++	if (host->driver && !endpoint->dead) {
++		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
++		devm_release_action(host, cxl_unlink_uport, endpoint);
++		devm_release_action(host, unregister_port, endpoint);
+ 	}
+ 	cxlmd->endpoint = NULL;
+-	device_unlock(parent);
+-	put_device(parent);
+-out:
++	device_unlock(host);
+ 	put_device(&endpoint->dev);
++	put_device(host);
+ }
+ 
+ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
+ {
++	struct device *host = endpoint_host(endpoint);
+ 	struct device *dev = &cxlmd->dev;
+ 
++	get_device(host);
+ 	get_device(&endpoint->dev);
+ 	cxlmd->endpoint = endpoint;
+ 	cxlmd->depth = endpoint->depth;
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index a25f5deb3de51..c7e70ccdb9ef0 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1127,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 	}
+ 
+ 	if (is_cxl_root(parent_port)) {
+-		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
++		/*
++		 * Root decoder IG is always set to value in CFMWS which
++		 * may be different than this region's IG.  We can use the
++		 * region's IG here since interleave_granularity_store()
++		 * does not allow interleaved host-bridges with
++		 * root IG != region IG.
++		 */
++		parent_ig = p->interleave_granularity;
+ 		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ 		/*
+ 		 * For purposes of address bit routing, use power-of-2 math for
+@@ -1676,6 +1683,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		return -ENXIO;
+ 	}
+ 
++	if (p->nr_targets >= p->interleave_ways) {
++		dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
++			p->nr_targets);
++		return -EINVAL;
++	}
++
+ 	ep_port = cxled_to_port(cxled);
+ 	root_port = cxlrd_to_port(cxlrd);
+ 	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+@@ -1768,7 +1781,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 	if (p->nr_targets == p->interleave_ways) {
+ 		rc = cxl_region_setup_targets(cxlr);
+ 		if (rc)
+-			goto err_decrement;
++			return rc;
+ 		p->state = CXL_CONFIG_ACTIVE;
+ 	}
+ 
+@@ -1800,12 +1813,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 	}
+ 
+ 	return 0;
+-
+-err_decrement:
+-	p->nr_targets--;
+-	cxled->pos = -1;
+-	p->targets[pos] = NULL;
+-	return rc;
+ }
+ 
+ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 2e37c47044af5..5e17e2d3dccad 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -490,7 +490,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ 	src_maxburst = chan->dma_config.src_maxburst;
+ 	dst_maxburst = chan->dma_config.dst_maxburst;
+ 
+-	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ 	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ 	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ 
+@@ -966,7 +966,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ 	if (!desc)
+ 		return NULL;
+ 
+-	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ 	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ 	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ 	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
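Masking out STM32_MDMA_CCR_EN at read time matters because both helpers rebuild the channel-control word read-modify-write; if the enable bit leaked into the cached value, a later configuration write could restart the channel before it is fully programmed. The pattern, as a sketch (CCR_OFFSET, CCR_EN and the field names are stand-ins for the real definitions):

u32 ccr = readl(base + CCR_OFFSET) & ~CCR_EN;	/* never carry EN over */
ccr |= new_prio | new_irq_bits;			/* hypothetical fields */
writel(ccr, base + CCR_OFFSET);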
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index fde33acd46b75..18060a2dcf906 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -171,6 +171,12 @@ static enum qcom_scm_convention __get_convention(void)
+ 	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ 		return qcom_scm_convention;
+ 
++	/*
++	 * Per the "SMC calling convention specification", the 64-bit calling
++	 * convention can only be used when the client is 64-bit; otherwise
++	 * the system will encounter undefined behaviour.
++	 */
++#if IS_ENABLED(CONFIG_ARM64)
+ 	/*
+ 	 * Device isn't required as there is only one argument - no device
+ 	 * needed to dma_map_single to secure world
+@@ -191,6 +197,7 @@ static enum qcom_scm_convention __get_convention(void)
+ 		forced = true;
+ 		goto found;
+ 	}
++#endif
+ 
+ 	probed_convention = SMC_CONVENTION_ARM_32;
+ 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index a775d2bdac94f..980ec04892173 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_wake = "SYNA1202:00@16",
+ 		},
+ 	},
++	{
++		/*
++		 * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
++		 * a "dolby" button. At the ACPI level an _AEI event-handler
++		 * is connected which sets an ACPI variable to 1 on both
++		 * edges. This variable can be polled + cleared to 0 using
++		 * WMI. But since the variable is set on both edges, the WMI
++		 * interface is pretty useless even when polling.
++		 * So instead the x86-android-tablets code instantiates
++		 * a gpio-keys platform device for it.
++		 * Ignore the _AEI handler for the pin, so that it is not
++		 * marked busy and gpio-keys can claim it.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_interrupt = "INT33FC:00@3",
++		},
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 1436cdb5fa268..219bf8a82d8f9 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -496,6 +496,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ #if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
+ 		{ "reset",	"cirrus,gpio-nreset",	"cirrus,cs42l56" },
+ #endif
++#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
++		{ "i2s1-in-sel-gpio1",	NULL,	"mediatek,mt2701-cs42448-machine" },
++		{ "i2s1-in-sel-gpio2",	NULL,	"mediatek,mt2701-cs42448-machine" },
++#endif
+ #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
+ 		{ "reset",	"gpio-reset",	"ti,tlv320aic3x" },
+ 		{ "reset",	"gpio-reset",	"ti,tlv320aic33" },
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index b582b83c4984f..3c52f49069330 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu.h"
+ #include "atom.h"
+ 
++#include <linux/device.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ 	if (adev->flags & AMD_IS_APU)
+ 		return false;
+ 
++	/* ATRM is for on-platform devices only */
++	if (dev_is_removable(&adev->pdev->dev))
++		return false;
++
+ 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ 		dhandle = ACPI_HANDLE(&pdev->dev);
+ 		if (!dhandle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 252a876b07258..fdc302aa59e7b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -179,6 +179,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ 	}
+ 
+ 	rcu_read_unlock();
++	*result = NULL;
+ 	return -ENOENT;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 946d031d2520e..73f7ced3bf072 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1438,7 +1438,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		if (r == -ENOMEM)
+ 			DRM_ERROR("Not enough memory for command submission!\n");
+ 		else if (r != -ERESTARTSYS && r != -EAGAIN)
+-			DRM_ERROR("Failed to process the buffer list %d!\n", r);
++			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ 		goto error_fini;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 56e89e76ff179..33cada366eeb1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -747,6 +747,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ 	ssize_t result = 0;
+ 	int r;
+ 
++	if (!adev->smc_rreg)
++		return -EPERM;
++
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
+@@ -803,6 +806,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ 	ssize_t result = 0;
+ 	int r;
+ 
++	if (!adev->smc_wreg)
++		return -EPERM;
++
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8940ee73f2dfe..77fc71e74c124 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -43,6 +43,7 @@
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
++#include <linux/device.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/efi.h>
+@@ -2233,7 +2234,6 @@ out:
+  */
+ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+-	struct drm_device *dev = adev_to_drm(adev);
+ 	struct pci_dev *parent;
+ 	int i, r;
+ 	bool total;
+@@ -2304,7 +2304,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ 	    (amdgpu_is_atpx_hybrid() ||
+ 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
+ 	    ((adev->flags & AMD_IS_APU) == 0) &&
+-	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
++	    !dev_is_removable(&adev->pdev->dev))
+ 		adev->flags |= AMD_IS_PX;
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+@@ -2318,6 +2318,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
++	if (!amdgpu_device_pcie_dynamic_switching_supported())
++		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ 
+ 	total = true;
+ 	for (i = 0; i < adev->num_ip_blocks; i++) {
+@@ -4130,7 +4132,7 @@ fence_driver_init:
+ 
+ 	px = amdgpu_device_supports_px(ddev);
+ 
+-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++	if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ 				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_register_client(adev->pdev,
+ 					       &amdgpu_switcheroo_ops, px);
+@@ -4276,7 +4278,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 
+ 	px = amdgpu_device_supports_px(adev_to_drm(adev));
+ 
+-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++	if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ 				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_unregister_client(adev->pdev);
+ 
+@@ -5399,7 +5401,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 * Flush RAM to disk so that after reboot
+ 	 * the user can read log and see why the system rebooted.
+ 	 */
+-	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
++	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
++		amdgpu_ras_get_context(adev)->reboot) {
+ 		DRM_WARN("Emergency reboot.");
+ 
+ 		ksys_sync_helper();
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 8e1cfc87122d6..dcf685b501e01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -92,6 +92,7 @@
+ MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+ 
+ #define mmRCC_CONFIG_MEMSIZE	0xde3
++#define mmMP0_SMN_C2PMSG_33	0x16061
+ #define mmMM_INDEX		0x0
+ #define mmMM_INDEX_HI		0x6
+ #define mmMM_DATA		0x1
+@@ -230,8 +231,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
+ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ 						 uint8_t *binary)
+ {
+-	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+-	int ret = 0;
++	uint64_t vram_size;
++	u32 msg;
++	int i, ret = 0;
++
++	/* It can take up to a second for IFWI init to complete on some dGPUs,
++	 * but generally it should be in the 60-100ms range.  Normally this starts
++	 * as soon as the device gets power so by the time the OS loads this has long
++	 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
++	 * wait for this to complete.  Once the C2PMSG is updated, we can
++	 * continue.
++	 */
++	if (dev_is_removable(&adev->pdev->dev)) {
++		for (i = 0; i < 1000; i++) {
++			msg = RREG32(mmMP0_SMN_C2PMSG_33);
++			if (msg & 0x80000000)
++				break;
++			msleep(1);
++		}
++	}
++	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ 
+ 	if (vram_size) {
+ 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index f808841310fdf..1035f7982f3b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -627,8 +627,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
+ 	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
+ 	mqd_prop.hqd_active = false;
+ 
++	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++		mutex_lock(&adev->srbm_mutex);
++		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
++	}
++
+ 	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
+ 
++	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
++		mutex_unlock(&adev->srbm_mutex);
++	}
++
+ 	amdgpu_bo_unreserve(q->mqd_obj);
+ }
+ 
+@@ -1062,9 +1074,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ 	switch (queue_type) {
+ 	case AMDGPU_RING_TYPE_GFX:
+ 		ring->funcs = adev->gfx.gfx_ring[0].funcs;
++		ring->me = adev->gfx.gfx_ring[0].me;
++		ring->pipe = adev->gfx.gfx_ring[0].pipe;
+ 		break;
+ 	case AMDGPU_RING_TYPE_COMPUTE:
+ 		ring->funcs = adev->gfx.compute_ring[0].funcs;
++		ring->me = adev->gfx.compute_ring[0].me;
++		ring->pipe = adev->gfx.compute_ring[0].pipe;
+ 		break;
+ 	case AMDGPU_RING_TYPE_SDMA:
+ 		ring->funcs = adev->sdma.instance[0].ring.funcs;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 7d5019a884024..2003be3390aab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1380,7 +1380,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+ {
+ 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ 
+-	sysfs_remove_file_from_group(&adev->dev->kobj,
++	if (adev->dev->kobj.sd)
++		sysfs_remove_file_from_group(&adev->dev->kobj,
+ 				&con->badpages_attr.attr,
+ 				RAS_FS_NAME);
+ }
+@@ -1397,7 +1398,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+ 		.attrs = attrs,
+ 	};
+ 
+-	sysfs_remove_group(&adev->dev->kobj, &group);
++	if (adev->dev->kobj.sd)
++		sysfs_remove_group(&adev->dev->kobj, &group);
+ 
+ 	return 0;
+ }
+@@ -1444,7 +1446,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ 	if (!obj || !obj->attr_inuse)
+ 		return -EINVAL;
+ 
+-	sysfs_remove_file_from_group(&adev->dev->kobj,
++	if (adev->dev->kobj.sd)
++		sysfs_remove_file_from_group(&adev->dev->kobj,
+ 				&obj->sysfs_attr.attr,
+ 				RAS_FS_NAME);
+ 	obj->attr_inuse = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ae455aab5d29d..7e54abca45206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ 	void *ptr;
+ 	int i, idx;
+ 
++	bool in_ras_intr = amdgpu_ras_intr_triggered();
++
+ 	cancel_delayed_work_sync(&adev->vcn.idle_work);
+ 
++	/* err_event_athub will corrupt VCPU buffer, so we need to
++	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
++	if (in_ras_intr)
++		return 0;
++
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index d0748bcfad16b..75d25fba80821 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++		if (!mode)
++			continue;
+ 		drm_mode_probed_add(connector, mode);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 7a67bb1490159..0d2e50385c3e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1099,8 +1099,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ 				bo = gem_to_amdgpu_bo(gobj);
+ 		}
+ 		mem = bo->tbo.resource;
+-		if (mem->mem_type == TTM_PL_TT ||
+-		    mem->mem_type == AMDGPU_PL_PREEMPT)
++		if (mem && (mem->mem_type == TTM_PL_TT ||
++			    mem->mem_type == AMDGPU_PL_PREEMPT))
+ 			pages_addr = bo->tbo.ttm->dma_address;
+ 	}
+ 
+@@ -2129,7 +2129,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+  * Returns:
+  * 0 for success, error for failure.
+  */
+-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
++int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++		   int32_t xcp_id)
+ {
+ 	struct amdgpu_bo *root_bo;
+ 	struct amdgpu_bo_vm *root;
+@@ -2148,6 +2149,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ 	INIT_LIST_HEAD(&vm->done);
+ 	INIT_LIST_HEAD(&vm->pt_freed);
+ 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
++	INIT_KFIFO(vm->faults);
+ 
+ 	r = amdgpu_vm_init_entities(adev, vm);
+ 	if (r)
+@@ -2182,34 +2184,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ 				false, &root, xcp_id);
+ 	if (r)
+ 		goto error_free_delayed;
+-	root_bo = &root->bo;
++
++	root_bo = amdgpu_bo_ref(&root->bo);
+ 	r = amdgpu_bo_reserve(root_bo, true);
+-	if (r)
+-		goto error_free_root;
++	if (r) {
++		amdgpu_bo_unref(&root->shadow);
++		amdgpu_bo_unref(&root_bo);
++		goto error_free_delayed;
++	}
+ 
++	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+ 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
+ 	if (r)
+-		goto error_unreserve;
+-
+-	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
++		goto error_free_root;
+ 
+ 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
+ 	if (r)
+-		goto error_unreserve;
++		goto error_free_root;
+ 
+ 	amdgpu_bo_unreserve(vm->root.bo);
+-
+-	INIT_KFIFO(vm->faults);
++	amdgpu_bo_unref(&root_bo);
+ 
+ 	return 0;
+ 
+-error_unreserve:
+-	amdgpu_bo_unreserve(vm->root.bo);
+-
+ error_free_root:
+-	amdgpu_bo_unref(&root->shadow);
++	amdgpu_vm_pt_free_root(adev, vm);
++	amdgpu_bo_unreserve(vm->root.bo);
+ 	amdgpu_bo_unref(&root_bo);
+-	vm->root.bo = NULL;
+ 
+ error_free_delayed:
+ 	dma_fence_put(vm->last_tlb_flush);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 4038455d79984..ef368ca79a668 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -28,6 +28,7 @@
+ #include "nbio/nbio_2_3_offset.h"
+ #include "nbio/nbio_2_3_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
++#include <linux/device.h>
+ #include <linux/pci.h>
+ 
+ #define smnPCIE_CONFIG_CNTL	0x11180044
+@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+ 
+ 		data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ 
+-		if (pci_is_thunderbolt_attached(adev->pdev))
++		if (dev_is_removable(&adev->pdev->dev))
+ 			data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ 		else
+ 			data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ 
+ 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ 	data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+-	if (pci_is_thunderbolt_attached(adev->pdev))
++	if (dev_is_removable(&adev->pdev->dev))
+ 		data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ 	else
+ 		data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+index c7991e07b6be5..a7697ec8188e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+@@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ 						SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ 			switch (encoding) {
+ 			case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+-				pr_debug(
++				pr_debug_ratelimited(
+ 					"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ 					REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ 							SE_ID),
+@@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ 							THREAD_TRACE_UTC_ERROR));
+ 				break;
+ 			case SQ_INTERRUPT_WORD_ENCODING_INST:
+-				pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
++				pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ 					REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ 							SE_ID),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+@@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ 			case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ 				sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ 								ERR_TYPE);
+-				pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
++				pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ 					REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ 							SE_ID),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+index f933bd231fb9c..2a65792fd1162 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+@@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
+ 
+ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+ {
+-	pr_debug(
++	pr_debug_ratelimited(
+ 		"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
+@@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+ 
+ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+ {
+-	pr_debug(
++	pr_debug_ratelimited(
+ 		"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+@@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+ 
+ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+ {
+-	pr_warn(
++	pr_warn_ratelimited(
+ 		"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
+ 		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index f0731a6a5306c..02695ccd22d6e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ 			encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ 			switch (encoding) {
+ 			case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+-				pr_debug(
++				pr_debug_ratelimited(
+ 					"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ 				break;
+ 			case SQ_INTERRUPT_WORD_ENCODING_INST:
+-				pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
++				pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ 				break;
+ 			case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ 				sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+-				pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
++				pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ 					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 50f943e04f8a4..a5c394fcbb350 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -617,8 +617,15 @@ create_bo_failed:
+ 
+ void svm_range_vram_node_free(struct svm_range *prange)
+ {
+-	svm_range_bo_unref(prange->svm_bo);
+-	prange->ttm_res = NULL;
++	/* serialize prange->svm_bo unref */
++	mutex_lock(&prange->lock);
++	/* prange->svm_bo has not been unref'd yet */
++	if (prange->ttm_res) {
++		prange->ttm_res = NULL;
++		mutex_unlock(&prange->lock);
++		svm_range_bo_unref(prange->svm_bo);
++	} else {
++		mutex_unlock(&prange->lock);
++	}
+ }
+ 
+ struct kfd_node *
+@@ -749,7 +756,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
+ 			prange->flags &= ~attrs[i].value;
+ 			break;
+ 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+-			prange->granularity = attrs[i].value;
++			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
+ 			break;
+ 		default:
+ 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
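
The svm_range_vram_node_free() change above is an instance of a general pattern: claim the pointer under the lock, mark it consumed, then drop the reference only after unlocking, so concurrent callers cannot unref twice. A minimal sketch of that pattern, with a hypothetical holder/release_ref() standing in for the prange fields:

#include <linux/mutex.h>

struct object;
void release_ref(struct object *obj);	/* hypothetical, may sleep */

struct holder {
	struct mutex lock;
	struct object *obj;	/* non-NULL means "not yet released" */
};

static void holder_put_obj(struct holder *h)
{
	struct object *obj;

	mutex_lock(&h->lock);
	obj = h->obj;		/* claim ownership under the lock */
	h->obj = NULL;		/* later callers see NULL and back off */
	mutex_unlock(&h->lock);

	if (obj)
		release_ref(obj);	/* drop the reference unlocked */
}
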
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 573e27399c790..256058cd42851 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2077,7 +2077,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 	struct dmub_srv_create_params create_params;
+ 	struct dmub_srv_region_params region_params;
+ 	struct dmub_srv_region_info region_info;
+-	struct dmub_srv_fb_params fb_params;
++	struct dmub_srv_memory_params memory_params;
+ 	struct dmub_srv_fb_info *fb_info;
+ 	struct dmub_srv *dmub_srv;
+ 	const struct dmcub_firmware_header_v1_0 *hdr;
+@@ -2177,6 +2177,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		adev->dm.dmub_fw->data +
+ 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ 		PSP_HEADER_BYTES;
++	region_params.is_mailbox_in_inbox = false;
+ 
+ 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ 					   &region_info);
+@@ -2200,10 +2201,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	/* Rebase the regions on the framebuffer address. */
+-	memset(&fb_params, 0, sizeof(fb_params));
+-	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+-	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+-	fb_params.region_info = &region_info;
++	memset(&memory_params, 0, sizeof(memory_params));
++	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
++	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
++	memory_params.region_info = &region_info;
+ 
+ 	adev->dm.dmub_fb_info =
+ 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+@@ -2215,7 +2216,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
++	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ 	if (status != DMUB_STATUS_OK) {
+ 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ 		return -EINVAL;
+@@ -7394,6 +7395,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	int i;
+ 	int result = -EIO;
+ 
++	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++		return result;
++
+ 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+ 
+ 	if (!cmd.payloads)
+@@ -9504,14 +9508,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ 	struct drm_plane *other;
+ 	struct drm_plane_state *old_other_state, *new_other_state;
+ 	struct drm_crtc_state *new_crtc_state;
++	struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ 	int i;
+ 
+ 	/*
+-	 * TODO: Remove this hack once the checks below are sufficient
+-	 * enough to determine when we need to reset all the planes on
+-	 * the stream.
++	 * TODO: Remove this hack for all asics once it proves that the
++	 * fast updates works fine on DCN3.2+.
+ 	 */
+-	if (state->allow_modeset)
++	if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ 		return true;
+ 
+ 	/* Exit early if we know that we're adding or removing the plane. */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index b885c39bd16ba..ad1a1368f5779 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1591,31 +1591,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ 	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ 	unsigned int max_compressed_bw_in_kbps = 0;
+ 	struct dc_dsc_bw_range bw_range = {0};
+-	struct drm_dp_mst_topology_mgr *mst_mgr;
++	uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
+ 
+ 	/*
+-	 * check if the mode could be supported if DSC pass-through is supported
+-	 * AND check if there enough bandwidth available to support the mode
+-	 * with DSC enabled.
++	 * Consider the case where the depth of the MST topology tree is equal to or less than 2
++	 * A. When dsc bitstream can be transmitted along the entire path
++	 *    1. dsc is possible between source and branch/leaf device (common dsc params are possible), AND
++	 *    2. dsc passthrough supported at MST branch, or
++	 *    3. dsc decoding supported at leaf MST device
++	 *    Use maximum dsc compression as bw constraint
++	 * B. When dsc bitstream cannot be transmitted along the entire path
++	 *    Use native bw as bw constraint
+ 	 */
+ 	if (is_dsc_common_config_possible(stream, &bw_range) &&
+-	    aconnector->mst_output_port->passthrough_aux) {
+-		mst_mgr = aconnector->mst_output_port->mgr;
+-		mutex_lock(&mst_mgr->lock);
+-
++	   (aconnector->mst_output_port->passthrough_aux ||
++	    aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
+ 		cur_link_settings = stream->link->verified_link_cap;
+ 
+ 		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+-							       &cur_link_settings
+-							       );
+-		down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
++							       &cur_link_settings);
++		down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
+ 
+ 		/* pick the bottleneck */
+ 		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ 					    down_link_bw_in_kbps);
+ 
+-		mutex_unlock(&mst_mgr->lock);
+-
+ 		/*
+ 		 * use the maximum dsc compression bandwidth as the required
+ 		 * bandwidth for the mode
+@@ -1630,8 +1630,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ 		/* check if mode could be supported within full_pbn */
+ 		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ 		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+-
+-		if (pbn > aconnector->mst_output_port->full_pbn)
++		if (pbn > full_pbn)
+ 			return DC_FAIL_BANDWIDTH_VALIDATE;
+ 	}
+ 
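
The validation above reduces to a bottleneck computation: the usable end-to-end rate is the minimum of the source-side link budget and the downstream full_pbn budget, and the mode passes if its DSC-compressed or native rate fits under that. A hedged stand-alone sketch (helpers and units are hypothetical, all rates in kbps):

#include <linux/minmax.h>

static bool mode_fits_end_to_end(unsigned int required_kbps,
				 unsigned int upper_link_kbps,
				 unsigned int down_link_kbps)
{
	/* the path is only as fast as its slowest hop */
	unsigned int end_to_end_kbps = min(upper_link_kbps,
					   down_link_kbps);

	return required_kbps <= end_to_end_kbps;
}
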
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 609048160aa20..b386f3b0fd428 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -993,7 +993,8 @@ static bool dc_construct(struct dc *dc,
+ 	/* set i2c speed if not done by the respective dcnxxx__resource.c */
+ 	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ 		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+-
++	if (dc->caps.max_optimizable_video_width == 0)
++		dc->caps.max_optimizable_video_width = 5120;
+ 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ 	if (!dc->clk_mgr)
+ 		goto fail;
+@@ -1070,53 +1071,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
+ 	}
+ }
+ 
+-static void phantom_pipe_blank(
+-		struct dc *dc,
+-		struct timing_generator *tg,
+-		int width,
+-		int height)
+-{
+-	struct dce_hwseq *hws = dc->hwseq;
+-	enum dc_color_space color_space;
+-	struct tg_color black_color = {0};
+-	struct output_pixel_processor *opp = NULL;
+-	uint32_t num_opps, opp_id_src0, opp_id_src1;
+-	uint32_t otg_active_width, otg_active_height;
+-	uint32_t i;
+-
+-	/* program opp dpg blank color */
+-	color_space = COLOR_SPACE_SRGB;
+-	color_space_to_black_color(dc, color_space, &black_color);
+-
+-	otg_active_width = width;
+-	otg_active_height = height;
+-
+-	/* get the OPTC source */
+-	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+-	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+-
+-	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+-		if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+-			opp = dc->res_pool->opps[i];
+-			break;
+-		}
+-	}
+-
+-	if (opp && opp->funcs->opp_set_disp_pattern_generator)
+-		opp->funcs->opp_set_disp_pattern_generator(
+-				opp,
+-				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+-				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+-				COLOR_DEPTH_UNDEFINED,
+-				&black_color,
+-				otg_active_width,
+-				otg_active_height,
+-				0);
+-
+-	if (tg->funcs->is_tg_enabled(tg))
+-		hws->funcs.wait_for_blank_complete(opp);
+-}
+-
+ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+ {
+ 	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+@@ -1207,7 +1161,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 
+ 					main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ 					main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+-					phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
++					if (dc->hwss.blank_phantom)
++						dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ 					tg->funcs->enable_crtc(tg);
+ 				}
+ 			}
+@@ -2476,6 +2431,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ }
+ 
+ static enum surface_update_type get_scaling_info_update_type(
++		const struct dc *dc,
+ 		const struct dc_surface_update *u)
+ {
+ 	union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -2510,6 +2466,12 @@ static enum surface_update_type get_scaling_info_update_type(
+ 			update_flags->bits.clock_change = 1;
+ 	}
+ 
++	if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
++		(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
++		 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
++		 /* Changing clip size of a large surface may result in MPC slice count change */
++		update_flags->bits.bandwidth_change = 1;
++
+ 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
+ 			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+@@ -2547,7 +2509,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ 	type = get_plane_info_update_type(u);
+ 	elevate_update_type(&overall_type, type);
+ 
+-	type = get_scaling_info_update_type(u);
++	type = get_scaling_info_update_type(dc, u);
+ 	elevate_update_type(&overall_type, type);
+ 
+ 	if (u->flip_addr) {
+@@ -4460,6 +4422,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
+ 				update_type,
+ 				context);
+ 	} else {
++		if (!stream_update &&
++				dc->hwss.is_pipe_topology_transition_seamless &&
++				!dc->hwss.is_pipe_topology_transition_seamless(
++						dc, dc->current_state, context)) {
++
++			DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
++			BREAK_TO_DEBUGGER();
++		}
+ 		commit_planes_for_stream(
+ 				dc,
+ 				srf_updates,
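
Both call sites added in this file follow the driver's optional-hook convention: a hw_sequencer_funcs member may be NULL on ASICs that do not implement it, so every use tests the pointer before calling. A tiny sketch of the guard, with hypothetical names:

#include <linux/printk.h>

struct hooks {
	/* optional; NULL on hardware without the feature */
	bool (*is_transition_seamless)(int cur, int next);
};

static void warn_if_not_seamless(const struct hooks *h, int cur, int next)
{
	if (h->is_transition_seamless &&
	    !h->is_transition_seamless(cur, next))
		pr_err("non-seamless pipe topology transition\n");
}
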
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 6e11d2b701f82..569d40eb7059d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -556,7 +556,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+ 
+-		if (res_ctx->pipe_ctx[i].stream != stream)
++		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ 			continue;
+ 
+ 		return tg->funcs->get_frame_count(tg);
+@@ -615,7 +615,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+ 
+-		if (res_ctx->pipe_ctx[i].stream != stream)
++		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ 			continue;
+ 
+ 		tg->funcs->get_scanoutpos(tg,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 81258392d44a1..dc0e0af616506 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -229,6 +229,11 @@ struct dc_caps {
+ 	uint32_t dmdata_alloc_size;
+ 	unsigned int max_cursor_size;
+ 	unsigned int max_video_width;
++	/*
++	 * max video plane width that can be safely assumed to be always
++	 * supported by a single DPP pipe.
++	 */
++	unsigned int max_optimizable_video_width;
+ 	unsigned int min_horizontal_blanking_period;
+ 	int linear_pitch_alignment;
+ 	bool dcc_const_color;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9834b75f1837b..79befa17bb037 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -111,7 +111,8 @@ void dcn10_lock_all_pipes(struct dc *dc,
+ 		if (pipe_ctx->top_pipe ||
+ 		    !pipe_ctx->stream ||
+ 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
+-		    !tg->funcs->is_tg_enabled(tg))
++		    !tg->funcs->is_tg_enabled(tg) ||
++			pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ 			continue;
+ 
+ 		if (lock)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 62a077adcdbfa..84fe449a2c7ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1846,8 +1846,16 @@ void dcn20_program_front_end_for_ctx(
+ 			dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ 			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+ 
+-			if (tg->funcs->enable_crtc)
++			if (tg->funcs->enable_crtc) {
++				if (dc->hwss.blank_phantom) {
++					int main_pipe_width, main_pipe_height;
++
++					main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
++					main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
++					dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
++				}
+ 				tg->funcs->enable_crtc(tg);
++			}
+ 		}
+ 	}
+ 	/* OTG blank before disabling all front ends */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index d52d5feeb311b..ccbcfd6bd6b85 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -216,7 +216,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
+ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
+ {
+ 	int i;
+-	uint8_t num_ways = 0;
++	uint32_t num_ways = 0;
+ 	uint32_t mall_ss_size_bytes = 0;
+ 
+ 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+@@ -246,7 +246,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
+ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ {
+ 	union dmub_rb_cmd cmd;
+-	uint8_t ways, i;
++	uint8_t i;
++	uint32_t ways;
+ 	int j;
+ 	bool mall_ss_unsupported = false;
+ 	struct dc_plane_state *plane = NULL;
+@@ -306,7 +307,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ 				cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ 				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
+ 				cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+-				cmd.cab.cab_alloc_ways = ways;
++				cmd.cab.cab_alloc_ways = (uint8_t)ways;
+ 
+ 				dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ 
+@@ -1575,3 +1576,101 @@ void dcn32_init_blank(
+ 	if (opp)
+ 		hws->funcs.wait_for_blank_complete(opp);
+ }
++
++void dcn32_blank_phantom(struct dc *dc,
++		struct timing_generator *tg,
++		int width,
++		int height)
++{
++	struct dce_hwseq *hws = dc->hwseq;
++	enum dc_color_space color_space;
++	struct tg_color black_color = {0};
++	struct output_pixel_processor *opp = NULL;
++	uint32_t num_opps, opp_id_src0, opp_id_src1;
++	uint32_t otg_active_width, otg_active_height;
++	uint32_t i;
++
++	/* program opp dpg blank color */
++	color_space = COLOR_SPACE_SRGB;
++	color_space_to_black_color(dc, color_space, &black_color);
++
++	otg_active_width = width;
++	otg_active_height = height;
++
++	/* get the OPTC source */
++	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
++	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
++
++	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
++		if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
++			opp = dc->res_pool->opps[i];
++			break;
++		}
++	}
++
++	if (opp && opp->funcs->opp_set_disp_pattern_generator)
++		opp->funcs->opp_set_disp_pattern_generator(
++				opp,
++				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
++				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
++				COLOR_DEPTH_UNDEFINED,
++				&black_color,
++				otg_active_width,
++				otg_active_height,
++				0);
++
++	if (tg->funcs->is_tg_enabled(tg))
++		hws->funcs.wait_for_blank_complete(opp);
++}
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++		const struct dc_state *cur_ctx,
++		const struct dc_state *new_ctx)
++{
++	int i;
++	const struct pipe_ctx *cur_pipe, *new_pipe;
++	bool is_seamless = true;
++
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
++		new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
++
++		if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
++				resource_is_pipe_type(new_pipe, FREE_PIPE))
++			/* adding or removing free pipes is always seamless */
++			continue;
++		else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
++			if (resource_is_pipe_type(new_pipe, OTG_MASTER))
++				/* OTG master with the same stream is seamless */
++				if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
++					continue;
++		} else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
++			if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
++				if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
++					/*
++					 * OPP heads sharing the same timing
++					 * generator is seamless
++					 */
++					continue;
++			}
++		} else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
++			if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
++				if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
++					/*
++					 * DPP pipes sharing the same OPP head is
++					 * seamless
++					 */
++					continue;
++			}
++		}
++
++		/*
++		 * This pipe's transition doesn't fall under any seamless
++		 * conditions
++		 */
++		is_seamless = false;
++		break;
++	}
++
++	return is_seamless;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+index 2d2628f31bed7..9992e40acd217 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+@@ -115,4 +115,13 @@ void dcn32_init_blank(
+ 		struct dc *dc,
+ 		struct timing_generator *tg);
+ 
++void dcn32_blank_phantom(struct dc *dc,
++		struct timing_generator *tg,
++		int width,
++		int height);
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++		const struct dc_state *cur_ctx,
++		const struct dc_state *new_ctx);
++
+ #endif /* __DC_HWSS_DCN32_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index 777b2fac20c4e..12e0f48a13e48 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -115,6 +115,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+ 	.update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ 	.update_dsc_pg = dcn32_update_dsc_pg,
+ 	.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
++	.blank_phantom = dcn32_blank_phantom,
++	.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
+ };
+ 
+ static const struct hwseq_private_funcs dcn32_private_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 02ff99f7bec2b..66e680902c95c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -388,6 +388,11 @@ struct hw_sequencer_funcs {
+ 	void (*z10_restore)(const struct dc *dc);
+ 	void (*z10_save_init)(struct dc *dc);
+ 
++	void (*blank_phantom)(struct dc *dc,
++			struct timing_generator *tg,
++			int width,
++			int height);
++
+ 	void (*update_visual_confirm_color)(struct dc *dc,
+ 			struct pipe_ctx *pipe_ctx,
+ 			int mpcc_id);
+@@ -396,6 +401,9 @@ struct hw_sequencer_funcs {
+ 			struct dc_state *context,
+ 			struct pipe_ctx *phantom_pipe);
+ 	void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
++	bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
++			const struct dc_state *cur_ctx,
++			const struct dc_state *new_ctx);
+ 
+ 	void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ 	void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index 4585e0419da61..e2e8b35f3e26d 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -186,6 +186,7 @@ struct dmub_srv_region_params {
+ 	uint32_t vbios_size;
+ 	const uint8_t *fw_inst_const;
+ 	const uint8_t *fw_bss_data;
++	bool is_mailbox_in_inbox;
+ };
+ 
+ /**
+@@ -205,20 +206,25 @@ struct dmub_srv_region_params {
+  */
+ struct dmub_srv_region_info {
+ 	uint32_t fb_size;
++	uint32_t inbox_size;
+ 	uint8_t num_regions;
+ 	struct dmub_region regions[DMUB_WINDOW_TOTAL];
+ };
+ 
+ /**
+- * struct dmub_srv_fb_params - parameters used for driver fb setup
++ * struct dmub_srv_memory_params - parameters used for driver fb setup
+  * @region_info: region info calculated by dmub service
+- * @cpu_addr: base cpu address for the framebuffer
+- * @gpu_addr: base gpu virtual address for the framebuffer
++ * @cpu_fb_addr: base cpu address for the framebuffer
++ * @cpu_inbox_addr: base cpu address for the inbox (GART-backed)
++ * @gpu_fb_addr: base gpu virtual address for the framebuffer
++ * @gpu_inbox_addr: base gpu virtual address for the inbox (GART-backed)
+  */
+-struct dmub_srv_fb_params {
++struct dmub_srv_memory_params {
+ 	const struct dmub_srv_region_info *region_info;
+-	void *cpu_addr;
+-	uint64_t gpu_addr;
++	void *cpu_fb_addr;
++	void *cpu_inbox_addr;
++	uint64_t gpu_fb_addr;
++	uint64_t gpu_inbox_addr;
+ };
+ 
+ /**
+@@ -545,8 +551,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+  *   DMUB_STATUS_OK - success
+  *   DMUB_STATUS_INVALID - unspecified error
+  */
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+-				       const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++				       const struct dmub_srv_memory_params *params,
+ 				       struct dmub_srv_fb_info *out);
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index bdaf43892f47b..13ee22e9a1f4a 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -385,7 +385,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ 	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ 	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+-
++	uint32_t previous_top = 0;
+ 	if (!dmub->sw_init)
+ 		return DMUB_STATUS_INVALID;
+ 
+@@ -410,8 +410,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 	bios->base = dmub_align(stack->top, 256);
+ 	bios->top = bios->base + params->vbios_size;
+ 
+-	mail->base = dmub_align(bios->top, 256);
+-	mail->top = mail->base + DMUB_MAILBOX_SIZE;
++	if (params->is_mailbox_in_inbox) {
++		mail->base = 0;
++		mail->top = mail->base + DMUB_MAILBOX_SIZE;
++		previous_top = bios->top;
++	} else {
++		mail->base = dmub_align(bios->top, 256);
++		mail->top = mail->base + DMUB_MAILBOX_SIZE;
++		previous_top = mail->top;
++	}
+ 
+ 	fw_info = dmub_get_fw_meta_info(params);
+ 
+@@ -430,7 +437,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 			dmub->fw_version = fw_info->fw_version;
+ 	}
+ 
+-	trace_buff->base = dmub_align(mail->top, 256);
++	trace_buff->base = dmub_align(previous_top, 256);
+ 	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+ 
+ 	fw_state->base = dmub_align(trace_buff->top, 256);
+@@ -441,11 +448,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 
+ 	out->fb_size = dmub_align(scratch_mem->top, 4096);
+ 
++	if (params->is_mailbox_in_inbox)
++		out->inbox_size = dmub_align(mail->top, 4096);
++
+ 	return DMUB_STATUS_OK;
+ }
+ 
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+-				       const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++				       const struct dmub_srv_memory_params *params,
+ 				       struct dmub_srv_fb_info *out)
+ {
+ 	uint8_t *cpu_base;
+@@ -460,8 +470,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ 	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ 		return DMUB_STATUS_INVALID;
+ 
+-	cpu_base = (uint8_t *)params->cpu_addr;
+-	gpu_base = params->gpu_addr;
++	cpu_base = (uint8_t *)params->cpu_fb_addr;
++	gpu_base = params->gpu_fb_addr;
+ 
+ 	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ 		const struct dmub_region *reg =
+@@ -469,6 +479,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ 
+ 		out->fb[i].cpu_addr = cpu_base + reg->base;
+ 		out->fb[i].gpu_addr = gpu_base + reg->base;
++
++		if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
++			out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
++			out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
++		}
++
+ 		out->fb[i].size = reg->top - reg->base;
+ 	}
+ 
+@@ -657,9 +673,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+ 		return DMUB_STATUS_INVALID;
+ 
+ 	if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+-		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+-		dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+-		dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++		uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++		uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++		if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++			return DMUB_STATUS_HW_FAILURE;
++		} else {
++			dmub->inbox1_rb.rptr = rptr;
++			dmub->inbox1_rb.wrpt = wptr;
++			dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++		}
+ 	}
+ 
+ 	return DMUB_STATUS_OK;
+@@ -693,6 +716,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ 	if (!dmub->hw_init)
+ 		return DMUB_STATUS_INVALID;
+ 
++	if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++	    dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++		return DMUB_STATUS_HW_FAILURE;
++	}
++
+ 	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ 		return DMUB_STATUS_OK;
+ 
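
Both hunks above apply the same defensive rule: read/write pointers read back from DMCUB hardware are validated against the ring capacity before they are used to index the ring, and out-of-range values are reported as a hardware failure instead of corrupting the buffer. A sketch of the check (struct name hypothetical):

#include <linux/types.h>

struct ring {
	u32 rptr;
	u32 wrpt;
	u32 capacity;
};

/* Returns true when hardware-reported pointers lie inside the ring. */
static bool ring_pointers_sane(const struct ring *rb, u32 rptr, u32 wptr)
{
	return rptr <= rb->capacity && wptr <= rb->capacity;
}
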
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 0b6a057e0a4c4..5aac8d545bdc6 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ typedef struct _ATOM_PPLIB_STATE
+ {
+     UCHAR ucNonClockStateIndex;
+-    UCHAR ucClockStateIndices[1]; // variable-sized
++    UCHAR ucClockStateIndices[]; // variable-sized
+ } ATOM_PPLIB_STATE;
+ 
+ 
+@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+       /**
+       * Driver will read the first ucNumDPMLevels in this array
+       */
+-      UCHAR clockInfoIndex[1];
++      UCHAR clockInfoIndex[];
+ } ATOM_PPLIB_STATE_V2;
+ 
+ typedef struct _StateArray{
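
Converting the one-element arrays to C99 flexible array members gives the compiler and the kernel's bounds-checking machinery (FORTIFY_SOURCE, UBSAN) the real object size instead of a fake [1]. Allocation then typically goes through struct_size(); a minimal sketch with a hypothetical table type:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct entry_table {
	u8 rev_id;
	u8 num_entries;
	u32 entries[];	/* flexible array member, formerly entries[1] */
};

static struct entry_table *entry_table_alloc(u8 n)
{
	struct entry_table *t;

	/* struct_size() = sizeof(*t) + n * sizeof(t->entries[0]),
	 * with overflow checking folded in. */
	t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);
	if (t)
		t->num_entries = n;
	return t;
}
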
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 7f7a476b6829c..eb2c1d59bc6a7 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -734,7 +734,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 	if (adev->in_suspend && !adev->in_runpm)
+ 		return -EPERM;
+ 
+-	if (count > 127)
++	if (count > 127 || count == 0)
+ 		return -EINVAL;
+ 
+ 	if (*buf == 's')
+@@ -754,7 +754,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 	else
+ 		return -EINVAL;
+ 
+-	memcpy(buf_cpy, buf, count+1);
++	memcpy(buf_cpy, buf, count);
++	buf_cpy[count] = 0;
+ 
+ 	tmp_str = buf_cpy;
+ 
+@@ -771,6 +772,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 			return -EINVAL;
+ 		parameter_size++;
+ 
++		if (!tmp_str)
++			break;
++
+ 		while (isspace(*tmp_str))
+ 			tmp_str++;
+ 	}
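
The old code both copied count+1 bytes, reading one byte past the caller's buffer, and trusted that extra byte to be the terminator. The fix copies exactly count bytes and terminates explicitly; a generic sketch of the sysfs-store idiom (buffer size hypothetical):

#include <linux/errno.h>
#include <linux/string.h>

#define CMD_BUF_LEN 128

/* 'buf'/'count' are the raw sysfs store arguments. */
static int copy_store_input(char dst[CMD_BUF_LEN],
			    const char *buf, size_t count)
{
	if (count == 0 || count > CMD_BUF_LEN - 1)
		return -EINVAL;

	memcpy(dst, buf, count);	/* never read past 'count' */
	dst[count] = '\0';		/* parsers get a proper C string */
	return 0;
}
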
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+index b0ac4d121adca..e0e40b054c08b 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
+ typedef struct _ATOM_Tonga_State_Array {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;		/* Number of entries. */
+-	ATOM_Tonga_State entries[1];	/* Dynamically allocate entries. */
++	ATOM_Tonga_State entries[];	/* Dynamically allocate entries. */
+ } ATOM_Tonga_State_Array;
+ 
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_MCLK_Dependency_Record entries[1];				/* Dynamically allocate entries. */
++	ATOM_Tonga_MCLK_Dependency_Record entries[];				/* Dynamically allocate entries. */
+ } ATOM_Tonga_MCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_SCLK_Dependency_Record entries[1];				 /* Dynamically allocate entries. */
++	ATOM_Tonga_SCLK_Dependency_Record entries[];				 /* Dynamically allocate entries. */
+ } ATOM_Tonga_SCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;							/* Number of entries. */
+-	ATOM_Polaris_SCLK_Dependency_Record entries[1];				 /* Dynamically allocate entries. */
++	ATOM_Polaris_SCLK_Dependency_Record entries[];				 /* Dynamically allocate entries. */
+ } ATOM_Polaris_SCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_PCIE_Record {
+@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
+ typedef struct _ATOM_Tonga_PCIE_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_PCIE_Record entries[1];							/* Dynamically allocate entries. */
++	ATOM_Tonga_PCIE_Record entries[];							/* Dynamically allocate entries. */
+ } ATOM_Tonga_PCIE_Table;
+ 
+ typedef struct _ATOM_Polaris10_PCIE_Record {
+@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
+ typedef struct _ATOM_Polaris10_PCIE_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;                                         /* Number of entries. */
+-	ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
++	ATOM_Polaris10_PCIE_Record entries[];                      /* Dynamically allocate entries. */
+ } ATOM_Polaris10_PCIE_Table;
+ 
+ 
+@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
+ typedef struct _ATOM_Tonga_MM_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_MM_Dependency_Record entries[1]; 			   /* Dynamically allocate entries. */
++	ATOM_Tonga_MM_Dependency_Record entries[]; 			   /* Dynamically allocate entries. */
+ } ATOM_Tonga_MM_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_Voltage_Lookup_Record entries[1];				/* Dynamically allocate entries. */
++	ATOM_Tonga_Voltage_Lookup_Record entries[];				/* Dynamically allocate entries. */
+ } ATOM_Tonga_Voltage_Lookup_Table;
+ 
+ typedef struct _ATOM_Tonga_Fan_Table {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 1cb4022644977..a38888176805d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 
+ 	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ 	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+-	data->pcie_dpm_key_disabled =
+-		!amdgpu_device_pcie_dynamic_switching_supported() ||
+-		!(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
++	data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ 	/* need to set voltage control types before EVV patching */
+ 	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ 	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 222af2fae7458..16c03771c1239 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1232,7 +1232,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ {
+ 	struct smu_feature *feature = &smu->smu_feature;
+ 	struct amdgpu_device *adev = smu->adev;
+-	uint32_t pcie_gen = 0, pcie_width = 0;
++	uint8_t pcie_gen = 0, pcie_width = 0;
+ 	uint64_t features_supported;
+ 	int ret = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 6e2069dcb6b9d..d1d7713b97794 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -856,7 +856,7 @@ struct pptable_funcs {
+ 	 * &pcie_gen_cap: Maximum allowed PCIe generation.
+ 	 * &pcie_width_cap: Maximum allowed PCIe width.
+ 	 */
+-	int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
++	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
+ 
+ 	/**
+ 	 * @i2c_init: Initialize i2c.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 355c156d871af..cc02f979e9e98 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -296,8 +296,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ 					uint32_t pptable_id);
+ 
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap);
++				     uint8_t pcie_gen_cap,
++				     uint8_t pcie_width_cap);
+ 
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 95f6d821bacbc..addaa69119b8e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2375,8 +2375,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
+ }
+ 
+ static int navi10_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap)
++					 uint8_t pcie_gen_cap,
++					 uint8_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 9119b0df2419f..94f22df5ac205 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2084,14 +2084,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ #define MAX(a, b)	((a) > (b) ? (a) : (b))
+ 
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+-					 uint32_t pcie_gen_cap,
+-					 uint32_t pcie_width_cap)
++						 uint8_t pcie_gen_cap,
++						 uint8_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ 	uint8_t *table_member1, *table_member2;
+-	uint32_t min_gen_speed, max_gen_speed;
+-	uint32_t min_lane_width, max_lane_width;
++	uint8_t min_gen_speed, max_gen_speed;
++	uint8_t min_lane_width, max_lane_width;
+ 	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+@@ -2107,7 +2107,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 	min_lane_width = min_lane_width > max_lane_width ?
+ 			 max_lane_width : min_lane_width;
+ 
+-	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ 		pcie_table->pcie_gen[0] = max_gen_speed;
+ 		pcie_table->pcie_lane[0] = max_lane_width;
+ 	} else {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 9b62b45ebb7f0..3bc60ecc7bfef 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2426,8 +2426,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ }
+ 
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap)
++				     uint8_t pcie_gen_cap,
++				     uint8_t pcie_width_cap)
+ {
+ 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_13_0_pcie_table *pcie_table =
+@@ -2436,7 +2436,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ 	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+-	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++	if (!num_of_levels)
++		return 0;
++
++	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ 		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ 			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 0fb6be11a0cc7..41783c0243006 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -343,12 +343,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
+ 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
+ 		smu->dc_controlled_by_gpio = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
+-	    powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
+ 		smu_baco->platform_support = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+-		smu_baco->maco_support = true;
++		if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++			smu_baco->maco_support = true;
++	}
+ 
+ 	/*
+ 	 * We are in the transition to a new OD mechanism.
+@@ -2162,38 +2162,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ 		}
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+-		(((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+-		((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_COMPUTE_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
+-
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   true);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+-			return ret;
+-		}
+-
+-		workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       PP_SMC_POWER_PROFILE_CUSTOM);
+-	} else {
+-		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-		workload_type = smu_cmn_to_asic_specific_index(smu,
++	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++	workload_type = smu_cmn_to_asic_specific_index(smu,
+ 						       CMN2ASIC_MAPPING_WORKLOAD,
+ 						       smu->power_profile_mode);
+-	}
+ 
+ 	if (workload_type < 0)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 62f2886ab4df6..8cc16b3d18a3a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -333,12 +333,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
+ 	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ 		smu->dc_controlled_by_gpio = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
+-	    powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
+ 		smu_baco->platform_support = true;
+ 
+-	if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+-		smu_baco->maco_support = true;
++		if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++					&& (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
++			smu_baco->maco_support = true;
++	}
+ 
+ #if 0
+ 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 3276a3e82c628..916f2c36bf2f7 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ 	return 0;
+ }
+ 
+-static void
++static int
+ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 				   struct komeda_pipeline_state *new)
+ {
+@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = komeda_component_get_state_and_set_user(c,
+ 				drm_st, NULL, new->crtc);
++		if (PTR_ERR(c_st) == -EDEADLK)
++			return -EDEADLK;
+ 		WARN_ON(IS_ERR(c_st));
+ 	}
++
++	return 0;
+ }
+ 
+ /* release unclaimed pipeline resource */
+@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ 	if (WARN_ON(IS_ERR_OR_NULL(st)))
+ 		return -EINVAL;
+ 
+-	komeda_pipeline_unbound_components(pipe, st);
++	return komeda_pipeline_unbound_components(pipe, st);
+ 
+-	return 0;
+ }
+ 
+ /* Since standalone disabled components must be disabled separately and in the
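
-EDEADLK from komeda_component_get_state_and_set_user() is the atomic framework's normal backoff signal (drm_modeset_lock contention), so it must travel back to the caller for a drop-locks-and-retry rather than being swallowed by WARN_ON(). A hedged sketch of the propagation, with a hypothetical helper standing in for the state lookup:

#include <linux/bug.h>
#include <linux/err.h>

void *get_comp_state(void *comp);	/* hypothetical */

static int claim_component(void *comp)
{
	void *st = get_comp_state(comp);

	if (PTR_ERR(st) == -EDEADLK)
		return -EDEADLK;	/* normal backoff, core retries */

	WARN_ON(IS_ERR(st));		/* anything else is a real bug */
	return 0;
}
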
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index fc7f5ec5fb381..8f5846b76d594 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
+ 	mutex_lock(&ctx->lock);
+ 	ret = it66121_preamble_ddc(ctx);
+ 	if (ret) {
+-		edid = ERR_PTR(ret);
++		edid = NULL;
+ 		goto out_unlock;
+ 	}
+ 
+ 	ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ 			   IT66121_DDC_HEADER_EDID);
+ 	if (ret) {
+-		edid = ERR_PTR(ret);
++		edid = NULL;
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 69d855123d3e3..f1ceb7d08519e 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3499,11 +3499,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ 	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ 	mode->vtotal = mode->vdisplay + vblank;
+ 
+-	/* Some EDIDs have bogus h/vtotal values */
+-	if (mode->hsync_end > mode->htotal)
+-		mode->htotal = mode->hsync_end + 1;
+-	if (mode->vsync_end > mode->vtotal)
+-		mode->vtotal = mode->vsync_end + 1;
++	/* Some EDIDs have bogus h/vsync_end values */
++	if (mode->hsync_end > mode->htotal) {
++		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
++			    connector->base.id, connector->name,
++			    mode->hsync_end, mode->htotal);
++		mode->hsync_end = mode->htotal;
++	}
++	if (mode->vsync_end > mode->vtotal) {
++		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
++			    connector->base.id, connector->name,
++			    mode->vsync_end, mode->vtotal);
++		mode->vsync_end = mode->vtotal;
++	}
+ 
+ 	drm_mode_do_interlace_quirk(mode, pt);
+ 
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index 150fe15550680..94375c6a54256 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -510,8 +510,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ 	/* Handle leased objects, if any */
+ 	idr_init(&leases);
+ 	if (object_count != 0) {
+-		object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+-					 array_size(object_count, sizeof(__u32)));
++		object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
++					       object_count, sizeof(__u32));
+ 		if (IS_ERR(object_ids)) {
+ 			ret = PTR_ERR(object_ids);
+ 			idr_destroy(&leases);
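
memdup_array_user() performs the count-times-size multiplication with built-in overflow checking, so callers no longer need to remember array_size() at every copy site. A minimal sketch with a hypothetical ioctl argument:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy 'count' u32 object ids from userspace; returns ERR_PTR()
 * on failure, including on count * sizeof(u32) overflow. */
static u32 *copy_object_ids(const void __user *uptr, size_t count)
{
	return memdup_array_user(uptr, count, sizeof(u32));
}
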
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index f7f709df99b49..70d9adafa2333 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -424,6 +424,7 @@ struct drm_psb_private {
+ 	uint32_t pipestat[PSB_NUM_PIPE];
+ 
+ 	spinlock_t irqmask_lock;
++	bool irq_enabled;
+ 
+ 	/* Power */
+ 	bool pm_initialized;
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 343c51250207d..7bbb79b0497d8 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -327,6 +327,8 @@ int gma_irq_install(struct drm_device *dev)
+ 
+ 	gma_irq_postinstall(dev);
+ 
++	dev_priv->irq_enabled = true;
++
+ 	return 0;
+ }
+ 
+@@ -337,6 +339,9 @@ void gma_irq_uninstall(struct drm_device *dev)
+ 	unsigned long irqflags;
+ 	unsigned int i;
+ 
++	if (!dev_priv->irq_enabled)
++		return;
++
+ 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+ 	if (dev_priv->ops->hotplug_enable)
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 4207863b7b2ae..4bba2f536b421 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2680,6 +2680,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+ 	for_each_pipe(dev_priv, pipe)
+ 		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ 
++	/*
++	 * Avoid glk_force_audio_cdclk() causing excessive screen
++	 * blinking when multiple pipes are active by making sure
++	 * CDCLK frequency is always high enough for audio. With a
++	 * single active pipe we can always change CDCLK frequency
++	 * by changing the cd2x divider (see glk_cdclk_table[]) and
++	 * thus a full modeset won't be needed then.
++	 */
++	if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
++	    !is_power_of_2(cdclk_state->active_pipes))
++		min_cdclk = max(2 * 96000, min_cdclk);
++
+ 	if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index cb55112d60657..ec28354efc1c3 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
+ 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ 
+ 	if (intel_is_c10phy(i915, phy))
+-		return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
++		return 810000;
+ 
+ 	return 2000000;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 3ebf41859043e..cdf2455440bea 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -58,7 +58,7 @@ struct intel_tc_port {
+ 	struct delayed_work link_reset_work;
+ 	int link_refcount;
+ 	bool legacy_port:1;
+-	char port_name[8];
++	const char *port_name;
+ 	enum tc_port_mode mode;
+ 	enum tc_port_mode init_mode;
+ 	enum phy_fia phy_fia;
+@@ -1841,8 +1841,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+ 	else
+ 		tc->phy_ops = &icl_tc_phy_ops;
+ 
+-	snprintf(tc->port_name, sizeof(tc->port_name),
+-		 "%c/TC#%d", port_name(port), tc_port + 1);
++	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
++				  tc_port + 1);
++	if (!tc->port_name) {
++		kfree(tc);
++		return -ENOMEM;
++	}
+ 
+ 	mutex_init(&tc->lock);
+ 	/* TODO: Combine the two works */
+@@ -1863,6 +1867,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
+ {
+ 	intel_tc_port_suspend(dig_port);
+ 
++	kfree(dig_port->tc->port_name);
+ 	kfree(dig_port->tc);
+ 	dig_port->tc = NULL;
+ }
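
Replacing the fixed char[8] with kasprintf() removes the risk of silently truncating the formatted name, at the price of an allocation that the teardown path above now kfree()s. A sketch of the pairing:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Allocate-on-format; the owning object's cleanup must kfree() it. */
static const char *make_port_name(char group, int index)
{
	return kasprintf(GFP_KERNEL, "%c/TC#%d", group, index);
}
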
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 9a9ff84c90d7e..e38f06a6e56eb 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ 		if (idx >= pc->num_user_engines)
+ 			return -EINVAL;
+ 
++		idx = array_index_nospec(idx, pc->num_user_engines);
+ 		pe = &pc->user_engines[idx];
+ 
+ 		/* Only render engine supports RPCS configuration. */
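
array_index_nospec() clamps the index under speculative execution, so a bounds check the CPU mispredicts past cannot be abused to load out-of-bounds data (Spectre v1). The canonical bounds-check-then-sanitize pattern, with hypothetical names:

#include <linux/nospec.h>

struct engine { int id; };	/* hypothetical element type */

static struct engine *lookup_engine(struct engine *engines,
				    unsigned int nr, unsigned int idx)
{
	if (idx >= nr)
		return NULL;

	/* clamp idx to [0, nr) even under misspeculation */
	idx = array_index_nospec(idx, nr);
	return &engines[idx];
}
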
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index da21f2786b5d7..b20d8fe8aa95d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -190,6 +190,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+ 	spin_unlock_irq(&uncore->lock);
+ }
+ 
++static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
++{
++	/*
++	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
++	 * will be dropped. For WC mappings in general we have 64 byte burst
++	 * writes when the WC buffer is flushed, so we can't use it, but have to
++	 * resort to an uncached mapping. The WC issue is easily caught by the
++	 * readback check when writing GTT PTE entries.
++	 */
++	if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
++		return true;
++
++	return false;
++}
++
+ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ {
+ 	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+@@ -197,8 +212,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ 	/*
+ 	 * Note that as an uncached mmio write, this will flush the
+ 	 * WCB of the writes into the GGTT before it triggers the invalidate.
++	 *
++	 * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
+ 	 */
+-	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
++	if (needs_wc_ggtt_mapping(ggtt->vm.i915))
++		intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
++				      GFX_FLSH_CNTL_EN);
+ }
+ 
+ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+@@ -902,17 +921,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+ 	GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ 	phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+ 
+-	/*
+-	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+-	 * will be dropped. For WC mappings in general we have 64 byte burst
+-	 * writes when the WC buffer is flushed, so we can't use it, but have to
+-	 * resort to an uncached mapping. The WC issue is easily caught by the
+-	 * readback check when writing GTT PTE entries.
+-	 */
+-	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+-		ggtt->gsm = ioremap(phys_addr, size);
+-	else
++	if (needs_wc_ggtt_mapping(i915))
+ 		ggtt->gsm = ioremap_wc(phys_addr, size);
++	else
++		ggtt->gsm = ioremap(phys_addr, size);
++
+ 	if (!ggtt->gsm) {
+ 		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 58bb1c55294c9..ccdc1afbf11b5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -584,19 +584,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
+ 
+ static void rc6_res_reg_init(struct intel_rc6 *rc6)
+ {
+-	memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
++	i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
++		[0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
++	};
+ 
+ 	switch (rc6_to_gt(rc6)->type) {
+ 	case GT_MEDIA:
+-		rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
++		res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
+ 		break;
+ 	default:
+-		rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
+-		rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
+-		rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
+-		rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
++		res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
++		res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
++		res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
++		res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
+ 		break;
+ 	}
++
++	memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
+ }
+ 
+ void intel_rc6_init(struct intel_rc6 *rc6)
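
The old memset() smeared a single byte across every element, which is only correct when all bytes of INVALID_MMIO_REG happen to be identical; the replacement uses GCC's range designator to assign the full value to each slot. A small illustration of that initializer form:

#include <linux/string.h>

#define NR_SLOTS 4

struct reg { unsigned int addr; };

static void init_regs(struct reg out[NR_SLOTS])
{
	/* [first ... last] assigns the whole struct to every element */
	struct reg tmp[NR_SLOTS] = {
		[0 ... NR_SLOTS - 1] = { .addr = 0xffffffff },
	};

	memcpy(out, tmp, sizeof(tmp));
}
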
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 331685e1b7b7d..da5b0fac745b4 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4286,11 +4286,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ 	u32 known_open_flags;
+ 	int ret;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ 			   I915_PERF_FLAG_FD_NONBLOCK |
+@@ -4666,11 +4663,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ 	struct i915_oa_reg *regs;
+ 	int err, id;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	if (!perf->metrics_kobj) {
+ 		drm_dbg(&perf->i915->drm,
+@@ -4832,11 +4826,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ 	struct i915_oa_config *oa_config;
+ 	int ret;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ 		drm_dbg(&perf->i915->drm,
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 076aa54910571..bd6ace487c048 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1983,7 +1983,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ 	bool enabled = mtk_dp->enabled;
+ 	struct edid *new_edid = NULL;
+ 	struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+-	struct cea_sad *sads;
+ 
+ 	if (!enabled) {
+ 		drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
+@@ -2006,11 +2005,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ 	 */
+ 	if (mtk_dp_parse_capabilities(mtk_dp)) {
+ 		drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
++		kfree(new_edid);
+ 		new_edid = NULL;
+ 	}
+ 
+ 	if (new_edid) {
++		struct cea_sad *sads;
++
+ 		audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
++		kfree(sads);
++
+ 		audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ 	}
+ 
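
drm_edid_to_sad() allocates the SAD array with kmalloc() and hands ownership to the caller, so each successful call must be paired with a kfree() once the descriptors are consumed, which is exactly the leak the hunk above plugs. Sketch:

#include <drm/drm_edid.h>
#include <linux/slab.h>

static int count_audio_descriptors(const struct edid *edid)
{
	struct cea_sad *sads;
	int count = drm_edid_to_sad(edid, &sads);

	if (count > 0)
		kfree(sads);	/* caller owns the allocation */

	return count;
}
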
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 42d52510ffd4a..86a8e06c7a60f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -289,26 +289,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
+ 
+ static u8 dp_panel_get_edid_checksum(struct edid *edid)
+ {
+-	struct edid *last_block;
+-	u8 *raw_edid;
+-	bool is_edid_corrupt = false;
++	edid += edid->extensions;
+ 
+-	if (!edid) {
+-		DRM_ERROR("invalid edid input\n");
+-		return 0;
+-	}
+-
+-	raw_edid = (u8 *)edid;
+-	raw_edid += (edid->extensions * EDID_LENGTH);
+-	last_block = (struct edid *)raw_edid;
+-
+-	/* block type extension */
+-	drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+-	if (!is_edid_corrupt)
+-		return last_block->checksum;
+-
+-	DRM_ERROR("Invalid block, no checksum\n");
+-	return 0;
++	return edid->checksum;
+ }
+ 
+ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
+index abb0788843c60..503ecea72c5ea 100644
+--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
+@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
+ 	connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+ 
+ 	mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
++	if (!mode)
++		return -ENOMEM;
+ 	drm_mode_set_name(mode);
+ 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+index 3aa31f3d61574..687749bbec62c 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+@@ -506,29 +506,30 @@ static int st7703_prepare(struct drm_panel *panel)
+ 		return 0;
+ 
+ 	dev_dbg(ctx->dev, "Resetting the panel\n");
+-	ret = regulator_enable(ctx->vcc);
++	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
++	ret = regulator_enable(ctx->iovcc);
+ 	if (ret < 0) {
+-		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ 		return ret;
+ 	}
+-	ret = regulator_enable(ctx->iovcc);
++
++	ret = regulator_enable(ctx->vcc);
+ 	if (ret < 0) {
+-		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+-		goto disable_vcc;
++		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++		regulator_disable(ctx->iovcc);
++		return ret;
+ 	}
+ 
+-	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+-	usleep_range(20, 40);
++	/* Give power supplies time to stabilize before deasserting reset. */
++	usleep_range(10000, 20000);
++
+ 	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+-	msleep(20);
++	usleep_range(15000, 20000);
+ 
+ 	ctx->prepared = true;
+ 
+ 	return 0;
+-
+-disable_vcc:
+-	regulator_disable(ctx->vcc);
+-	return ret;
+ }
+ 
+ static const u32 mantix_bus_formats[] = {
+diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+index 845304435e235..f6a212e542cb9 100644
+--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
+ 	connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+ 
+ 	mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
++	if (!mode)
++		return -ENOMEM;
+ 	drm_mode_set_name(mode);
+ 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 6492a70e3c396..404b0483bb7cb 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
+ 	if (!qdev->monitors_config_bo)
+ 		return 0;
+ 
++	kfree(qdev->dumb_heads);
++	qdev->dumb_heads = NULL;
++
+ 	qdev->monitors_config = NULL;
+ 	qdev->ram_header->monitors_config = 0;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 07193cd0c4174..4859d965d67e3 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1122,6 +1122,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+ 	else {
+ 		/* only 800x600 is supported right now on pre-avivo chips */
+ 		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
++		if (!tv_mode)
++			return 0;
+ 		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 		drm_mode_probed_add(connector, tv_mode);
+ 	}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 3829be282ff00..17463aeeef28f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 	       sizeof(metadata->mip_levels));
+ 	metadata->num_sizes = num_sizes;
+ 	metadata->sizes =
+-		memdup_user((struct drm_vmw_size __user *)(unsigned long)
++		memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
+ 			    req->size_addr,
+-			    sizeof(*metadata->sizes) * metadata->num_sizes);
++			    metadata->num_sizes, sizeof(*metadata->sizes));
+ 	if (IS_ERR(metadata->sizes)) {
+ 		ret = PTR_ERR(metadata->sizes);
+ 		goto out_no_sizes;
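
The switch from memdup_user() with an open-coded `sizeof(*p) * n` to memdup_array_user() matters because the multiplication can wrap for attacker-controlled counts. A standalone sketch of the overflow check memdup_array_user() performs before copying; names and layout are illustrative, not the kernel implementation:

/* Reject n * size overflow instead of silently wrapping, then copy. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
	size_t bytes;
	void *p;

	if (size != 0 && n > SIZE_MAX / size) {
		errno = EOVERFLOW;
		return NULL;
	}
	bytes = n * size;
	p = malloc(bytes);
	if (p)
		memcpy(p, src, bytes);
	return p;
}

int main(void)
{
	uint32_t sizes[4] = { 1, 2, 3, 4 };
	uint32_t *copy = memdup_array(sizes, 4, sizeof(*sizes));

	printf("copy[3] = %u\n", copy ? copy[3] : 0);
	free(copy);
	return 0;
}
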
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index cc0d0186a0d95..fafc40ecfd200 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -366,6 +366,7 @@
+ 
+ #define USB_VENDOR_ID_DELL				0x413c
+ #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
++#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W		0x4503
+ 
+ #define USB_VENDOR_ID_DELORME		0x1163
+ #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 44763c0da4441..7c1b33be9d134 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -51,7 +51,12 @@ struct lenovo_drvdata {
+ 	int select_right;
+ 	int sensitivity;
+ 	int press_speed;
+-	u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
++	/* 0: Up
++	 * 1: Down (undecided)
++	 * 2: Scrolling
++	 * 3: Patched firmware, disable workaround
++	 */
++	u8 middlebutton_state;
+ 	bool fn_lock;
+ };
+ 
+@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ 	int ret;
+ 	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ 
++	/*
++	 * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
++	 * regular keys
++	 */
++	ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++	if (ret)
++		hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++
++	/* Switch middle button to native mode */
++	ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
++	if (ret)
++		hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
++
+ 	ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ 	if (ret)
+ 		hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+@@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ 	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ 
+-	/* "wheel" scroll events */
+-	if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+-			usage->code == REL_HWHEEL)) {
+-		/* Scroll events disable middle-click event */
+-		cptkbd_data->middlebutton_state = 2;
+-		return 0;
+-	}
++	if (cptkbd_data->middlebutton_state != 3) {
++		/* REL_X and REL_Y events while the middle button is
++		 * pressed are only possible on patched, bug-free
++		 * firmware, so set middlebutton_state to 3 and never
++		 * apply the workaround again.
++		 */
++		if (cptkbd_data->middlebutton_state == 1 &&
++				usage->type == EV_REL &&
++				(usage->code == REL_X || usage->code == REL_Y)) {
++			cptkbd_data->middlebutton_state = 3;
++			/* send the middle button press that was held back */
++			input_event(field->hidinput->input,
++				EV_KEY, BTN_MIDDLE, 1);
++			input_sync(field->hidinput->input);
++		}
++
++		/* "wheel" scroll events */
++		if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
++				usage->code == REL_HWHEEL)) {
++			/* Scroll events disable middle-click event */
++			cptkbd_data->middlebutton_state = 2;
++			return 0;
++		}
+ 
+-	/* Middle click events */
+-	if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+-		if (value == 1) {
+-			cptkbd_data->middlebutton_state = 1;
+-		} else if (value == 0) {
+-			if (cptkbd_data->middlebutton_state == 1) {
+-				/* No scrolling inbetween, send middle-click */
+-				input_event(field->hidinput->input,
+-					EV_KEY, BTN_MIDDLE, 1);
+-				input_sync(field->hidinput->input);
+-				input_event(field->hidinput->input,
+-					EV_KEY, BTN_MIDDLE, 0);
+-				input_sync(field->hidinput->input);
++		/* Middle click events */
++		if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
++			if (value == 1) {
++				cptkbd_data->middlebutton_state = 1;
++			} else if (value == 0) {
++				if (cptkbd_data->middlebutton_state == 1) {
++					/* No scrolling in between, send middle-click */
++					input_event(field->hidinput->input,
++						EV_KEY, BTN_MIDDLE, 1);
++					input_sync(field->hidinput->input);
++					input_event(field->hidinput->input,
++						EV_KEY, BTN_MIDDLE, 0);
++					input_sync(field->hidinput->input);
++				}
++				cptkbd_data->middlebutton_state = 0;
+ 			}
+-			cptkbd_data->middlebutton_state = 0;
++			return 1;
+ 		}
+-		return 1;
+ 	}
+ 
+ 	if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+@@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ 	}
+ 	hid_set_drvdata(hdev, cptkbd_data);
+ 
+-	/*
+-	 * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+-	 * regular keys (Compact only)
+-	 */
+-	if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+-	    hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+-		ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+-		if (ret)
+-			hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+-	}
+-
+-	/* Switch middle button to native mode */
+-	ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+-	if (ret)
+-		hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+-
+ 	/* Set keyboard settings to known state */
+ 	cptkbd_data->middlebutton_state = 0;
+ 	cptkbd_data->fn_lock = true;
+@@ -1264,6 +1283,24 @@ err:
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_PM
++static int lenovo_reset_resume(struct hid_device *hdev)
++{
++	switch (hdev->product) {
++	case USB_DEVICE_ID_LENOVO_CUSBKBD:
++	case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
++		if (hdev->type == HID_TYPE_USBMOUSE)
++			lenovo_features_set_cptkbd(hdev);
++
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++#endif
++
+ static void lenovo_remove_tpkbd(struct hid_device *hdev)
+ {
+ 	struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+@@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
+ 	.raw_event = lenovo_raw_event,
+ 	.event = lenovo_event,
+ 	.report_fixup = lenovo_report_fixup,
++#ifdef CONFIG_PM
++	.reset_resume = lenovo_reset_resume,
++#endif
+ };
+ module_hid_driver(lenovo_driver);
+ 
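
A compact userspace model of the middle-button state machine the hid-lenovo hunks implement may help: states follow the driver comment (0 up, 1 down/undecided, 2 scrolling, 3 patched firmware, workaround off). Event names here are illustrative only.

/* Model of the four-state middle-button workaround. */
#include <stdio.h>
#include <string.h>

enum mb { MB_UP, MB_DOWN, MB_SCROLL, MB_FIXED_FW };

static enum mb step(enum mb s, const char *ev)
{
	if (s == MB_FIXED_FW)
		return s;			/* workaround permanently off */
	if (!strcmp(ev, "rel_xy"))
		return s == MB_DOWN ? MB_FIXED_FW : s;	/* patched firmware */
	if (!strcmp(ev, "wheel"))
		return MB_SCROLL;		/* scrolling cancels the click */
	if (!strcmp(ev, "btn_down"))
		return MB_DOWN;
	if (!strcmp(ev, "btn_up"))
		return MB_UP;			/* leaving MB_DOWN emits a click */
	return s;
}

int main(void)
{
	const char *events[] = { "btn_down", "wheel", "btn_up",
				 "btn_down", "rel_xy" };
	enum mb s = MB_UP;

	for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		s = step(s, events[i]);
		printf("%-8s -> state %d\n", events[i], s);
	}
	return 0;
}
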
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 3983b4f282f8f..5a48fcaa32f00 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 9cfe8fc509d7d..c91d4ea35c9b8 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -158,6 +158,7 @@ config I2C_I801
+ 	    Alder Lake (PCH)
+ 	    Raptor Lake (PCH)
+ 	    Meteor Lake (SOC and PCH)
++	    Birch Stream (SOC)
+ 
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 24bef0025c988..cebb39a1f15e6 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -518,10 +518,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+ 
+ 		/*
+ 		 * Because we don't know the buffer length in the
+-		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+-		 * the transaction here.
++		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
++		 * transaction here. Also disable the TX_EMPTY IRQ
++		 * while waiting for the data length byte, to avoid a
++		 * flood of bogus interrupts.
+ 		 */
+-		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
++		if (flags & I2C_M_RECV_LEN) {
++			dev->status |= STATUS_WRITE_IN_PROGRESS;
++			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++			break;
++		} else if (buf_len > 0) {
+ 			/* more bytes to be written */
+ 			dev->status |= STATUS_WRITE_IN_PROGRESS;
+ 			break;
+@@ -557,6 +563,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+ 	msgs[dev->msg_read_idx].len = len;
+ 	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+ 
++	/*
++	 * Received buffer length, re-enable TX_EMPTY interrupt
++	 * to resume the SMBUS transaction.
++	 */
++	regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
++			   DW_IC_INTR_TX_EMPTY);
++
+ 	return len;
+ }
+ 
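
The hunk above masks TX_EMPTY while the controller waits for the SMBus block-length byte and re-enables it in i2c_dw_recv_len() via regmap_update_bits(), which is a read-modify-write under a mask. A minimal model of that primitive, with the register held in a plain word and an illustrative bit position:

/* Model of regmap_update_bits(map, reg, mask, val). */
#include <stdint.h>
#include <stdio.h>

#define INTR_TX_EMPTY	(1u << 4)	/* illustrative bit position */

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t intr_mask = 0x3f;

	update_bits(&intr_mask, INTR_TX_EMPTY, 0);		/* mask TX_EMPTY */
	printf("masked:   0x%02x\n", intr_mask);
	update_bits(&intr_mask, INTR_TX_EMPTY, INTR_TX_EMPTY);	/* re-enable */
	printf("restored: 0x%02x\n", intr_mask);
	return 0;
}
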
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 2a3215ac01b3a..7d78df30fe132 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -79,6 +79,7 @@
+  * Meteor Lake-P (SOC)		0x7e22	32	hard	yes	yes	yes
+  * Meteor Lake SoC-S (SOC)	0xae22	32	hard	yes	yes	yes
+  * Meteor Lake PCH-S (PCH)	0x7f23	32	hard	yes	yes	yes
++ * Birch Stream (SOC)		0x5796	32	hard	yes	yes	yes
+  *
+  * Features supported by this driver:
+  * Software PEC				no
+@@ -231,6 +232,7 @@
+ #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS		0x4da3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS		0x51a3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS		0x54a3
++#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS		0x5796
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
+ #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS		0x7a23
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS		0x7aa3
+@@ -679,15 +681,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ 		return result ? priv->status : -ETIMEDOUT;
+ 	}
+ 
+-	for (i = 1; i <= len; i++) {
+-		if (i == len && read_write == I2C_SMBUS_READ)
+-			smbcmd |= SMBHSTCNT_LAST_BYTE;
+-		outb_p(smbcmd, SMBHSTCNT(priv));
+-
+-		if (i == 1)
+-			outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
+-			       SMBHSTCNT(priv));
++	if (len == 1 && read_write == I2C_SMBUS_READ)
++		smbcmd |= SMBHSTCNT_LAST_BYTE;
++	outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ 
++	for (i = 1; i <= len; i++) {
+ 		status = i801_wait_byte_done(priv);
+ 		if (status)
+ 			return status;
+@@ -710,9 +708,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ 			data->block[0] = len;
+ 		}
+ 
+-		/* Retrieve/store value in SMBBLKDAT */
+-		if (read_write == I2C_SMBUS_READ)
++		if (read_write == I2C_SMBUS_READ) {
+ 			data->block[i] = inb_p(SMBBLKDAT(priv));
++			if (i == len - 1)
++				outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
++		}
++
+ 		if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
+ 			outb_p(data->block[i+1], SMBBLKDAT(priv));
+ 
+@@ -1044,6 +1045,7 @@ static const struct pci_device_id i801_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS,	FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,	FEATURES_ICH5 | FEATURE_TCO_CNL) },
++	{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 937f7eebe9067..ef8ef3e270f8a 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -264,6 +264,9 @@ struct pxa_i2c {
+ 	u32			hs_mask;
+ 
+ 	struct i2c_bus_recovery_info recovery;
++	struct pinctrl		*pinctrl;
++	struct pinctrl_state	*pinctrl_default;
++	struct pinctrl_state	*pinctrl_recovery;
+ };
+ 
+ #define _IBMR(i2c)	((i2c)->reg_ibmr)
+@@ -1300,12 +1303,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
+ 	 */
+ 	gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
+ 	gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
++
++	WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
+ }
+ 
+ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ {
+ 	struct pxa_i2c *i2c = adap->algo_data;
+-	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ 	u32 isr;
+ 
+ 	/*
+@@ -1319,7 +1323,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ 		i2c_pxa_do_reset(i2c);
+ 	}
+ 
+-	WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
++	WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
+ 
+ 	dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
+ 	        readl(_IBMR(i2c)), readl(_ISR(i2c)));
+@@ -1341,20 +1345,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
+ 	if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
+ 		return 0;
+ 
+-	bri->pinctrl = devm_pinctrl_get(dev);
+-	if (PTR_ERR(bri->pinctrl) == -ENODEV) {
+-		bri->pinctrl = NULL;
++	i2c->pinctrl = devm_pinctrl_get(dev);
++	if (PTR_ERR(i2c->pinctrl) == -ENODEV)
++		i2c->pinctrl = NULL;
++	if (IS_ERR(i2c->pinctrl))
++		return PTR_ERR(i2c->pinctrl);
++
++	if (!i2c->pinctrl)
++		return 0;
++
++	i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
++						    PINCTRL_STATE_DEFAULT);
++	i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
++
++	if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
++		dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
++			 PTR_ERR(i2c->pinctrl_default),
++			 PTR_ERR(i2c->pinctrl_recovery));
++		return 0;
++	}
++
++	/*
++	 * Claiming GPIOs can influence the pinmux state, and may glitch the
++	 * I2C bus. Do this carefully.
++	 */
++	bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
++	if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
++		return -EPROBE_DEFER;
++	if (IS_ERR(bri->scl_gpiod)) {
++		dev_info(dev, "missing scl gpio recovery information: %pe\n",
++			 bri->scl_gpiod);
++		return 0;
++	}
++
++	/*
++	 * We have SCL. Pull SCL low and wait a bit so that SDA glitches
++	 * have no effect.
++	 */
++	gpiod_direction_output(bri->scl_gpiod, 0);
++	udelay(10);
++	bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
++
++	/* Wait a bit in case of a SDA glitch, and then release SCL. */
++	udelay(10);
++	gpiod_direction_output(bri->scl_gpiod, 1);
++
++	if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
++		return -EPROBE_DEFER;
++
++	if (IS_ERR(bri->sda_gpiod)) {
++		dev_info(dev, "missing sda gpio recovery information: %pe\n",
++			 bri->sda_gpiod);
+ 		return 0;
+ 	}
+-	if (IS_ERR(bri->pinctrl))
+-		return PTR_ERR(bri->pinctrl);
+ 
+ 	bri->prepare_recovery = i2c_pxa_prepare_recovery;
+ 	bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
++	bri->recover_bus = i2c_generic_scl_recovery;
+ 
+ 	i2c->adap.bus_recovery_info = bri;
+ 
+-	return 0;
++	/*
++	 * Claiming GPIOs can change the pinmux state, which confuses the
++	 * pinctrl since pinctrl's idea of the current setting is unaffected
++	 * by the pinmux change caused by claiming the GPIO. Work around that
++	 * by switching pinctrl to the GPIO state here. We do it this way to
++	 * avoid glitching the I2C bus.
++	 */
++	pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
++
++	return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
+ }
+ 
+ static int i2c_pxa_probe(struct platform_device *dev)
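
The recovery setup above claims SCL first, drives it low, and only then claims SDA, with settle delays, so a pinmux glitch while the GPIOs are being claimed cannot look like a START condition on the bus. A userspace sketch of that ordering; set_line() and usleep() are stand-ins for the gpiod calls and kernel delays, not the real API:

/* Glitch-free claiming order for recovery GPIOs. */
#include <stdio.h>
#include <unistd.h>

static void set_line(const char *name, int level)
{
	printf("%s -> %d\n", name, level);
}

int main(void)
{
	set_line("SCL", 0);	/* hold SCL low: SDA glitches are now harmless */
	usleep(10);
	set_line("SDA", 1);	/* claim SDA as open-drain, released high */
	usleep(10);		/* let any claim-time glitch settle */
	set_line("SCL", 1);	/* release SCL; bus is idle again */
	return 0;
}
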
+diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+index fa6020dced595..85e035e7a1d75 100644
+--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
++++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
++	if (clk_freq == 0) {
++		dev_err(dev, "clock-frequency is set to 0 in DT\n");
++		return -EINVAL;
++	}
++
+ 	if (of_get_child_count(np) > 1) {
+ 		dev_err(dev, "P2WI only supports one slave device\n");
+ 		return -EINVAL;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 60746652fd525..7f30bcceebaed 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ struct i2c_client *
+ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+ {
+-	struct i2c_client	*client;
+-	int			status;
++	struct i2c_client *client;
++	bool need_put = false;
++	int status;
+ 
+ 	client = kzalloc(sizeof *client, GFP_KERNEL);
+ 	if (!client)
+@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 	client->dev.fwnode = info->fwnode;
+ 
+ 	device_enable_async_suspend(&client->dev);
+-	i2c_dev_set_name(adap, client, info);
+ 
+ 	if (info->swnode) {
+ 		status = device_add_software_node(&client->dev, info->swnode);
+@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 		}
+ 	}
+ 
++	i2c_dev_set_name(adap, client, info);
+ 	status = device_register(&client->dev);
+ 	if (status)
+ 		goto out_remove_swnode;
+@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 
+ out_remove_swnode:
+ 	device_remove_software_node(&client->dev);
++	need_put = true;
+ out_err_put_of_node:
+ 	of_node_put(info->of_node);
+ out_err:
+@@ -1000,7 +1002,10 @@ out_err:
+ 		"Failed to register i2c client %s at 0x%02x (%d)\n",
+ 		client->name, client->addr, status);
+ out_err_silent:
+-	kfree(client);
++	if (need_put)
++		put_device(&client->dev);
++	else
++		kfree(client);
+ 	return ERR_PTR(status);
+ }
+ EXPORT_SYMBOL_GPL(i2c_new_client_device);
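
The need_put flag above exists because once initialization has progressed far enough, a refcounted device may only die through put_device(), which runs its release callback; a bare kfree() is only safe before that point. A minimal refcounting sketch with hypothetical names, not the driver-core API:

/* Why the error path switches from kfree() to put_device(). */
#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refs;
	void (*release)(struct dev *);
};

static void dev_release(struct dev *d)
{
	printf("release callback frees the device\n");
	free(d);
}

static void put_dev(struct dev *d)
{
	if (--d->refs == 0)
		d->release(d);
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->refs = 1;
	d->release = dev_release;	/* set during "initialization" */
	put_dev(d);			/* correct: runs the release hook */
	/* a bare free(d) here would skip the release hook entirely */
	return 0;
}
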
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 1247e6e6e9751..05b8b8dfa9bdd 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+  */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+-	return system_state > SYSTEM_RUNNING && irqs_disabled();
++	return system_state > SYSTEM_RUNNING && !preemptible();
+ }
+ 
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
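
The change from irqs_disabled() to !preemptible() widens the atomic-transfer test: preemptible() is false whenever preemption is disabled for any reason, not only when interrupts are off. A truth-table sketch of that relationship, modeled in plain C rather than the kernel macros:

/* preemptible() is true only with preempt count zero AND IRQs on,
 * so !preemptible() is strictly broader than irqs_disabled().
 */
#include <stdbool.h>
#include <stdio.h>

static bool preemptible(int preempt_count, bool irqs_on)
{
	return preempt_count == 0 && irqs_on;
}

int main(void)
{
	printf("pc=0 irqs_on=1 -> atomic? %d\n", !preemptible(0, true));
	printf("pc=0 irqs_on=0 -> atomic? %d\n", !preemptible(0, false));
	printf("pc=1 irqs_on=1 -> atomic? %d\n", !preemptible(1, true));
	return 0;
}
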
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index a01b59e3599b5..7d337380a05d9 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ 			return -EINVAL;
+ 
+-		rdwr_pa = memdup_user(rdwr_arg.msgs,
+-				      rdwr_arg.nmsgs * sizeof(struct i2c_msg));
++		rdwr_pa = memdup_array_user(rdwr_arg.msgs,
++					    rdwr_arg.nmsgs, sizeof(struct i2c_msg));
+ 		if (IS_ERR(rdwr_pa))
+ 			return PTR_ERR(rdwr_pa);
+ 
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 01610fa5b0ccf..cfa5d53e5be0e 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -192,7 +192,7 @@
+ #define SLV_STATUS1_HJ_DIS		BIT(18)
+ #define SLV_STATUS1_MR_DIS		BIT(17)
+ #define SLV_STATUS1_PROT_ERR		BIT(16)
+-#define SLV_STATUS1_DA(x)		(((s) & GENMASK(15, 9)) >> 9)
++#define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
+ #define SLV_STATUS1_HAS_DA		BIT(8)
+ #define SLV_STATUS1_DDR_RX_FULL		BIT(7)
+ #define SLV_STATUS1_DDR_TX_FULL		BIT(6)
+@@ -1624,13 +1624,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ 	/* Device ID0 is reserved to describe this master. */
+ 	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ 	master->free_rr_slots = GENMASK(master->maxdevs, 1);
++	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
++	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+ 
+ 	val = readl(master->regs + CONF_STATUS1);
+ 	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ 	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ 	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+-	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+-	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+ 
+ 	spin_lock_init(&master->ibi.lock);
+ 	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 97bb49ff5b53b..47b9b4d4ed3fc 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	/* use a bitmap for faster free slot search */
+-	hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+-	if (!hci->DAT_data)
+-		return -ENOMEM;
+-
+-	/* clear them */
+-	for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+-		dat_w0_write(dat_idx, 0);
+-		dat_w1_write(dat_idx, 0);
++	if (!hci->DAT_data) {
++		/* use a bitmap for faster free slot search */
++		hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
++		if (!hci->DAT_data)
++			return -ENOMEM;
++
++		/* clear them */
++		for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
++			dat_w0_write(dat_idx, 0);
++			dat_w1_write(dat_idx, 0);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ 	unsigned int dat_idx;
++	int ret;
+ 
++	if (!hci->DAT_data) {
++		ret = hci_dat_v1_init(hci);
++		if (ret)
++			return ret;
++	}
+ 	dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ 	if (dat_idx >= hci->DAT_entries)
+ 		return -ENOENT;
+@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ 	dat_w0_write(dat_idx, 0);
+ 	dat_w1_write(dat_idx, 0);
+-	__clear_bit(dat_idx, hci->DAT_data);
++	if (hci->DAT_data)
++		__clear_bit(dat_idx, hci->DAT_data);
+ }
+ 
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 2990ac9eaade7..71b5dbe45c45c 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -734,7 +734,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ 	unsigned int i;
+ 	bool handled = false;
+ 
+-	for (i = 0; mask && i < 8; i++) {
++	for (i = 0; mask && i < rings->total; i++) {
+ 		struct hci_rh_data *rh;
+ 		u32 status;
+ 
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 6c43992c8cf6b..aa79334cb06c7 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -93,6 +93,7 @@
+ #define SVC_I3C_MINTMASKED   0x098
+ #define SVC_I3C_MERRWARN     0x09C
+ #define   SVC_I3C_MERRWARN_NACK BIT(2)
++#define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+ #define SVC_I3C_MDMACTRL     0x0A0
+ #define SVC_I3C_MDATACTRL    0x0AC
+ #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
+  * @ibi.slots: Available IBI slots
+  * @ibi.tbq_slot: To be queued IBI slot
+  * @ibi.lock: IBI lock
++ * @lock: Transfer lock; serializes the IBI work thread and the transfer callbacks from the master
+  */
+ struct svc_i3c_master {
+ 	struct i3c_master_controller base;
+@@ -203,6 +205,7 @@ struct svc_i3c_master {
+ 		/* Prevent races within IBI handlers */
+ 		spinlock_t lock;
+ 	} ibi;
++	struct mutex lock;
+ };
+ 
+ /**
+@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
+ 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
++
++		/* Ignore timeout error */
++		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
++			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
++				mstatus, merrwarn);
++			return false;
++		}
++
+ 		dev_err(master->dev,
+ 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ 			mstatus, merrwarn);
+@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 	struct i3c_ibi_slot *slot;
+ 	unsigned int count;
+ 	u32 mdatactrl;
++	int ret, val;
+ 	u8 *buf;
+ 
+ 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 	slot->len = 0;
+ 	buf = slot->data;
+ 
++	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
++						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
++	if (ret) {
++		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++		return ret;
++	}
++
+ 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
+ 	       slot->len < SVC_I3C_FIFO_SIZE) {
+ 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	u32 status, val;
+ 	int ret;
+ 
++	mutex_lock(&master->lock);
+ 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ 	if (ret) {
+ 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
++		svc_i3c_master_emit_stop(master);
+ 		goto reenable_ibis;
+ 	}
+ 
+@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 
+ reenable_ibis:
+ 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
++	mutex_unlock(&master->lock);
+ }
+ 
+ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+ {
+ 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+-	u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
++	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+ 
+ 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ 		return IRQ_NONE;
+@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 	u32 reg;
+ 	int ret;
+ 
++	/* Clear the SVC_I3C_MINT_IBIWON w1c bit */
++	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ 	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ 	       xfer_type |
+ 	       SVC_I3C_MCTRL_IBIRESP_NACK |
+@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 		goto emit_stop;
+ 	}
+ 
++	/*
++	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
++	 * with I3C Target Address.
++	 *
++	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
++	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
++	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
++	 * a Hot-Join Request has been made.
++	 *
++	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON is set,
++	 * return failure and yield to the event handlers above.
++	 */
++	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
++		ret = -ENXIO;
++		goto emit_stop;
++	}
++
+ 	if (rnw)
+ 		ret = svc_i3c_master_read(master, in, xfer_len);
+ 	else
+@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ 	cmd->read_len = 0;
+ 	cmd->continued = false;
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	kfree(buf);
+@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ 	cmd->read_len = read_len;
+ 	cmd->continued = false;
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	if (cmd->read_len != xfer_len)
+ 		ccc->dests[0].payload.len = cmd->read_len;
+@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ 		cmd->continued = (i + 1) < nxfers;
+ 	}
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	svc_i3c_master_free_xfer(xfer);
+@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ 		cmd->continued = (i + 1 < nxfers);
+ 	}
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	svc_i3c_master_free_xfer(xfer);
+@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+ 
+ 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
++	mutex_init(&master->lock);
++
+ 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ 	if (ret)
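
Two changes cooperate in the svc-i3c hunks: every transfer path now takes master->lock so the IBI work thread and transfer submissions cannot interleave, and a transfer that loses bus arbitration to an In-Band Interrupt (IBIWON set in MSTATUS) fails with -ENXIO instead of consuming IBI payload as read data. A schematic of the post-start check; the bit position is illustrative, not the hardware layout:

/* Lost-arbitration check after issuing a START. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MSTATUS_IBIWON	(1u << 13)	/* illustrative bit */

static int start_xfer(uint32_t mstatus)
{
	if (mstatus & MSTATUS_IBIWON)
		return -ENXIO;	/* yield: let the IBI handler run first */
	return 0;		/* proceed with the read/write */
}

int main(void)
{
	printf("quiet bus:   %d\n", start_xfer(0));
	printf("IBI arrived: %d\n", start_xfer(MSTATUS_IBIWON));
	return 0;
}
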
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 48f02dcc81c1b..70011fdbf5f63 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -706,6 +706,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ 	struct stm32_adc_priv *priv;
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = pdev->dev.of_node;
++	const struct of_device_id *of_id;
++
+ 	struct resource *res;
+ 	u32 max_rate;
+ 	int ret;
+@@ -718,8 +720,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	platform_set_drvdata(pdev, &priv->common);
+ 
+-	priv->cfg = (const struct stm32_adc_priv_cfg *)
+-		of_match_device(dev->driver->of_match_table, dev)->data;
++	of_id = of_match_device(dev->driver->of_match_table, dev);
++	if (!of_id)
++		return -ENODEV;
++
++	priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
+ 	priv->nb_adc_max = priv->cfg->num_adcs;
+ 	spin_lock_init(&priv->common.lock);
+ 
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 08732e1ac9662..c132a9c073bff 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -3,6 +3,7 @@
+  * Copyright(c) 2015 - 2019 Intel Corporation.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/pci.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
+ 	return speed;
+ }
+ 
+-/* return the PCIe link speed from the given link status */
+-static u32 extract_width(u16 linkstat)
+-{
+-	return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+-}
+-
+ /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+ static void update_lbus_info(struct hfi1_devdata *dd)
+ {
+@@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
+ 		return;
+ 	}
+ 
+-	dd->lbus_width = extract_width(linkstat);
++	dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ 	dd->lbus_speed = extract_speed(linkstat);
+ 	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ 		 "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 9b3935042459e..8064959a95acd 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -1060,6 +1060,16 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ 	if (WARN_ON(rc))
+ 		goto err_remove_lhs;
+ 
++	/*
++	 * If the original area has been filled into a domain, domains_itree
++	 * has to be updated.
++	 */
++	if (area->storage_domain) {
++		interval_tree_remove(&area->pages_node, &pages->domains_itree);
++		interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
++		interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
++	}
++
+ 	lhs->storage_domain = area->storage_domain;
+ 	lhs->pages = area->pages;
+ 	rhs->storage_domain = area->storage_domain;
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 03c58e50cc44f..3d882cc605542 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -221,6 +221,9 @@ static ssize_t device_name_show(struct device *dev,
+ static int set_device_name(struct led_netdev_data *trigger_data,
+ 			   const char *name, size_t size)
+ {
++	if (size >= IFNAMSIZ)
++		return -EINVAL;
++
+ 	cancel_delayed_work_sync(&trigger_data->work);
+ 
+ 	mutex_lock(&trigger_data->lock);
+@@ -263,9 +266,6 @@ static ssize_t device_name_store(struct device *dev,
+ 	struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ 	int ret;
+ 
+-	if (size >= IFNAMSIZ)
+-		return -EINVAL;
+-
+ 	ret = set_device_name(trigger_data, buf, size);
+ 
+ 	if (ret < 0)
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 0cac5bead84fa..d4eec09009809 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+ 	return 0;
+ 
+ out:
++	put_device(&dev->dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 656b6b71c7682..1ae37e693de04 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	return 0;
+ 
+ err:
+-	put_device(&mdev->dev);
++	mcb_free_dev(mdev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bc309e41d074a..486e1180cc3a3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -254,7 +254,7 @@ enum evict_result {
+ 
+ typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
+ 
+-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
++static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
+ {
+ 	unsigned long tested = 0;
+ 	struct list_head *h = lru->cursor;
+@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
+ 
+ 		h = h->next;
+ 
+-		cond_resched();
++		if (!no_sleep)
++			cond_resched();
+ 	}
+ 
+ 	return NULL;
+@@ -382,7 +383,10 @@ struct dm_buffer {
+  */
+ 
+ struct buffer_tree {
+-	struct rw_semaphore lock;
++	union {
++		struct rw_semaphore lock;
++		rwlock_t spinlock;
++	} u;
+ 	struct rb_root root;
+ } ____cacheline_aligned_in_smp;
+ 
+@@ -393,9 +397,12 @@ struct dm_buffer_cache {
+ 	 * on the locks.
+ 	 */
+ 	unsigned int num_locks;
++	bool no_sleep;
+ 	struct buffer_tree trees[];
+ };
+ 
++static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
++
+ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+ {
+ 	return dm_hash_locks_index(block, num_locks);
+@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+ 
+ static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+-	down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++	else
++		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+ 
+ static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+-	up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++	else
++		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+ 
+ static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+-	down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++	else
++		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+ 
+ static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+-	up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++	else
++		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+ 
+ /*
+@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
+ 
+ static void __lh_lock(struct lock_history *lh, unsigned int index)
+ {
+-	if (lh->write)
+-		down_write(&lh->cache->trees[index].lock);
+-	else
+-		down_read(&lh->cache->trees[index].lock);
++	if (lh->write) {
++		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++			write_lock_bh(&lh->cache->trees[index].u.spinlock);
++		else
++			down_write(&lh->cache->trees[index].u.lock);
++	} else {
++		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++			read_lock_bh(&lh->cache->trees[index].u.spinlock);
++		else
++			down_read(&lh->cache->trees[index].u.lock);
++	}
+ }
+ 
+ static void __lh_unlock(struct lock_history *lh, unsigned int index)
+ {
+-	if (lh->write)
+-		up_write(&lh->cache->trees[index].lock);
+-	else
+-		up_read(&lh->cache->trees[index].lock);
++	if (lh->write) {
++		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
++		else
++			up_write(&lh->cache->trees[index].u.lock);
++	} else {
++		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
++		else
++			up_read(&lh->cache->trees[index].u.lock);
++	}
+ }
+ 
+ /*
+@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
+ 	return le_to_buffer(le);
+ }
+ 
+-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
++static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
+ {
+ 	unsigned int i;
+ 
+ 	bc->num_locks = num_locks;
++	bc->no_sleep = no_sleep;
+ 
+ 	for (i = 0; i < bc->num_locks; i++) {
+-		init_rwsem(&bc->trees[i].lock);
++		if (no_sleep)
++			rwlock_init(&bc->trees[i].u.spinlock);
++		else
++			init_rwsem(&bc->trees[i].u.lock);
+ 		bc->trees[i].root = RB_ROOT;
+ 	}
+ 
+@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
+ 	struct lru_entry *le;
+ 	struct dm_buffer *b;
+ 
+-	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
++	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
+ 	if (!le)
+ 		return NULL;
+ 
+@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
+ 	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
+ 
+ 	while (true) {
+-		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
++		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
+ 		if (!le)
+ 			break;
+ 
+@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
+ {
+ 	unsigned int i;
+ 
++	BUG_ON(bc->no_sleep);
+ 	for (i = 0; i < bc->num_locks; i++) {
+-		down_write(&bc->trees[i].lock);
++		down_write(&bc->trees[i].u.lock);
+ 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
+-		up_write(&bc->trees[i].lock);
++		up_write(&bc->trees[i].u.lock);
+ 	}
+ }
+ 
+@@ -979,8 +1017,6 @@ struct dm_bufio_client {
+ 	struct dm_buffer_cache cache; /* must be last member */
+ };
+ 
+-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+-
+ /*----------------------------------------------------------------*/
+ 
+ #define dm_bufio_in_request()	(!!current->bio_list)
+@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
+ 	if (need_submit)
+ 		submit_io(b, REQ_OP_READ, read_endio);
+ 
+-	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
++	if (nf != NF_GET)	/* we already tested this condition above */
++		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+ 
+ 	if (b->read_error) {
+ 		int error = blk_status_to_errno(b->read_error);
+@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 		r = -ENOMEM;
+ 		goto bad_client;
+ 	}
+-	cache_init(&c->cache, num_locks);
++	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
+ 
+ 	c->bdev = bdev;
+ 	c->block_size = block_size;
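
The no_sleep path above replaces the per-tree rw_semaphores with bh-safe rwlocks inside a union, selected at run time by the client flag and gated by a static key so the common case stays branch-free. A userspace sketch of the union-of-locks idea; pthread primitives stand in for the kernel pair, and the spinlock here is not reader-writer. Build with: cc demo.c -lpthread

/* Dual-mode tree lock: one union, two lock types, chosen per client. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tree {
	bool no_sleep;
	union {
		pthread_rwlock_t rwsem;		/* sleeping, like rw_semaphore */
		pthread_spinlock_t spin;	/* non-sleeping stand-in */
	} u;
};

static void tree_init(struct tree *t, bool no_sleep)
{
	t->no_sleep = no_sleep;
	if (no_sleep)
		pthread_spin_init(&t->u.spin, PTHREAD_PROCESS_PRIVATE);
	else
		pthread_rwlock_init(&t->u.rwsem, NULL);
}

static void tree_read_lock(struct tree *t)
{
	if (t->no_sleep)
		pthread_spin_lock(&t->u.spin);
	else
		pthread_rwlock_rdlock(&t->u.rwsem);
}

static void tree_read_unlock(struct tree *t)
{
	if (t->no_sleep)
		pthread_spin_unlock(&t->u.spin);
	else
		pthread_rwlock_unlock(&t->u.rwsem);
}

int main(void)
{
	struct tree t;

	tree_init(&t, true);
	tree_read_lock(&t);
	printf("locked in no_sleep mode\n");
	tree_read_unlock(&t);
	return 0;
}
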
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dc0463bf3c2cf..0fadb656a2158 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1700,11 +1700,17 @@ retry:
+ 		order = min(order, remaining_order);
+ 
+ 		while (order > 0) {
++			if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
++					(1 << order) > dm_crypt_pages_per_client))
++				goto decrease_order;
+ 			pages = alloc_pages(gfp_mask
+ 				| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+ 				order);
+-			if (likely(pages != NULL))
++			if (likely(pages != NULL)) {
++				percpu_counter_add(&cc->n_allocated_pages, 1 << order);
+ 				goto have_pages;
++			}
++decrease_order:
+ 			order--;
+ 		}
+ 
+@@ -1742,10 +1748,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
+ 
+ 	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+ 		bio_for_each_folio_all(fi, clone) {
+-			if (folio_test_large(fi.folio))
++			if (folio_test_large(fi.folio)) {
++				percpu_counter_sub(&cc->n_allocated_pages,
++						1 << folio_order(fi.folio));
+ 				folio_put(fi.folio);
+-			else
++			} else {
+ 				mempool_free(&fi.folio->page, &cc->page_pool);
++			}
+ 		}
+ 	}
+ }
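
The dm-crypt hunk makes high-order allocations honor the per-client page budget: before trying order N it checks that the counter plus 2^N pages stays within dm_crypt_pages_per_client, charges the counter on success, and un-charges when large folios are freed. A small sketch of that decreasing-order loop; plain integers stand in for the percpu counter and allocator:

/* Decreasing-order allocation under a page budget. */
#include <stdio.h>

#define BUDGET	16	/* stand-in for dm_crypt_pages_per_client */

static long allocated;	/* stand-in for cc->n_allocated_pages */

static int alloc_order(unsigned order)
{
	while (order > 0) {
		if (allocated + (1L << order) <= BUDGET) {
			allocated += 1L << order;	/* charge the budget */
			return order;
		}
		order--;	/* too expensive: try a smaller block */
	}
	allocated += 1;		/* order-0 fallback always proceeds */
	return 0;
}

int main(void)
{
	printf("got order %d (allocated=%ld)\n", alloc_order(4), allocated);
	printf("got order %d (allocated=%ld)\n", alloc_order(4), allocated);
	return 0;
}
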
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 3ef9f018da60c..2099c755119e3 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ 	if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
+ 				 data, 1 << v->data_dev_block_bits,
+-				 verity_io_real_digest(v, io))))
++				 verity_io_real_digest(v, io), true)))
+ 		return 0;
+ 
+ 	return memcmp(verity_io_real_digest(v, io), want_digest,
+@@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ 	/* Always re-validate the corrected block against the expected hash */
+ 	r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
+ 			1 << v->data_dev_block_bits,
+-			verity_io_real_digest(v, io));
++			verity_io_real_digest(v, io), true);
+ 	if (unlikely(r < 0))
+ 		return r;
+ 
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 26adcfea03022..e115fcfe723c9 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+  * Wrapper for crypto_ahash_init, which handles verity salting.
+  */
+ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+-				struct crypto_wait *wait)
++				struct crypto_wait *wait, bool may_sleep)
+ {
+ 	int r;
+ 
+ 	ahash_request_set_tfm(req, v->tfm);
+-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+-					CRYPTO_TFM_REQ_MAY_BACKLOG,
+-					crypto_req_done, (void *)wait);
++	ahash_request_set_callback(req,
++		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
++		crypto_req_done, (void *)wait);
+ 	crypto_init_wait(wait);
+ 
+ 	r = crypto_wait_req(crypto_ahash_init(req), wait);
+ 
+ 	if (unlikely(r < 0)) {
+-		DMERR("crypto_ahash_init failed: %d", r);
++		if (r != -ENOMEM)
++			DMERR("crypto_ahash_init failed: %d", r);
+ 		return r;
+ 	}
+ 
+@@ -179,12 +180,12 @@ out:
+ }
+ 
+ int verity_hash(struct dm_verity *v, struct ahash_request *req,
+-		const u8 *data, size_t len, u8 *digest)
++		const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ {
+ 	int r;
+ 	struct crypto_wait wait;
+ 
+-	r = verity_hash_init(v, req, &wait);
++	r = verity_hash_init(v, req, &wait, may_sleep);
+ 	if (unlikely(r < 0))
+ 		goto out;
+ 
+@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+ 
+ 		r = verity_hash(v, verity_io_hash_req(v, io),
+ 				data, 1 << v->hash_dev_block_bits,
+-				verity_io_real_digest(v, io));
++				verity_io_real_digest(v, io), !io->in_tasklet);
+ 		if (unlikely(r < 0))
+ 			goto release_ret_r;
+ 
+@@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 			continue;
+ 		}
+ 
+-		r = verity_hash_init(v, req, &wait);
++		r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ 		if (unlikely(r < 0))
+ 			return r;
+ 
+@@ -652,7 +653,7 @@ static void verity_tasklet(unsigned long data)
+ 
+ 	io->in_tasklet = true;
+ 	err = verity_verify_io(io);
+-	if (err == -EAGAIN) {
++	if (err == -EAGAIN || err == -ENOMEM) {
+ 		/* fallback to retrying with work-queue */
+ 		INIT_WORK(&io->work, verity_work);
+ 		queue_work(io->v->verify_wq, &io->work);
+@@ -1033,7 +1034,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ 		goto out;
+ 
+ 	r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+-			v->zero_digest);
++			v->zero_digest, true);
+ 
+ out:
+ 	kfree(req);
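
The may_sleep plumbing above lets the same hash helpers serve both contexts: workqueue callers keep MAY_SLEEP | MAY_BACKLOG, while tasklet callers pass no flags and treat -ENOMEM like -EAGAIN by re-queuing the I/O onto the workqueue. A compact sketch of that flag selection and fallback; the flag values are illustrative, not the crypto API's:

/* Context-dependent crypto flags plus tasklet->workqueue fallback. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_MAY_SLEEP	0x1	/* illustrative */
#define REQ_MAY_BACKLOG	0x2	/* illustrative */

static unsigned int hash_flags(bool may_sleep)
{
	return may_sleep ? (REQ_MAY_SLEEP | REQ_MAY_BACKLOG) : 0;
}

static void handle_tasklet_result(int err)
{
	if (err == -EAGAIN || err == -ENOMEM)
		printf("requeue to workqueue (may sleep there)\n");
	else
		printf("done in tasklet, err=%d\n", err);
}

int main(void)
{
	printf("tasklet flags:   0x%x\n", hash_flags(false));
	printf("workqueue flags: 0x%x\n", hash_flags(true));
	handle_tasklet_result(-ENOMEM);
	return 0;
}
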
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 2f555b4203679..f96f4e281ee4a 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 					      u8 *data, size_t len));
+ 
+ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+-		       const u8 *data, size_t len, u8 *digest);
++		       const u8 *data, size_t len, u8 *digest, bool may_sleep);
+ 
+ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ 				 sector_t block, u8 *digest, bool *is_zero);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 78d51dddf3a00..34b7196d9634c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -449,7 +449,7 @@ void mddev_suspend(struct mddev *mddev)
+ 	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ 	percpu_ref_kill(&mddev->active_io);
+ 
+-	if (mddev->pers->prepare_suspend)
++	if (mddev->pers && mddev->pers->prepare_suspend)
+ 		mddev->pers->prepare_suspend(mddev);
+ 
+ 	wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 49e0d9a095302..6f8fbd82e21c8 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3097,7 +3097,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ 		try_fmt->code = sensor->internal_csi_format->code;
+ 		try_fmt->field = V4L2_FIELD_NONE;
+ 
+-		if (ssd != sensor->pixel_array)
++		if (ssd == sensor->pixel_array)
+ 			continue;
+ 
+ 		try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
+diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
+index 5838fcda92fd4..0b1a64958d714 100644
+--- a/drivers/media/i2c/ccs/ccs-quirk.h
++++ b/drivers/media/i2c/ccs/ccs-quirk.h
+@@ -32,12 +32,10 @@ struct ccs_sensor;
+  *		@reg: Pointer to the register to access
+  *		@value: Register value, set by the caller on write, or
+  *			by the quirk on read
+- *
+- * @flags: Quirk flags
+- *
+  *		@return: 0 on success, -ENOIOCTLCMD if no register
+  *			 access may be done by the caller (default read
+  *			 value is zero), else negative error code on error
++ * @flags: Quirk flags
+  */
+ struct ccs_quirk {
+ 	int (*limits)(struct ccs_sensor *sensor);
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 74edcc76d12f4..6e1a0614e6d06 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -8,6 +8,7 @@
+  *  All rights reserved.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <media/i2c/adv7604.h>
+ #include <media/i2c/adv7842.h>
+@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
+ 	pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
+ 	cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
+ 			capa, get_link_speed(capa),
+-			(capa & PCI_EXP_LNKCAP_MLW) >> 4);
++			FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ 	cobalt_info("PCIe link control 0x%04x\n", ctrl);
+ 	cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
+ 		    stat, get_link_speed(stat),
+-		    (stat & PCI_EXP_LNKSTA_NLW) >> 4);
++		    FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
+ 
+ 	/* Bus */
+ 	pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
+ 	cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
+ 			capa, get_link_speed(capa),
+-			(capa & PCI_EXP_LNKCAP_MLW) >> 4);
++			FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ 
+ 	/* Slot */
+ 	pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
+ 	if (!pci_is_pcie(pci_dev))
+ 		return 0;
+ 	pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
+-	return (link & PCI_EXP_LNKSTA_NLW) >> 4;
++	return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
+ }
+ 
+ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+ 	if (!pci_is_pcie(pci_dev))
+ 		return 0;
+ 	pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
+-	return (link & PCI_EXP_LNKCAP_MLW) >> 4;
++	return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
+ }
+ 
+ static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
+diff --git a/drivers/media/pci/intel/ipu-bridge.h b/drivers/media/pci/intel/ipu-bridge.h
+index 1ff0b2d04d929..1ed53d51e16a1 100644
+--- a/drivers/media/pci/intel/ipu-bridge.h
++++ b/drivers/media/pci/intel/ipu-bridge.h
+@@ -103,7 +103,7 @@ struct ipu_property_names {
+ struct ipu_node_names {
+ 	char port[7];
+ 	char endpoint[11];
+-	char remote_port[7];
++	char remote_port[9];
+ 	char vcm[16];
+ };
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 0f8ac29d038db..23acc387be5f0 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -355,9 +355,6 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ 		u8 dt_id = vc;
+ 
+ 		if (tg->enabled) {
+-			/* Config Test Generator */
+-			vc = 0xa;
+-
+ 			/* configure one DT, infinite frames */
+ 			val = vc << TPG_VC_CFG0_VC_NUM;
+ 			val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
+@@ -370,14 +367,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ 
+ 			writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
+ 
+-			val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+-			val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
++			val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
++			val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
+ 
+ 			val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
+ 
+-			val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
++			val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ 			val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
+ 			val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
+@@ -449,6 +446,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ 	writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+ 
+ 	val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
++	if (vc > 3)
++		val |= 1 << CSI2_RX_CFG1_VC_MODE;
+ 	val |= 1 << CSI2_RX_CFG1_MISR_EN;
+ 	writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 04baa80494c66..4dba61b8d3f2a 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ 
+ 	settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
+ 
+-	val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
++	val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ 	for (i = 0; i < c->num_data; i++)
+ 		val |= BIT(c->data[i].pos * 2);
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 02494c89da91c..168baaa80d4e6 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -7,7 +7,6 @@
+  * Copyright (C) 2020-2021 Linaro Ltd.
+  */
+ 
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ 	return 0;
+ }
+ 
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ 	struct vfe_device *vfe = to_vfe(line);
+ 	struct vfe_output *output = &line->output;
+ 	unsigned long flags;
+ 	unsigned int i;
+-	bool done;
+-	int timeout = 0;
+-
+-	do {
+-		spin_lock_irqsave(&vfe->output_lock, flags);
+-		done = !output->gen2.active_num;
+-		spin_unlock_irqrestore(&vfe->output_lock, flags);
+-		usleep_range(10000, 20000);
+-
+-		if (timeout++ == 100) {
+-			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+-			vfe_reset(vfe);
+-			output->gen2.active_num = 0;
+-			return 0;
+-		}
+-	} while (!done);
+ 
+ 	spin_lock_irqsave(&vfe->output_lock, flags);
+ 	for (i = 0; i < output->wm_num; i++)
+ 		vfe_wm_stop(vfe, output->wm_idx[i]);
++	output->gen2.active_num = 0;
+ 	spin_unlock_irqrestore(&vfe->output_lock, flags);
+ 
+-	return 0;
++	vfe_reset(vfe);
+ }
+ 
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index f70aad2e8c237..8ddb8016434ae 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -8,7 +8,6 @@
+  * Copyright (C) 2021 Jonathan Marek
+  */
+ 
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -328,35 +327,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ 	return 0;
+ }
+ 
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ 	struct vfe_device *vfe = to_vfe(line);
+ 	struct vfe_output *output = &line->output;
+ 	unsigned long flags;
+ 	unsigned int i;
+-	bool done;
+-	int timeout = 0;
+-
+-	do {
+-		spin_lock_irqsave(&vfe->output_lock, flags);
+-		done = !output->gen2.active_num;
+-		spin_unlock_irqrestore(&vfe->output_lock, flags);
+-		usleep_range(10000, 20000);
+-
+-		if (timeout++ == 100) {
+-			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+-			vfe_reset(vfe);
+-			output->gen2.active_num = 0;
+-			return 0;
+-		}
+-	} while (!done);
+ 
+ 	spin_lock_irqsave(&vfe->output_lock, flags);
+ 	for (i = 0; i < output->wm_num; i++)
+ 		vfe_wm_stop(vfe, output->wm_idx[i]);
++	output->gen2.active_num = 0;
+ 	spin_unlock_irqrestore(&vfe->output_lock, flags);
+ 
+-	return 0;
++	vfe_reset(vfe);
+ }
+ 
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index 06c95568e5af4..965500b83d073 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
+ 		struct camss_clock *clock = &vfe->clock[i];
+ 
+ 		if (!strcmp(clock->name, "vfe0") ||
+-		    !strcmp(clock->name, "vfe1")) {
++		    !strcmp(clock->name, "vfe1") ||
++		    !strcmp(clock->name, "vfe_lite")) {
+ 			u64 min_rate = 0;
+ 			unsigned long rate;
+ 
+@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
+ 	} else {
+ 		ret = vfe_check_clock_rates(vfe);
+ 		if (ret < 0)
+-			goto error_pm_runtime_get;
++			goto error_pm_domain;
+ 	}
+ 	vfe->power_count++;
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 1ef26aea3eae6..62e2e8bd3eb52 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1627,6 +1627,12 @@ static int camss_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_cleanup;
+ 
++	ret = camss_configure_pd(camss);
++	if (ret < 0) {
++		dev_err(dev, "Failed to configure power domains: %d\n", ret);
++		goto err_cleanup;
++	}
++
+ 	ret = camss_init_subdevices(camss);
+ 	if (ret < 0)
+ 		goto err_cleanup;
+@@ -1679,12 +1685,6 @@ static int camss_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	ret = camss_configure_pd(camss);
+-	if (ret < 0) {
+-		dev_err(dev, "Failed to configure power domains: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index 3d5dadfa19009..3e85bd85066b7 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ 		memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ 		idx++;
+ 
+-		if (idx > HFI_BUFFER_TYPE_MAX)
++		if (idx >= HFI_BUFFER_TYPE_MAX)
+ 			return HFI_ERR_SESSION_INVALID_PARAMETER;
+ 
+ 		req_bytes -= sizeof(struct hfi_buffer_requirements);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 6cf74b2bc5ae3..c43839539d4dd 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
+ 	struct hfi_plat_caps *caps = core->caps, *cap;
+ 	unsigned long bit;
+ 
++	if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
++		return;
++
+ 	for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ 		cap = &caps[core->codecs_count++];
+ 		cap->codec = BIT(bit);
+@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ {
+ 	const struct hfi_profile_level *pl = data;
+ 
++	if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
++		return;
++
+ 	memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ 	cap->num_pl += num;
+ }
+@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ {
+ 	const struct hfi_capability *caps = data;
+ 
++	if (cap->num_caps + num >= MAX_CAP_ENTRIES)
++		return;
++
+ 	memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ 	cap->num_caps += num;
+ }
+@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ {
+ 	const struct raw_formats *formats = fmts;
+ 
++	if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
++		return;
++
+ 	memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ 	cap->num_fmts += num_fmts;
+ }
+@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 		rawfmts[i].buftype = fmt->buffer_type;
+ 		i++;
+ 
++		if (i >= MAX_FMT_ENTRIES)
++			return;
++
+ 		if (pinfo->num_planes > MAX_PLANES)
+ 			break;
+ 
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 5506a0d196ef9..90f09cc5c600d 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+ 
+ 	new_wr_idx = wr_idx + dwords;
+ 	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
++
++	if (wr_ptr < (u32 *)queue->qmem.kva ||
++	    wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
++		return -EINVAL;
++
+ 	if (new_wr_idx < qsize) {
+ 		memcpy(wr_ptr, packet, dwords << 2);
+ 	} else {
+@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ 	}
+ 
+ 	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
++
++	if (rd_ptr < (u32 *)queue->qmem.kva ||
++	    rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
++		return -EINVAL;
++
+ 	dwords = *rd_ptr >> 2;
+ 	if (!dwords)
+ 		return -EINVAL;
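/* Context for the new checks (illustration, not part of the patch): both
 * ring pointers are derived from an index read out of memory shared with
 * the firmware:
 *
 *   ptr = (u32 *)(queue->qmem.kva + (idx << 2));
 *
 * A corrupted index could therefore land outside the queue buffer, so any
 * pointer not within [kva, kva + size - sizeof(u32)] is now rejected with
 * -EINVAL before it is dereferenced.
 */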
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 74546f7e34691..5719dda6e0f0e 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
+ 		goto fail;
+ 	}
+ 
++	if (first_if->dev.driver != interface->dev.driver) {
++		dev_err(&interface->dev, "inconsistent driver matching\n");
++		ret = -EINVAL;
++		goto fail;
++	}
++
+ 	if (ifnum == 0) {
+ 		ictx = imon_init_intf0(interface, id);
+ 		if (!ictx) {
+diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
+index 3d8488c39c561..3311099cbd573 100644
+--- a/drivers/media/rc/ir-sharp-decoder.c
++++ b/drivers/media/rc/ir-sharp-decoder.c
+@@ -15,7 +15,9 @@
+ #define SHARP_UNIT		40  /* us */
+ #define SHARP_BIT_PULSE		(8    * SHARP_UNIT) /* 320us */
+ #define SHARP_BIT_0_PERIOD	(25   * SHARP_UNIT) /* 1ms (680us space) */
+-#define SHARP_BIT_1_PERIOD	(50   * SHARP_UNIT) /* 2ms (1680ms space) */
++#define SHARP_BIT_1_PERIOD	(50   * SHARP_UNIT) /* 2ms (1680us space) */
++#define SHARP_BIT_0_SPACE	(17   * SHARP_UNIT) /* 680us space */
++#define SHARP_BIT_1_SPACE	(42   * SHARP_UNIT) /* 1680us space */
+ #define SHARP_ECHO_SPACE	(1000 * SHARP_UNIT) /* 40 ms */
+ #define SHARP_TRAILER_SPACE	(125  * SHARP_UNIT) /* 5 ms (even longer) */
+ 
+@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
+ 	.header_pulse  = 0,
+ 	.header_space  = 0,
+ 	.bit_pulse     = SHARP_BIT_PULSE,
+-	.bit_space[0]  = SHARP_BIT_0_PERIOD,
+-	.bit_space[1]  = SHARP_BIT_1_PERIOD,
++	.bit_space[0]  = SHARP_BIT_0_SPACE,
++	.bit_space[1]  = SHARP_BIT_1_SPACE,
+ 	.trailer_pulse = SHARP_BIT_PULSE,
+ 	.trailer_space = SHARP_ECHO_SPACE,
+ 	.msb_first     = 1,
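/* Arithmetic check on the new constants (illustration, not part of the
 * patch): pulse + space must add up to the documented bit period.
 *
 *   SHARP_BIT_PULSE   =  8 * 40us =  320us
 *   SHARP_BIT_0_SPACE = 17 * 40us =  680us ->  320 +  680 = 1000us = SHARP_BIT_0_PERIOD
 *   SHARP_BIT_1_SPACE = 42 * 40us = 1680us ->  320 + 1680 = 2000us = SHARP_BIT_1_PERIOD
 *
 * The old table wrongly used the full periods as the space lengths, which
 * stretched every transmitted bit by one pulse width.
 */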
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 043d23aaa3cbc..a537734832c50 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ 		if (ret < 0)
+ 			goto out_kfree_raw;
+ 
+-		count = ret;
++		/* drop trailing space */
++		if (!(ret % 2))
++			count = ret - 1;
++		else
++			count = ret;
+ 
+ 		txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
+ 		if (!txbuf) {
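/* Why an even count is trimmed (illustration, not part of the patch): raw
 * IR samples alternate pulse/space and a transmission should both start and
 * end on a pulse:
 *
 *   ret = 5 -> P S P S P    (odd: ends on a pulse, keep all 5)
 *   ret = 6 -> P S P S P S  (even: ends on a space, transmit only 5)
 *
 * A trailing space carries no signal, so dropping it does not change what
 * is sent over the air.
 */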
+diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+index b5b104ee64c99..c57771119a34b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
++++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ 	rds->ta = alt;
+ 	rds->ms = true;
+ 	snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+-		 freq / 16, ((freq & 0xf) * 10) / 16);
++		 (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
+ 	if (alt)
+ 		strscpy(rds->radiotext,
+ 			" The Radio Data System can switch between different Radio Texts ",
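/* Field-width check (illustration, not part of the patch): an RDS PS name
 * is 8 characters and "%6d.%1d" prints exactly 6 + 1 + 1 = 8 of them.  The
 * second argument is already 0..9, and the new "% 1000000" bound on the
 * first keeps it to at most 6 digits, so the output can no longer overflow
 * the field for any freq value.
 */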
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 46ed95483e222..5f5fa851ca640 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -18,6 +18,7 @@
+ 
+ #include <linux/input.h>
+ #include <linux/sched/signal.h>
++#include <linux/bitops.h>
+ 
+ #include "gspca.h"
+ 
+@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
+ 			sd->params.exposure.expMode = 2;
+ 			sd->exposure_status = EXPOSURE_NORMAL;
+ 		}
++		if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
++			return -EINVAL;
+ 		currentexp = currentexp << sd->params.exposure.gain;
+ 		sd->params.exposure.gain = 0;
+ 		/* round down current exposure to nearest value */
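/* Rationale for the new guard (illustration, not part of the patch):
 * shifting a value by its full bit width or more is undefined behaviour in
 * C.  For example, if currentexp is a 16-bit type (an assumption here),
 * BITS_PER_TYPE(currentexp) is 16 and a stored gain of 16 or more would
 * make "currentexp << gain" undefined; such gains are now rejected with
 * -EINVAL instead.
 */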
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index 699f44ffff0e4..ae5759200622c 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
++	/* LNL-M */
++	{ PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
+index 7e2cd79d17ebf..8e449cff5cec4 100644
+--- a/drivers/mfd/qcom-spmi-pmic.c
++++ b/drivers/mfd/qcom-spmi-pmic.c
+@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
+ 	struct qcom_spmi_pmic pmic;
+ };
+ 
++static DEFINE_MUTEX(pmic_spmi_revid_lock);
++
+ #define N_USIDS(n)		((void *)n)
+ 
+ static const struct of_device_id pmic_spmi_id_table[] = {
+@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
+  *
+  * This only supports PMICs with 1 or 2 USIDs.
+  */
+-static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
++static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
+ {
+-	struct spmi_device *sdev;
+-	struct qcom_spmi_dev *ctx;
+ 	struct device_node *spmi_bus;
+-	struct device_node *other_usid = NULL;
++	struct device_node *child;
+ 	int function_parent_usid, ret;
+ 	u32 pmic_addr;
+ 
+-	sdev = to_spmi_device(dev);
+-	ctx = dev_get_drvdata(&sdev->dev);
+-
+ 	/*
+ 	 * Quick return if the function device is already in the base
+ 	 * USID. This will always be hit for PMICs with only 1 USID.
+ 	 */
+-	if (sdev->usid % ctx->num_usids == 0)
++	if (sdev->usid % ctx->num_usids == 0) {
++		get_device(&sdev->dev);
+ 		return sdev;
++	}
+ 
+ 	function_parent_usid = sdev->usid;
+ 
+@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
+ 	 * device for USID 2.
+ 	 */
+ 	spmi_bus = of_get_parent(sdev->dev.of_node);
+-	do {
+-		other_usid = of_get_next_child(spmi_bus, other_usid);
+-
+-		ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+-		if (ret)
+-			return ERR_PTR(ret);
++	sdev = ERR_PTR(-ENODATA);
++	for_each_child_of_node(spmi_bus, child) {
++		ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
++		if (ret) {
++			of_node_put(child);
++			sdev = ERR_PTR(ret);
++			break;
++		}
+ 
+-		sdev = spmi_device_from_of(other_usid);
+ 		if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+-			if (!sdev)
++			sdev = spmi_device_from_of(child);
++			if (!sdev) {
+ 				/*
+-				 * If the base USID for this PMIC hasn't probed yet
+-				 * but the secondary USID has, then we need to defer
+-				 * the function driver so that it will attempt to
+-				 * probe again when the base USID is ready.
++				 * If the base USID for this PMIC hasn't been
++				 * registered yet then we need to defer.
+ 				 */
+-				return ERR_PTR(-EPROBE_DEFER);
+-			return sdev;
++				sdev = ERR_PTR(-EPROBE_DEFER);
++			}
++			of_node_put(child);
++			break;
+ 		}
+-	} while (other_usid->sibling);
++	}
+ 
+-	return ERR_PTR(-ENODATA);
++	of_node_put(spmi_bus);
++
++	return sdev;
++}
++
++static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
++{
++	struct qcom_spmi_dev *base_ctx;
++	struct spmi_device *base;
++	int ret = 0;
++
++	base = qcom_pmic_get_base_usid(sdev, ctx);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
++
++	/*
++	 * Copy revid info from base device if it has probed and is still
++	 * bound to its driver.
++	 */
++	mutex_lock(&pmic_spmi_revid_lock);
++	base_ctx = spmi_device_get_drvdata(base);
++	if (!base_ctx) {
++		ret = -EPROBE_DEFER;
++		goto out_unlock;
++	}
++	memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
++out_unlock:
++	mutex_unlock(&pmic_spmi_revid_lock);
++
++	put_device(&base->dev);
++
++	return ret;
+ }
+ 
+ static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+ 	if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	sdev = qcom_pmic_get_base_usid(dev->parent);
+-
+-	if (IS_ERR(sdev))
+-		return ERR_CAST(sdev);
+-
++	sdev = to_spmi_device(dev->parent);
+ 	spmi = dev_get_drvdata(&sdev->dev);
+ 
+ 	return &spmi->pmic;
+@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
+ 		ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ 		if (ret < 0)
+ 			return ret;
++	} else {
++		ret = pmic_spmi_get_base_revid(sdev, ctx);
++		if (ret)
++			return ret;
+ 	}
++
++	mutex_lock(&pmic_spmi_revid_lock);
+ 	spmi_device_set_drvdata(sdev, ctx);
++	mutex_unlock(&pmic_spmi_revid_lock);
+ 
+ 	return devm_of_platform_populate(&sdev->dev);
+ }
+ 
++static void pmic_spmi_remove(struct spmi_device *sdev)
++{
++	mutex_lock(&pmic_spmi_revid_lock);
++	spmi_device_set_drvdata(sdev, NULL);
++	mutex_unlock(&pmic_spmi_revid_lock);
++}
++
+ MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+ 
+ static struct spmi_driver pmic_spmi_driver = {
+ 	.probe = pmic_spmi_probe,
++	.remove = pmic_spmi_remove,
+ 	.driver = {
+ 		.name = "pmic-spmi",
+ 		.of_match_table = pmic_spmi_id_table,
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 7e1acc68d4359..af519088732d9 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -82,6 +82,7 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
+ #define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
++#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
+ 
+ static DEFINE_IDA(pci_endpoint_test_ida);
+ 
+@@ -991,6 +992,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
++	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
++	  .driver_data = (kernel_ulong_t)&default_data,
++	},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ 	  .driver_data = (kernel_ulong_t)&j721e_data,
+ 	},
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index b66aa5de2ddec..e4e6556a9840c 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2389,8 +2389,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ 			}
+ 			ret = mmc_blk_cqe_issue_flush(mq, req);
+ 			break;
+-		case REQ_OP_READ:
+ 		case REQ_OP_WRITE:
++			card->written_flag = true;
++			fallthrough;
++		case REQ_OP_READ:
+ 			if (host->cqe_enabled)
+ 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ 			else
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 4edf9057fa79d..b7754a1b8d978 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+ 	return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+ }
+ 
++static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
++{
++	return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
++}
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 89cd48fcec79f..a46ce0868fe1f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -2081,13 +2081,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
+ {
+ 	int err = 0;
+ 
++	if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
++		return 0;
++
+ 	if (_mmc_cache_enabled(host)) {
+ 		err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ 				 EXT_CSD_FLUSH_CACHE, 1,
+ 				 CACHE_FLUSH_TIMEOUT_MS);
+ 		if (err)
+-			pr_err("%s: cache flush error %d\n",
+-			       mmc_hostname(host), err);
++			pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
++		else
++			host->card->written_flag = false;
+ 	}
+ 
+ 	return err;
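/* Quirk lifecycle sketched end to end (illustration, not part of the
 * patch):
 *
 *   REQ_OP_WRITE issued    -> card->written_flag = true
 *   successful cache flush -> card->written_flag = false
 *   flush with MMC_QUIRK_BROKEN_CACHE_FLUSH and !written_flag -> skipped,
 *     because the affected eMMC only honours a flush after at least one
 *     write has occurred
 */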
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 32b64b564fb1f..cca71867bc4ad 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 		  MMC_QUIRK_TRIM_BROKEN),
+ 
+ 	/*
+-	 * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
+-	 * support being used to offload WRITE_ZEROES.
++	 * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
++	 * WRITE_ZEROES offloading. It also supports caching, but the cache can
++	 * only be flushed after a write has occurred.
+ 	 */
+ 	MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+-		  MMC_QUIRK_TRIM_BROKEN),
++		  MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+ 
+ 	/*
+ 	 * Kingston EMMC04G-M627 advertises TRIM but it does not seem to
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index ee9a25b900aec..6cdab4a0f0bb2 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+ 
+ 	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+ 	cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */
+-	cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
+ 
+ 	meson_mmc_set_response_bits(cmd, &cmd_cfg);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 109d4b010f978..d8a991b349a82 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -25,6 +25,12 @@
+ #define   GLI_9750_WT_EN_ON	    0x1
+ #define   GLI_9750_WT_EN_OFF	    0x0
+ 
++#define PCI_GLI_9750_PM_CTRL	0xFC
++#define   PCI_GLI_9750_PM_STATE	  GENMASK(1, 0)
++
++#define PCI_GLI_9750_CORRERR_MASK				0x214
++#define   PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT	  BIT(12)
++
+ #define SDHCI_GLI_9750_CFG2          0x848
+ #define   SDHCI_GLI_9750_CFG2_L1DLY    GENMASK(28, 24)
+ #define   GLI_9750_CFG2_L1DLY_VALUE    0x1F
+@@ -149,6 +155,9 @@
+ #define PCI_GLI_9755_PM_CTRL     0xFC
+ #define   PCI_GLI_9755_PM_STATE    GENMASK(1, 0)
+ 
++#define PCI_GLI_9755_CORRERR_MASK				0x214
++#define   PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT	  BIT(12)
++
+ #define SDHCI_GLI_9767_GM_BURST_SIZE			0x510
+ #define   SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET	  BIT(8)
+ 
+@@ -536,8 +545,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	struct pci_dev *pdev;
+ 	u32 value;
+ 
++	pdev = slot->chip->pdev;
++
+ 	gl9750_wt_on(host);
+ 
+ 	value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
+@@ -547,6 +560,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ 			    GLI_9750_CFG2_L1DLY_VALUE);
+ 	sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+ 
++	/* toggle PM state to allow GL9750 to enter ASPM L1.2 */
++	pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
++	value |= PCI_GLI_9750_PM_STATE;
++	pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++	value &= ~PCI_GLI_9750_PM_STATE;
++	pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++
++	/* mask the replay timer timeout of AER */
++	pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
++	value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++	pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
++
+ 	gl9750_wt_off(host);
+ }
+ 
+@@ -756,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ 	value &= ~PCI_GLI_9755_PM_STATE;
+ 	pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+ 
++	/* mask the replay timer timeout of AER */
++	pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
++	value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++	pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
++
+ 	gl9755_wt_off(pdev);
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 7cdf0f54e3a50..5d1faa8fbfbf1 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ 		return 0;
+ 	}
+ 
+-	for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
++	for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+ 
+ 		ret = device_property_read_u32(dev, td[i].otap_binding,
+ 					       &sdhci_am654->otap_del_sel[i]);
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 9ec593d52f0fa..cef0e716ad16f 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
+ 		vub300->read_only =
+ 			(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ 	} else {
++		retval = -EINVAL;
+ 		goto error5;
+ 	}
+ 	usb_set_intfdata(interface, vub300);
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 11b06fefaa0e2..c10693ba265ba 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
+ 		extra_size = 0;
+ 
+ 		/* Protection Register info */
+-		if (extp->NumProtectionFields)
++		if (extp->NumProtectionFields) {
++			struct cfi_intelext_otpinfo *otp =
++				(struct cfi_intelext_otpinfo *)&extp->extra[0];
++
+ 			extra_size += (extp->NumProtectionFields - 1) *
+-				      sizeof(struct cfi_intelext_otpinfo);
++				sizeof(struct cfi_intelext_otpinfo);
++
++			if (extp_size >= sizeof(*extp) + extra_size) {
++				int i;
++
++				/* Do some byteswapping if necessary */
++				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
++					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
++					otp->FactGroups = le16_to_cpu(otp->FactGroups);
++					otp->UserGroups = le16_to_cpu(otp->UserGroups);
++					otp++;
++				}
++			}
++		}
+ 	}
+ 
+ 	if (extp->MinorVersion >= '1') {
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index a9909eb081244..8231e9828dce7 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ 	ebu_host->cs_num = cs;
+ 
+ 	resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
++	if (!resname) {
++		ret = -ENOMEM;
++		goto err_of_node_put;
++	}
++
+ 	ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ 									  resname);
+ 	if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+@@ -655,6 +660,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
++	if (!resname) {
++		ret = -ENOMEM;
++		goto err_cleanup_dma;
++	}
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ 	if (!res) {
+ 		ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index b10011dec1e62..b16adc0e92e9d 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1115,6 +1115,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ 	init.name = devm_kasprintf(nfc->dev,
+ 				   GFP_KERNEL, "%s#div",
+ 				   dev_name(nfc->dev));
++	if (!init.name)
++		return -ENOMEM;
++
+ 	init.ops = &clk_divider_ops;
+ 	nfc_divider_parent_data[0].fw_name = "device";
+ 	init.parent_data = nfc_divider_parent_data;
+diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
+index eb0b9d16e8dae..a553e3ac8ff41 100644
+--- a/drivers/mtd/nand/raw/tegra_nand.c
++++ b/drivers/mtd/nand/raw/tegra_nand.c
+@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
+ 	init_completion(&ctrl->dma_complete);
+ 
+ 	ctrl->irq = platform_get_irq(pdev, 0);
++	if (ctrl->irq < 0) {
++		err = ctrl->irq;
++		goto err_put_pm;
++	}
+ 	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ 			       dev_name(&pdev->dev), ctrl);
+ 	if (err) {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index a64ebb7f5b712..363b6cb33ae08 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1499,6 +1499,10 @@ done:
+ static void bond_setup_by_slave(struct net_device *bond_dev,
+ 				struct net_device *slave_dev)
+ {
++	bool was_up = !!(bond_dev->flags & IFF_UP);
++
++	dev_close(bond_dev);
++
+ 	bond_dev->header_ops	    = slave_dev->header_ops;
+ 
+ 	bond_dev->type		    = slave_dev->type;
+@@ -1513,6 +1517,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ 		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ 		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ 	}
++	if (was_up)
++		dev_open(bond_dev, NULL);
+ }
+ 
+ /* On bonding slaves other than the currently active slave, suppress
+diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
+index d8ab2b77d201e..167a86f39f277 100644
+--- a/drivers/net/dsa/lan9303_mdio.c
++++ b/drivers/net/dsa/lan9303_mdio.c
+@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+ 	struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+ 
+ 	reg <<= 2; /* reg num to offset */
+-	mutex_lock(&sw_dev->device->bus->mdio_lock);
++	mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ 	lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ 	lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ 	mutex_unlock(&sw_dev->device->bus->mdio_lock);
+@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+ 	struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+ 
+ 	reg <<= 2; /* reg num to offset */
+-	mutex_lock(&sw_dev->device->bus->mdio_lock);
++	mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ 	*val = lan9303_mdio_real_read(sw_dev->device, reg);
+ 	*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ 	mutex_unlock(&sw_dev->device->bus->mdio_lock);
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index 045fe133f6ee9..5beadabc21361 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ 	}
+ 
+ 	queue_work(pdsc->wq, &qcq->work);
+-	pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
++	pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index e545fafc48196..b1c1f1007b065 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -15,7 +15,7 @@
+ #define PDSC_DRV_DESCRIPTION	"AMD/Pensando Core Driver"
+ 
+ #define PDSC_WATCHDOG_SECS	5
+-#define PDSC_QUEUE_NAME_MAX_SZ  32
++#define PDSC_QUEUE_NAME_MAX_SZ  16
+ #define PDSC_ADMINQ_MIN_LENGTH	16	/* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH	64	/* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY	false
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index f77cd9f5a2fda..eb178728edba9 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -254,10 +254,14 @@ static int pdsc_identify(struct pdsc *pdsc)
+ 	struct pds_core_drv_identity drv = {};
+ 	size_t sz;
+ 	int err;
++	int n;
+ 
+ 	drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+-	snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+-		 "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++	/* Catching the return quiets a Wformat-truncation complaint */
++	n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
++		     "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++	if (n > sizeof(drv.driver_ver_str))
++		dev_dbg(pdsc->dev, "release name truncated, don't care\n");
+ 
+ 	/* Next let's get some info about the device
+ 	 * We use the devcmd_lock at this level in order to
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index d9607033bbf21..d2abf32b93fe3 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 	struct pds_core_fw_list_info fw_list;
+ 	struct pdsc *pdsc = devlink_priv(dl);
+ 	union pds_core_dev_comp comp;
+-	char buf[16];
++	char buf[32];
+ 	int listlen;
+ 	int err;
+ 	int i;
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+index 43d821fe7a542..63ba64dbb7310 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
+ 	u16 next_to_use;
+ 	u16 next_to_clean;
+ 	struct napi_struct napi;
+-	struct page *rx_page;
+-	unsigned int rx_page_offset;
+ };
+ 
+ /* board specific private data structure */
+ struct atl1c_adapter {
+ 	struct net_device   *netdev;
+ 	struct pci_dev      *pdev;
+-	unsigned int	    rx_frag_size;
+ 	struct atl1c_hw        hw;
+ 	struct atl1c_hw_stats  hw_stats;
+ 	struct mii_if_info  mii;    /* MII interface info */
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 940c5d1ff9cfc..74b78164cf74a 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ 				struct net_device *dev)
+ {
+-	unsigned int head_size;
+ 	int mtu = dev->mtu;
+ 
+ 	adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ 		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+-
+-	head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
+-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	adapter->rx_frag_size = roundup_pow_of_two(head_size);
+ }
+ 
+ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+@@ -964,7 +959,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ {
+ 	struct pci_dev *pdev = adapter->pdev;
+-	int i;
+ 
+ 	dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ 			  adapter->ring_header.desc, adapter->ring_header.dma);
+@@ -977,12 +971,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ 		kfree(adapter->tpd_ring[0].buffer_info);
+ 		adapter->tpd_ring[0].buffer_info = NULL;
+ 	}
+-	for (i = 0; i < adapter->rx_queue_count; ++i) {
+-		if (adapter->rrd_ring[i].rx_page) {
+-			put_page(adapter->rrd_ring[i].rx_page);
+-			adapter->rrd_ring[i].rx_page = NULL;
+-		}
+-	}
+ }
+ 
+ /**
+@@ -1754,48 +1742,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ 	skb_checksum_none_assert(skb);
+ }
+ 
+-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+-				       u32 queue, bool napi_mode)
+-{
+-	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+-	struct sk_buff *skb;
+-	struct page *page;
+-
+-	if (adapter->rx_frag_size > PAGE_SIZE) {
+-		if (likely(napi_mode))
+-			return napi_alloc_skb(&rrd_ring->napi,
+-					      adapter->rx_buffer_len);
+-		else
+-			return netdev_alloc_skb_ip_align(adapter->netdev,
+-							 adapter->rx_buffer_len);
+-	}
+-
+-	page = rrd_ring->rx_page;
+-	if (!page) {
+-		page = alloc_page(GFP_ATOMIC);
+-		if (unlikely(!page))
+-			return NULL;
+-		rrd_ring->rx_page = page;
+-		rrd_ring->rx_page_offset = 0;
+-	}
+-
+-	skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
+-			adapter->rx_frag_size);
+-	if (likely(skb)) {
+-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+-		rrd_ring->rx_page_offset += adapter->rx_frag_size;
+-		if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+-			rrd_ring->rx_page = NULL;
+-		else
+-			get_page(page);
+-	}
+-	return skb;
+-}
+-
+ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ 				 bool napi_mode)
+ {
+ 	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
++	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct atl1c_buffer *buffer_info, *next_info;
+ 	struct sk_buff *skb;
+@@ -1814,13 +1765,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ 	while (next_info->flags & ATL1C_BUFFER_FREE) {
+ 		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+ 
+-		skb = atl1c_alloc_skb(adapter, queue, napi_mode);
++		/* When the DMA RX address is set to something like
++		 * 0x....fc0, it is very likely to cause a DMA
++		 * RFD overflow issue.
++		 *
++		 * To work around it, we allocate the rx skb with 64
++		 * bytes of extra space, and offset the address
++		 * whenever 0x....fc0 is detected.
++		 */
++		if (likely(napi_mode))
++			skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
++		else
++			skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
+ 		if (unlikely(!skb)) {
+ 			if (netif_msg_rx_err(adapter))
+ 				dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ 			break;
+ 		}
+ 
++		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++			skb_reserve(skb, 64);
++
+ 		/*
+ 		 * Make buffer alignment 2 beyond a 16 byte boundary
+ 		 * this will result in a 16 byte aligned IP header after
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 5715b9ab2712e..e7137b468f5bc 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
+ 		.val = CONFIG0_MAXLEN_1536,
+ 	},
+ 	{
+-		.max_l3_len = 1542,
+-		.val = CONFIG0_MAXLEN_1542,
++		.max_l3_len = 1548,
++		.val = CONFIG0_MAXLEN_1548,
+ 	},
+ 	{
+ 		.max_l3_len = 9212,
+@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 	dma_addr_t mapping;
+ 	unsigned short mtu;
+ 	void *buffer;
++	int ret;
+ 
+ 	mtu  = ETH_HLEN;
+ 	mtu += netdev->mtu;
+@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 		word3 |= mtu;
+ 	}
+ 
+-	if (skb->ip_summed != CHECKSUM_NONE) {
++	if (skb->len >= ETH_FRAME_LEN) {
++		/* Hardware offloaded checksumming isn't working on frames
++		 * bigger than 1514 bytes. A hypothesis about this is that the
++		 * checksum buffer is only 1518 bytes, so when the frames get
++		 * bigger they get truncated, or the last few bytes get
++		 * overwritten by the FCS.
++		 *
++		 * Just use software checksumming and bypass on bigger frames.
++		 */
++		if (skb->ip_summed == CHECKSUM_PARTIAL) {
++			ret = skb_checksum_help(skb);
++			if (ret)
++				return ret;
++		}
++		word1 |= TSS_BYPASS_BIT;
++	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		int tcp = 0;
+ 
++		/* We do not switch off the checksumming on non-TCP/UDP
++		 * frames: as tests have shown, the checksumming engine
++		 * is smart enough to see that a frame is not actually TCP
++		 * or UDP and just passes it through without any changes
++		 * to the frame.
++		 */
+ 		if (skb->protocol == htons(ETH_P_IP)) {
+ 			word1 |= TSS_IP_CHKSUM_BIT;
+ 			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+ 	return 0;
+ }
+ 
+-static netdev_features_t gmac_fix_features(struct net_device *netdev,
+-					   netdev_features_t features)
+-{
+-	if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+-		features &= ~GMAC_OFFLOAD_FEATURES;
+-
+-	return features;
+-}
+-
+ static int gmac_set_features(struct net_device *netdev,
+ 			     netdev_features_t features)
+ {
+@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
+ 	.ndo_set_mac_address	= gmac_set_mac_address,
+ 	.ndo_get_stats64	= gmac_get_stats64,
+ 	.ndo_change_mtu		= gmac_change_mtu,
+-	.ndo_fix_features	= gmac_fix_features,
+ 	.ndo_set_features	= gmac_set_features,
+ };
+ 
+@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+ 
+ 	netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ 	netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+-	/* We can handle jumbo frames up to 10236 bytes so, let's accept
+-	 * payloads of 10236 bytes minus VLAN and ethernet header
++	/* We can receive jumbo frames up to 10236 bytes but only
++	 * transmit 2047 bytes, so let's accept payloads of 2047
++	 * bytes minus VLAN and ethernet header
+ 	 */
+ 	netdev->min_mtu = ETH_MIN_MTU;
+-	netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
++	netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
+ 
+ 	port->freeq_refill = 0;
+ 	netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
+index 9fdf77d5eb374..24bb989981f23 100644
+--- a/drivers/net/ethernet/cortina/gemini.h
++++ b/drivers/net/ethernet/cortina/gemini.h
+@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
+ #define SOF_BIT			0x80000000
+ #define EOF_BIT			0x40000000
+ #define EOFIE_BIT		BIT(29)
+-#define MTU_SIZE_BIT_MASK	0x1fff
++#define MTU_SIZE_BIT_MASK	0x7ff /* Max MTU 2047 bytes */
+ 
+ /* GMAC Tx Descriptor */
+ struct gmac_txdesc {
+@@ -787,7 +787,7 @@ union gmac_config0 {
+ #define  CONFIG0_MAXLEN_1536	0
+ #define  CONFIG0_MAXLEN_1518	1
+ #define  CONFIG0_MAXLEN_1522	2
+-#define  CONFIG0_MAXLEN_1542	3
++#define  CONFIG0_MAXLEN_1548	3
+ #define  CONFIG0_MAXLEN_9k	4	/* 9212 */
+ #define  CONFIG0_MAXLEN_10k	5	/* 10236 */
+ #define  CONFIG0_MAXLEN_1518__6	6
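/* Resulting limits (illustration, not part of the patch): the TX descriptor
 * length field is MTU_SIZE_BIT_MASK = 0x7ff = 2047 bytes of frame, so
 *
 *   netdev->max_mtu = 2047 - VLAN_ETH_HLEN (18) = 2029 bytes of payload
 *
 * while the RX side still accepts jumbo frames up to 10236 bytes.
 */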
+diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
+index 11b29f56aaf9c..b91abe9efb517 100644
+--- a/drivers/net/ethernet/engleder/tsnep.h
++++ b/drivers/net/ethernet/engleder/tsnep.h
+@@ -142,7 +142,7 @@ struct tsnep_rx {
+ 
+ struct tsnep_queue {
+ 	struct tsnep_adapter *adapter;
+-	char name[IFNAMSIZ + 9];
++	char name[IFNAMSIZ + 16];
+ 
+ 	struct tsnep_tx *tx;
+ 	struct tsnep_rx *rx;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 479156576bc8a..e3fc894fa3f6f 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -1778,14 +1778,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+ 		dev = queue->adapter;
+ 	} else {
+ 		if (queue->tx && queue->rx)
+-			sprintf(queue->name, "%s-txrx-%d", name,
+-				queue->rx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
++				 name, queue->rx->queue_index);
+ 		else if (queue->tx)
+-			sprintf(queue->name, "%s-tx-%d", name,
+-				queue->tx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
++				 name, queue->tx->queue_index);
+ 		else
+-			sprintf(queue->name, "%s-rx-%d", name,
+-				queue->rx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
++				 name, queue->rx->queue_index);
+ 		handler = tsnep_irq_txrx;
+ 		dev = queue;
+ 	}
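/* Worst-case sizing behind IFNAMSIZ + 16 (illustration, not part of the
 * patch), assuming a non-negative queue index so "%d" prints at most 10
 * digits:
 *
 *   (IFNAMSIZ - 1) name chars + 6 ("-txrx-") + 10 digits + 1 NUL
 *     = IFNAMSIZ + 16
 *
 * With snprintf() the name can no longer overrun the buffer even if these
 * assumptions are violated; it is truncated instead.
 */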
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 465a6db5a40a8..79bfa2837a0e6 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -255,10 +255,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+ 	if (block->tx) {
+ 		if (block->tx->q_num < priv->tx_cfg.num_queues)
+ 			reschedule |= gve_tx_poll(block, budget);
+-		else
++		else if (budget)
+ 			reschedule |= gve_xdp_poll(block, budget);
+ 	}
+ 
++	if (!budget)
++		return 0;
++
+ 	if (block->rx) {
+ 		work_done = gve_rx_poll(block, budget);
+ 		reschedule |= work_done == budget;
+@@ -299,6 +302,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+ 	if (block->tx)
+ 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ 
++	if (!budget)
++		return 0;
++
+ 	if (block->rx) {
+ 		work_done = gve_rx_poll_dqo(block, budget);
+ 		reschedule |= work_done == budget;
+diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
+index e84a066aa1a40..73655347902d2 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx.c
++++ b/drivers/net/ethernet/google/gve/gve_rx.c
+@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
+ 
+ 	feat = block->napi.dev->features;
+ 
+-	/* If budget is 0, do all the work */
+-	if (budget == 0)
+-		budget = INT_MAX;
+-
+ 	if (budget > 0)
+ 		work_done = gve_clean_rx_done(rx, budget, feat);
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 6957a865cff37..9f6ffc4a54f0b 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ 	bool repoll;
+ 	u32 to_do;
+ 
+-	/* If budget is 0, do all the work */
+-	if (budget == 0)
+-		budget = INT_MAX;
+-
+ 	/* Find out how much work there is to be done */
+ 	nic_done = gve_tx_load_event_counter(priv, tx);
+ 	to_do = min_t(u32, (nic_done - tx->done), budget);
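/* Convention behind the change (illustration, not part of the patch): NAPI
 * pollers can be invoked with budget == 0 (e.g. from netpoll), which permits
 * TX completion work only; no RX packet may be processed.  The special case
 * is therefore handled once in gve_napi_poll()/gve_napi_poll_dqo() rather
 * than by inflating the budget to INT_MAX inside each poller.
 */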
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 26fb6fefcb9d9..5d1814ed51427 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -500,11 +500,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ 	}
+ 
+ 	sprintf(result[j++], "%d", i);
+-	sprintf(result[j++], "%s", dim_state_str[dim->state]);
++	sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
++		dim_state_str[dim->state] : "unknown");
+ 	sprintf(result[j++], "%u", dim->profile_ix);
+-	sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
++	sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
++		dim_cqe_mode_str[dim->mode] : "unknown");
+ 	sprintf(result[j++], "%s",
+-		dim_tune_stat_str[dim->tune_state]);
++		dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
++		dim_tune_stat_str[dim->tune_state] : "unknown");
+ 	sprintf(result[j++], "%u", dim->steps_left);
+ 	sprintf(result[j++], "%u", dim->steps_right);
+ 	sprintf(result[j++], "%u", dim->tired);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 71a2ec03f2b38..f644210afb70a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ 	struct hnae3_handle *h = priv->ae_handle;
+-	u8 mac_addr_temp[ETH_ALEN];
++	u8 mac_addr_temp[ETH_ALEN] = {0};
+ 	int ret = 0;
+ 
+ 	if (h->ae_algo->ops->get_mac_addr)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index ed6cf59853bf6..0f868605300a2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
+ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ 				      int wait_cnt);
++static int hclge_update_port_info(struct hclge_dev *hdev);
+ 
+ static struct hnae3_ae_algo ae_algo;
+ 
+@@ -3043,6 +3044,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+ 
+ 	if (state != hdev->hw.mac.link) {
+ 		hdev->hw.mac.link = state;
++		if (state == HCLGE_LINK_STATUS_UP)
++			hclge_update_port_info(hdev);
++
+ 		client->ops->link_status_change(handle, state);
+ 		hclge_config_mac_tnl_int(hdev, state);
+ 		if (rclient && rclient->ops->link_status_change)
+@@ -10026,8 +10030,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ 	struct hclge_vport_vlan_cfg *vlan, *tmp;
+ 	struct hclge_dev *hdev = vport->back;
+ 
+-	mutex_lock(&hdev->vport_lock);
+-
+ 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ 		if (vlan->vlan_id == vlan_id) {
+ 			if (is_write_tbl && vlan->hd_tbl_status)
+@@ -10042,8 +10044,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ 			break;
+ 		}
+ 	}
+-
+-	mutex_unlock(&hdev->vport_lock);
+ }
+ 
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+@@ -10452,11 +10452,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ 	 * handle mailbox. Just record the vlan id, and remove it after
+ 	 * reset finished.
+ 	 */
++	mutex_lock(&hdev->vport_lock);
+ 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
++		mutex_unlock(&hdev->vport_lock);
+ 		return -EBUSY;
++	} else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
++		clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ 	}
++	mutex_unlock(&hdev->vport_lock);
+ 
+ 	/* when port base vlan enabled, we use port base vlan as the vlan
+ 	 * filter entry. In this case, we don't update vlan filter table
+@@ -10471,17 +10476,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ 	}
+ 
+ 	if (!ret) {
+-		if (!is_kill)
++		if (!is_kill) {
+ 			hclge_add_vport_vlan_table(vport, vlan_id,
+ 						   writen_to_tbl);
+-		else if (is_kill && vlan_id != 0)
++		} else if (is_kill && vlan_id != 0) {
++			mutex_lock(&hdev->vport_lock);
+ 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
++			mutex_unlock(&hdev->vport_lock);
++		}
+ 	} else if (is_kill) {
+ 		/* when removing the hw vlan filter fails, record the vlan id,
+ 		 * and try to remove it from hw later, to be consistent
+ 		 * with the stack
+ 		 */
++		mutex_lock(&hdev->vport_lock);
+ 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
++		mutex_unlock(&hdev->vport_lock);
+ 	}
+ 
+ 	hclge_set_vport_vlan_fltr_change(vport);
+@@ -10521,6 +10531,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ 	int i, ret, sync_cnt = 0;
+ 	u16 vlan_id;
+ 
++	mutex_lock(&hdev->vport_lock);
+ 	/* start from vport 1 for PF is always alive */
+ 	for (i = 0; i < hdev->num_alloc_vport; i++) {
+ 		struct hclge_vport *vport = &hdev->vport[i];
+@@ -10531,21 +10542,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ 						       vport->vport_id, vlan_id,
+ 						       true);
+-			if (ret && ret != -EINVAL)
++			if (ret && ret != -EINVAL) {
++				mutex_unlock(&hdev->vport_lock);
+ 				return;
++			}
+ 
+ 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ 			hclge_set_vport_vlan_fltr_change(vport);
+ 
+ 			sync_cnt++;
+-			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
++			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
++				mutex_unlock(&hdev->vport_lock);
+ 				return;
++			}
+ 
+ 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ 						 VLAN_N_VID);
+ 		}
+ 	}
++	mutex_unlock(&hdev->vport_lock);
+ 
+ 	hclge_sync_vlan_fltr_state(hdev);
+ }
+@@ -11652,6 +11668,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 		goto err_msi_irq_uninit;
+ 
+ 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
++		clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ 		if (hnae3_dev_phy_imp_supported(hdev))
+ 			ret = hclge_update_tp_port_info(hdev);
+ 		else
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index a4d68fb216fb9..0aa9beefd1c7e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 		return -EBUSY;
++	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
++		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 	}
+ 
+ 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ 	int ret, sync_cnt = 0;
+ 	u16 vlan_id;
+ 
++	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
++		return;
++
++	rtnl_lock();
+ 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ 	while (vlan_id != VLAN_N_VID) {
+ 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ 					      vlan_id, true);
+ 		if (ret)
+-			return;
++			break;
+ 
+ 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 		sync_cnt++;
+ 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+-			return;
++			break;
+ 
+ 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ 	}
++	rtnl_unlock();
+ }
+ 
+ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ 	return HCLGEVF_VECTOR0_EVENT_OTHER;
+ }
+ 
++static void hclgevf_reset_timer(struct timer_list *t)
++{
++	struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
++
++	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
++	hclgevf_reset_task_schedule(hdev);
++}
++
+ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ {
++#define HCLGEVF_RESET_DELAY	5
++
+ 	enum hclgevf_evt_cause event_cause;
+ 	struct hclgevf_dev *hdev = data;
+ 	u32 clearval;
+@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ 
+ 	switch (event_cause) {
+ 	case HCLGEVF_VECTOR0_EVENT_RST:
+-		hclgevf_reset_task_schedule(hdev);
++		mod_timer(&hdev->reset_timer,
++			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ 		break;
+ 	case HCLGEVF_VECTOR0_EVENT_MBX:
+ 		hclgevf_mbx_handler(hdev);
+@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 		 HCLGEVF_DRIVER_NAME);
+ 
+ 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
++	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 81c16b8c8da29..a73f2bf3a56a6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -219,6 +219,7 @@ struct hclgevf_dev {
+ 	enum hnae3_reset_type reset_level;
+ 	unsigned long reset_pending;
+ 	enum hnae3_reset_type reset_type;
++	struct timer_list reset_timer;
+ 
+ #define HCLGEVF_RESET_REQUESTED		0
+ #define HCLGEVF_RESET_PENDING		1
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index bbf7b14079de3..85c2a634c8f96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ 		i++;
+ 	}
+ 
++	/* ensure additional_info will be seen after received_resp */
++	smp_rmb();
++
+ 	if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ 	resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ 	memcpy(resp->additional_info, req->msg.resp_data,
+ 	       HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
++
++	/* ensure additional_info will be seen before setting received_resp */
++	smp_wmb();
++
+ 	if (match_id) {
+ 		/* If match_id is not zero, it means PF support match_id.
+ 		 * if the match_id is right, VF get the right response, or
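
The smp_wmb()/smp_rmb() pair added to the mailbox path is the standard publish/consume ordering: the response writer must make additional_info globally visible before flipping received_resp, and the polling reader must order its flag check before the payload reads. A minimal kernel-style sketch of the pairing, assuming one writer and one reader (names illustrative):

#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/string.h>
#include <linux/types.h>

struct mbx_resp {
	u8   additional_info[32];
	bool received_resp;
};

/* writer side, e.g. the mailbox-handler path */
static void resp_publish(struct mbx_resp *r, const u8 *info, size_t len)
{
	memcpy(r->additional_info, info, len);
	smp_wmb();				/* payload before flag */
	WRITE_ONCE(r->received_resp, true);
}

/* reader side, e.g. the polling loop */
static bool resp_consume(struct mbx_resp *r, u8 *out, size_t len)
{
	if (!READ_ONCE(r->received_resp))
		return false;
	smp_rmb();				/* flag before payload */
	memcpy(out, r->additional_info, len);
	return true;
}
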
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index acf4f6ba73a6f..f4692a8726b1c 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ 				       u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS) {
++		struct mvneta_port *pp = netdev_priv(netdev);
+ 		int i;
+ 
+ 		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ 			memcpy(data + i * ETH_GSTRING_LEN,
+ 			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ 
+-		data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+-		page_pool_ethtool_stats_get_strings(data);
++		if (!pp->bm_priv) {
++			data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
++			page_pool_ethtool_stats_get_strings(data);
++		}
+ 	}
+ }
+ 
+@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+ 	struct page_pool_stats stats = {};
+ 	int i;
+ 
+-	for (i = 0; i < rxq_number; i++)
+-		page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++	for (i = 0; i < rxq_number; i++) {
++		if (pp->rxqs[i].page_pool)
++			page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++	}
+ 
+ 	page_pool_ethtool_stats_get(data, &stats);
+ }
+@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
+ 	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ 		*data++ = pp->ethtool_stats[i];
+ 
+-	mvneta_ethtool_pp_stats(pp, data);
++	if (!pp->bm_priv)
++		mvneta_ethtool_pp_stats(pp, data);
+ }
+ 
+ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+ {
+-	if (sset == ETH_SS_STATS)
+-		return ARRAY_SIZE(mvneta_statistics) +
+-		       page_pool_ethtool_stats_get_count();
++	if (sset == ETH_SS_STATS) {
++		int count = ARRAY_SIZE(mvneta_statistics);
++		struct mvneta_port *pp = netdev_priv(dev);
++
++		if (!pp->bm_priv)
++			count += page_pool_ethtool_stats_get_count();
++
++		return count;
++	}
+ 
+ 	return -EOPNOTSUPP;
+ }
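
All three mvneta ethtool callbacks now branch on the same pp->bm_priv test, and that invariant is the point: get_sset_count, get_strings and get_stats must report exactly the same length, or ethtool over- or under-reads the buffers userspace sized from the count. A condensed sketch of keeping them in lockstep (mvneta-like but illustrative; in 6.5 the page_pool helpers still come from net/page_pool.h):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/page_pool.h>

static const char my_strings[][ETH_GSTRING_LEN] = { "rx_ok", "tx_ok" };

struct my_port { void *bm_priv; };	/* HW buffer manager excludes page_pool */

static bool my_uses_page_pool(struct net_device *dev)
{
	struct my_port *pp = netdev_priv(dev);

	return !pp->bm_priv;
}

static int my_get_sset_count(struct net_device *dev, int sset)
{
	int count = ARRAY_SIZE(my_strings);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	if (my_uses_page_pool(dev))
		count += page_pool_ethtool_stats_get_count();
	return count;	/* get_strings/get_stats must emit exactly this many */
}
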
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+index 0107e4e73bb06..415840c3ef84f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+@@ -18,6 +18,7 @@ void mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
+ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
+ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
++void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
+ 
+ int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+ int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index b0b429a0321ed..af3928eddafd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -2,9 +2,12 @@
+ // Copyright (c) 2020 Mellanox Technologies
+ 
+ #include "en/ptp.h"
++#include "en/health.h"
+ #include "en/txrx.h"
+ #include "en/params.h"
+ #include "en/fs_tt_redirect.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
+ 
+ struct mlx5e_ptp_fs {
+ 	struct mlx5_flow_handle *l2_rule;
+@@ -19,6 +22,48 @@ struct mlx5e_ptp_params {
+ 	struct mlx5e_rq_param rq_param;
+ };
+ 
++struct mlx5e_ptp_port_ts_cqe_tracker {
++	u8 metadata_id;
++	bool inuse : 1;
++	struct list_head entry;
++};
++
++struct mlx5e_ptp_port_ts_cqe_list {
++	struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
++	struct list_head tracker_list_head;
++	/* Sync list operations in xmit and napi_poll contexts */
++	spinlock_t tracker_list_lock;
++};
++
++static inline void
++mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
++{
++	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
++
++	WARN_ON_ONCE(tracker->inuse);
++	tracker->inuse = true;
++	spin_lock(&list->tracker_list_lock);
++	list_add_tail(&tracker->entry, &list->tracker_list_head);
++	spin_unlock(&list->tracker_list_lock);
++}
++
++static void
++mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
++{
++	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
++
++	WARN_ON_ONCE(!tracker->inuse);
++	tracker->inuse = false;
++	spin_lock(&list->tracker_list_lock);
++	list_del(&tracker->entry);
++	spin_unlock(&list->tracker_list_lock);
++}
++
++void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
++{
++	mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
++}
++
+ struct mlx5e_skb_cb_hwtstamp {
+ 	ktime_t cqe_hwtstamp;
+ 	ktime_t port_hwtstamp;
+@@ -79,84 +124,113 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
+ 	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
+ }
+ 
+-#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+-
+-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
++static struct sk_buff *
++mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
+ {
+-	return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
++	return map->data[metadata];
+ }
+ 
+-static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
++static struct sk_buff *
++mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
+ {
+-	u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+-	u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
++	struct sk_buff *skb;
+ 
+-	if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
+-		return true;
++	skb = map->data[metadata];
++	map->data[metadata] = NULL;
+ 
+-	return false;
++	return skb;
+ }
+ 
+-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
+-					     u16 skb_id, int budget)
++static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
+ {
+-	struct skb_shared_hwtstamps hwts = {};
+-	struct sk_buff *skb;
++	/* Consider the SQ unhealthy once more than 15/16 of the map capacity cannot be reclaimed. */
++	return map->undelivered_counter > (map->capacity >> 4) * 15;
++}
+ 
+-	ptpsq->cq_stats->resync_event++;
++static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
++						 ktime_t port_tstamp)
++{
++	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
++	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
++	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
++	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+ 
+-	while (skb_ci != skb_id) {
+-		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+-		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+-		skb_tstamp_tx(skb, &hwts);
+-		ptpsq->cq_stats->resync_cqe++;
+-		napi_consume_skb(skb, budget);
+-		skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++	spin_lock(&cqe_list->tracker_list_lock);
++	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
++		struct sk_buff *skb =
++			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
++		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
++
++		if (!dma_tstamp ||
++		    ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
++			break;
++
++		metadata_map->undelivered_counter++;
++		WARN_ON_ONCE(!pos->inuse);
++		pos->inuse = false;
++		list_del(&pos->entry);
+ 	}
++	spin_unlock(&cqe_list->tracker_list_lock);
+ }
+ 
++#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
++
+ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ 				    struct mlx5_cqe64 *cqe,
++				    u8 *md_buff,
++				    u8 *md_buff_sz,
+ 				    int budget)
+ {
+-	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+-	u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
++	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
++	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
+ 	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ 	struct sk_buff *skb;
+ 	ktime_t hwtstamp;
+ 
+-	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+-		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+-		ptpsq->cq_stats->err_cqe++;
+-		goto out;
++	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
++		mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
++	} else {
++		/* Reclaim space in the unlikely event the CQE was delivered
++		 * after it was marked late.
++		 */
++		ptpsq->metadata_map.undelivered_counter--;
++		ptpsq->cq_stats->late_cqe++;
+ 	}
+ 
+-	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
+-		if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
+-			/* already handled by a previous resync */
+-			ptpsq->cq_stats->ooo_cqe_drop++;
+-			return;
+-		}
+-		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
++	skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);
++
++	if (unlikely(is_err_cqe)) {
++		ptpsq->cq_stats->err_cqe++;
++		goto out;
+ 	}
+ 
+-	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
+ 	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
+ 				      hwtstamp, ptpsq->cq_stats);
+ 	ptpsq->cq_stats->cqe++;
+ 
++	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
+ out:
+ 	napi_consume_skb(skb, budget);
++	md_buff[(*md_buff_sz)++] = metadata_id;
++	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
++	    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
++		queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+ }
+ 
+-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
++static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
+ {
+ 	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
+-	struct mlx5_cqwq *cqwq = &cq->wq;
++	int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
++	u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
++	u8 metadata_buff_sz = 0;
++	struct mlx5_cqwq *cqwq;
+ 	struct mlx5_cqe64 *cqe;
+ 	int work_done = 0;
+ 
++	cqwq = &cq->wq;
++
+ 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
+ 		return false;
+ 
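
One note on md_buff[(*md_buff_sz)++] in the hunk above: the parentheses are load-bearing. Postfix ++ binds tighter than unary *, so the unparenthesized *md_buff_sz++ would advance the pointer rather than the stored count. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char buf[4] = { 0 };
	unsigned int sz = 0;
	unsigned int *psz = &sz;

	buf[(*psz)++] = 7;	/* uses index 0, then sz becomes 1 */
	/*
	 * buf[*psz++] = 7; would also store at index 0, but it bumps the
	 * POINTER psz instead of the count, leaving sz at 0.
	 */
	printf("sz=%u buf[0]=%d\n", sz, buf[0]);	/* sz=1 buf[0]=7 */
	return 0;
}
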
+@@ -167,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ 	do {
+ 		mlx5_cqwq_pop(cqwq);
+ 
+-		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
++		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
++					metadata_buff, &metadata_buff_sz, napi_budget);
+ 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+ 
+ 	mlx5_cqwq_update_db_record(cqwq);
+@@ -175,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ 	/* ensure cq space is freed before enabling more cqes */
+ 	wmb();
+ 
++	while (metadata_buff_sz > 0)
++		mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
++					     metadata_buff[--metadata_buff_sz]);
++
+ 	mlx5e_txqsq_wake(&ptpsq->txqsq);
+ 
+ 	return work_done == budget;
+@@ -291,36 +370,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
+ 
+ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
+ {
+-	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+-	struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
++	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
++	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
++	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
++	int db_sz;
++	int md;
+ 
+-	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
+-					     GFP_KERNEL, numa);
+-	if (!ptpsq->skb_fifo.fifo)
++	cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
++	if (!cqe_list)
+ 		return -ENOMEM;
++	ptpsq->ts_cqe_pending_list = cqe_list;
++
++	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
++		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
++					  ts_cqe_metadata_size2wqe_counter));
++	ptpsq->ts_cqe_ctr_mask = db_sz - 1;
++
++	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
++					GFP_KERNEL, numa);
++	if (!cqe_list->nodes)
++		goto free_cqe_list;
++	INIT_LIST_HEAD(&cqe_list->tracker_list_head);
++	spin_lock_init(&cqe_list->tracker_list_lock);
++
++	metadata_freelist->data =
++		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
++			      GFP_KERNEL, numa);
++	if (!metadata_freelist->data)
++		goto free_cqe_list_nodes;
++	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;
++
++	for (md = 0; md < db_sz; ++md) {
++		cqe_list->nodes[md].metadata_id = md;
++		metadata_freelist->data[md] = md;
++	}
++	metadata_freelist->pc = db_sz;
++
++	metadata_map->data =
++		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
++			      GFP_KERNEL, numa);
++	if (!metadata_map->data)
++		goto free_metadata_freelist;
++	metadata_map->capacity = db_sz;
+ 
+-	ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
+-	ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
+-	ptpsq->skb_fifo.mask = wq_sz - 1;
+-	if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+-		ptpsq->ts_cqe_ctr_mask =
+-			(1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
+ 	return 0;
++
++free_metadata_freelist:
++	kvfree(metadata_freelist->data);
++free_cqe_list_nodes:
++	kvfree(cqe_list->nodes);
++free_cqe_list:
++	kvfree(cqe_list);
++	return -ENOMEM;
+ }
+ 
+-static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
++static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
+ {
+-	while (*skb_fifo->pc != *skb_fifo->cc) {
+-		struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
++	int idx;
++
++	for (idx = 0; idx < map->capacity; ++idx) {
++		struct sk_buff *skb = map->data[idx];
+ 
+ 		dev_kfree_skb_any(skb);
+ 	}
+ }
+ 
+-static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
++static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
+ {
+-	mlx5e_ptp_drain_skb_fifo(skb_fifo);
+-	kvfree(skb_fifo->fifo);
++	mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
++	kvfree(ptpsq->metadata_map.data);
++	kvfree(ptpsq->metadata_freelist.data);
++	kvfree(ptpsq->ts_cqe_pending_list->nodes);
++	kvfree(ptpsq->ts_cqe_pending_list);
++}
++
++static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
++{
++	struct mlx5e_ptpsq *ptpsq =
++		container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
++
++	mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ }
+ 
+ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
+@@ -348,11 +477,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
+ 	if (err)
+ 		goto err_free_txqsq;
+ 
+-	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
+-					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
++	err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ 	if (err)
+ 		goto err_free_txqsq;
+ 
++	INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);
++
+ 	return 0;
+ 
+ err_free_txqsq:
+@@ -366,7 +496,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
+ 	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ 	struct mlx5_core_dev *mdev = sq->mdev;
+ 
+-	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
++	if (current_work() != &ptpsq->report_unhealthy_work)
++		cancel_work_sync(&ptpsq->report_unhealthy_work);
++	mlx5e_ptp_free_traffic_db(ptpsq);
+ 	cancel_work_sync(&sq->recover_work);
+ 	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
+ 	mlx5e_free_txqsq_descs(sq);
+@@ -534,7 +666,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
+ 
+ 	/* SQ */
+ 	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+-		params->log_sq_size = orig->log_sq_size;
++		params->log_sq_size =
++			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
++			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
++		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
+ 		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
+ 	}
+ 	/* RQ */
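
mlx5e_ptp_alloc_traffic_db above grows into a four-stage allocation that unwinds in reverse through a goto ladder, the stock kernel idiom for multi-step setup where any stage can fail. The shape, reduced to essentials (struct and field names illustrative):

#include <linux/errno.h>
#include <linux/overflow.h>	/* array_size() */
#include <linux/slab.h>		/* kvzalloc_node(), kvfree() */
#include <linux/types.h>

struct my_db {
	u32 *a;
	u32 *b;
	u32 *c;
};

static int my_alloc_db(struct my_db *db, int len, int numa)
{
	db->a = kvzalloc_node(array_size(len, sizeof(*db->a)), GFP_KERNEL, numa);
	if (!db->a)
		return -ENOMEM;

	db->b = kvzalloc_node(array_size(len, sizeof(*db->b)), GFP_KERNEL, numa);
	if (!db->b)
		goto free_a;

	db->c = kvzalloc_node(array_size(len, sizeof(*db->c)), GFP_KERNEL, numa);
	if (!db->c)
		goto free_b;

	return 0;

free_b:				/* unwind strictly in reverse allocation order */
	kvfree(db->b);
free_a:
	kvfree(db->a);
	return -ENOMEM;
}
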
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+index cc7efde88ac3c..7b700d0f956a8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+@@ -7,18 +7,38 @@
+ #include "en.h"
+ #include "en_stats.h"
+ #include "en/txrx.h"
++#include <linux/ktime.h>
+ #include <linux/ptp_classify.h>
++#include <linux/time64.h>
++#include <linux/workqueue.h>
+ 
+ #define MLX5E_PTP_CHANNEL_IX 0
++#define MLX5E_PTP_MAX_LOG_SQ_SIZE (8U)
++#define MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT (1 * NSEC_PER_SEC)
++
++struct mlx5e_ptp_metadata_fifo {
++	u8  cc;
++	u8  pc;
++	u8  mask;
++	u8  *data;
++};
++
++struct mlx5e_ptp_metadata_map {
++	u16             undelivered_counter;
++	u16             capacity;
++	struct sk_buff  **data;
++};
+ 
+ struct mlx5e_ptpsq {
+ 	struct mlx5e_txqsq       txqsq;
+ 	struct mlx5e_cq          ts_cq;
+-	u16                      skb_fifo_cc;
+-	u16                      skb_fifo_pc;
+-	struct mlx5e_skb_fifo    skb_fifo;
+ 	struct mlx5e_ptp_cq_stats *cq_stats;
+ 	u16                      ts_cqe_ctr_mask;
++
++	struct work_struct                 report_unhealthy_work;
++	struct mlx5e_ptp_port_ts_cqe_list  *ts_cqe_pending_list;
++	struct mlx5e_ptp_metadata_fifo     metadata_freelist;
++	struct mlx5e_ptp_metadata_map      metadata_map;
+ };
+ 
+ enum {
+@@ -69,12 +89,35 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+ 		fk.ports.dst == htons(PTP_EV_PORT));
+ }
+ 
+-static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
++static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *fifo, u8 metadata)
+ {
+-	if (!sq->ptpsq)
+-		return true;
++	fifo->data[fifo->mask & fifo->pc++] = metadata;
++}
++
++static inline u8
++mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
++{
++	return fifo->data[fifo->mask & fifo->cc++];
++}
+ 
+-	return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
++static inline void
++mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map,
++			   struct sk_buff *skb, u8 metadata)
++{
++	WARN_ON_ONCE(map->data[metadata]);
++	map->data[metadata] = skb;
++}
++
++static inline bool mlx5e_ptpsq_metadata_freelist_empty(struct mlx5e_ptpsq *ptpsq)
++{
++	struct mlx5e_ptp_metadata_fifo *freelist;
++
++	if (likely(!ptpsq))
++		return false;
++
++	freelist = &ptpsq->metadata_freelist;
++
++	return freelist->pc == freelist->cc;
+ }
+ 
+ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+@@ -89,6 +132,8 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ 			  const struct mlx5e_profile *profile);
+ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
+ 
++void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata);
++
+ enum {
+ 	MLX5E_SKB_CB_CQE_HWTSTAMP  = BIT(0),
+ 	MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
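
The metadata_freelist introduced above is a textbook power-of-two ring: free-running u8 producer/consumer counters that are only masked at array-access time, so pc == cc means empty and counter wraparound costs nothing. Spelled out as a standalone sketch (mask must be size - 1 for a power-of-two size no larger than 256):

#include <linux/types.h>

struct meta_fifo {
	u8 cc;		/* consumer counter, runs free */
	u8 pc;		/* producer counter, runs free */
	u8 mask;	/* size - 1, size a power of two */
	u8 *data;
};

static void meta_fifo_push(struct meta_fifo *f, u8 v)
{
	f->data[f->mask & f->pc++] = v;		/* mask only on access */
}

static u8 meta_fifo_pop(struct meta_fifo *f)
{
	return f->data[f->mask & f->cc++];
}

static bool meta_fifo_empty(const struct meta_fifo *f)
{
	return f->pc == f->cc;	/* holds while at most size entries are queued */
}
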
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index e8eea9ffd5eb6..03b119a434bc9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -702,11 +702,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+ 
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ {
+-	char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
+ 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ 	struct mlx5e_icosq *icosq = rq->icosq;
+ 	struct mlx5e_priv *priv = rq->priv;
+ 	struct mlx5e_err_ctx err_ctx = {};
++	char icosq_str[32] = {};
+ 
+ 	err_ctx.ctx = rq;
+ 	err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+@@ -715,7 +715,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ 	if (icosq)
+ 		snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
+ 	snprintf(err_str, sizeof(err_str),
+-		 "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
++		 "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
+ 		 rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
+ 
+ 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index b35ff289af492..ff8242f67c545 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -164,6 +164,43 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
+ 	return err;
+ }
+ 
++static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
++{
++	struct mlx5e_ptpsq *ptpsq = ctx;
++	struct mlx5e_channels *chs;
++	struct net_device *netdev;
++	struct mlx5e_priv *priv;
++	int carrier_ok;
++	int err;
++
++	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
++		return 0;
++
++	priv = ptpsq->txqsq.priv;
++
++	mutex_lock(&priv->state_lock);
++	chs = &priv->channels;
++	netdev = priv->netdev;
++
++	carrier_ok = netif_carrier_ok(netdev);
++	netif_carrier_off(netdev);
++
++	mlx5e_deactivate_priv_channels(priv);
++
++	mlx5e_ptp_close(chs->ptp);
++	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
++
++	mlx5e_activate_priv_channels(priv);
++
++	/* return carrier back if needed */
++	if (carrier_ok)
++		netif_carrier_on(netdev);
++
++	mutex_unlock(&priv->state_lock);
++
++	return err;
++}
++
+ /* state lock cannot be grabbed within this function.
+  * It can cause a dead lock or a read-after-free.
+  */
+@@ -516,6 +553,15 @@ static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlin
+ 	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+ }
+ 
++static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
++						  struct devlink_fmsg *fmsg,
++						  void *ctx)
++{
++	struct mlx5e_ptpsq *ptpsq = ctx;
++
++	return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
++}
++
+ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ 					  struct devlink_fmsg *fmsg)
+ {
+@@ -621,6 +667,25 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+ 	return to_ctx.status;
+ }
+ 
++void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
++{
++	struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
++	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
++	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
++	struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
++	struct mlx5e_priv *priv = txqsq->priv;
++	struct mlx5e_err_ctx err_ctx = {};
++
++	err_ctx.ctx = ptpsq;
++	err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
++	err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
++	snprintf(err_str, sizeof(err_str),
++		 "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
++		 txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
++
++	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
++}
++
+ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
+ 		.name = "tx",
+ 		.recover = mlx5e_tx_reporter_recover,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 00a04fdd756f5..668da5c70e63d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
+-	e->encap_size = ipv4_encap_size;
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
++	e->encap_size = ipv4_encap_size;
++	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
+-	e->encap_size = ipv4_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto release_neigh;
++		goto free_encap;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
++	e->encap_size = ipv4_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
+-	e->encap_size = ipv6_encap_size;
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
++	e->encap_size = ipv6_encap_size;
++	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
+@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
+-	e->encap_size = ipv6_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto release_neigh;
++		goto free_encap;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
++	e->encap_size = ipv6_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 27861b68ced57..bd3fabb007c94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ 			       struct ethtool_drvinfo *drvinfo)
+ {
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	int count;
+ 
+ 	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+-		 "%d.%d.%04d (%.16s)",
+-		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+-		 mdev->board_id);
++	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++	if (count == sizeof(drvinfo->fw_version))
++		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev));
++
+ 	strscpy(drvinfo->bus_info, dev_name(mdev->device),
+ 		sizeof(drvinfo->bus_info));
+ }
+@@ -2061,7 +2066,8 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
+ 	struct mlx5e_params new_params;
+ 	int err;
+ 
+-	if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
++	if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
++	    !MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Don't allow changing the PTP state if HTB offload is active, because
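
The fw_version fix keys off snprintf's return value, which is the length the full string would have needed; when the board id does not fit, the driver retries with a shorter format. In the general case the truncation test is ret >= size, since a would-be length can exceed the buffer rather than just equal it. A standalone sketch of the idiom, with illustrative version strings:

#include <stdio.h>

int main(void)
{
	char fw[16];
	int n;

	n = snprintf(fw, sizeof(fw), "%s (%s)", "16.35.2000", "MT_0000000123");
	if (n >= (int)sizeof(fw))	/* truncated: retry the short form */
		snprintf(fw, sizeof(fw), "%s", "16.35.2000");

	printf("%s\n", fw);		/* prints: 16.35.2000 */
	return 0;
}
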
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 0cd44ef190058..87fda65852fb7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	int count;
+ 
+ 	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
+ 		sizeof(drvinfo->driver));
+-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+-		 "%d.%d.%04d (%.16s)",
+-		 fw_rev_maj(mdev), fw_rev_min(mdev),
+-		 fw_rev_sub(mdev), mdev->board_id);
++	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++	if (count == sizeof(drvinfo->fw_version))
++		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev));
+ }
+ 
+ static const struct counter_desc sw_rep_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+index 4d77055abd4be..dfdd357974164 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -2142,9 +2142,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
+-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
++	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
+ };
+ 
+ static const struct counter_desc ptp_rq_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+index 67938b4ea1b90..13a07e52ae92b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -449,9 +449,7 @@ struct mlx5e_ptp_cq_stats {
+ 	u64 err_cqe;
+ 	u64 abort;
+ 	u64 abort_abs_diff_ns;
+-	u64 resync_cqe;
+-	u64 resync_event;
+-	u64 ooo_cqe_drop;
++	u64 late_cqe;
+ };
+ 
+ struct mlx5e_rep_stats {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 5797d8607633e..fdef505c4b88f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3148,7 +3148,7 @@ static struct mlx5_fields fields[] = {
+ 	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
+ 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ 	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+-	OFFLOAD(IP_DSCP, 16,  0xc00f, ip6, 0, ip_dscp),
++	OFFLOAD(IP_DSCP, 16,  0x0fc0, ip6, 0, ip_dscp),
+ 
+ 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
+ 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
+@@ -3159,21 +3159,31 @@ static struct mlx5_fields fields[] = {
+ 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
+ };
+ 
+-static unsigned long mask_to_le(unsigned long mask, int size)
++static u32 mask_field_get(void *mask, struct mlx5_fields *f)
+ {
+-	__be32 mask_be32;
+-	__be16 mask_be16;
+-
+-	if (size == 32) {
+-		mask_be32 = (__force __be32)(mask);
+-		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+-	} else if (size == 16) {
+-		mask_be32 = (__force __be32)(mask);
+-		mask_be16 = *(__be16 *)&mask_be32;
+-		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++	switch (f->field_bsize) {
++	case 32:
++		return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
++	case 16:
++		return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
++	default:
++		return *(u8 *)mask & (u8)f->field_mask;
+ 	}
++}
+ 
+-	return mask;
++static void mask_field_clear(void *mask, struct mlx5_fields *f)
++{
++	switch (f->field_bsize) {
++	case 32:
++		*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
++		break;
++	case 16:
++		*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
++		break;
++	default:
++		*(u8 *)mask &= ~(u8)f->field_mask;
++		break;
++	}
+ }
+ 
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+@@ -3185,11 +3195,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
+ 	void *headers_c, *headers_v, *action, *vals_p;
+-	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
+-	unsigned long mask, field_mask;
++	void *s_masks_p, *a_masks_p;
+ 	int i, first, last, next_z;
+ 	struct mlx5_fields *f;
++	unsigned long mask;
++	u32 s_mask, a_mask;
+ 	u8 cmd;
+ 
+ 	mod_acts = &parse_attr->mod_hdr_acts;
+@@ -3205,15 +3216,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 		bool skip;
+ 
+ 		f = &fields[i];
+-		/* avoid seeing bits set from previous iterations */
+-		s_mask = 0;
+-		a_mask = 0;
+-
+ 		s_masks_p = (void *)set_masks + f->offset;
+ 		a_masks_p = (void *)add_masks + f->offset;
+ 
+-		s_mask = *s_masks_p & f->field_mask;
+-		a_mask = *a_masks_p & f->field_mask;
++		s_mask = mask_field_get(s_masks_p, f);
++		a_mask = mask_field_get(a_masks_p, f);
+ 
+ 		if (!s_mask && !a_mask) /* nothing to offload here */
+ 			continue;
+@@ -3240,22 +3247,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 					 match_mask, f->field_bsize))
+ 				skip = true;
+ 			/* clear to denote we consumed this field */
+-			*s_masks_p &= ~f->field_mask;
++			mask_field_clear(s_masks_p, f);
+ 		} else {
+ 			cmd  = MLX5_ACTION_TYPE_ADD;
+ 			mask = a_mask;
+ 			vals_p = (void *)add_vals + f->offset;
+ 			/* add 0 is no change */
+-			if ((*(u32 *)vals_p & f->field_mask) == 0)
++			if (!mask_field_get(vals_p, f))
+ 				skip = true;
+ 			/* clear to denote we consumed this field */
+-			*a_masks_p &= ~f->field_mask;
++			mask_field_clear(a_masks_p, f);
+ 		}
+ 		if (skip)
+ 			continue;
+ 
+-		mask = mask_to_le(mask, f->field_bsize);
+-
+ 		first = find_first_bit(&mask, f->field_bsize);
+ 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ 		last  = find_last_bit(&mask, f->field_bsize);
+@@ -3282,10 +3287,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 		MLX5_SET(set_action_in, action, field, f->field);
+ 
+ 		if (cmd == MLX5_ACTION_TYPE_SET) {
++			unsigned long field_mask = f->field_mask;
+ 			int start;
+ 
+-			field_mask = mask_to_le(f->field_mask, f->field_bsize);
+-
+ 			/* if field is bit sized it can start not from first bit */
+ 			start = find_first_bit(&field_mask, f->field_bsize);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index c7eb6b238c2ba..f0b506e562df3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -372,7 +372,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		     const struct mlx5e_tx_attr *attr,
+ 		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
+ 		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+-		     bool xmit_more)
++		     struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
+ {
+ 	struct mlx5_wq_cyc *wq = &sq->wq;
+ 	bool send_doorbell;
+@@ -394,11 +394,16 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 
+ 	mlx5e_tx_check_stop(sq);
+ 
+-	if (unlikely(sq->ptpsq)) {
++	if (unlikely(sq->ptpsq &&
++		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
++		u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
++
+ 		mlx5e_skb_cb_hwtstamp_init(skb);
+-		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
++		mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
++					   metadata_index);
++		mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ 		if (!netif_tx_queue_stopped(sq->txq) &&
+-		    !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
++		    mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
+ 			netif_tx_stop_queue(sq->txq);
+ 			sq->stats->stopped++;
+ 		}
+@@ -483,12 +488,15 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	if (unlikely(num_dma < 0))
+ 		goto err_drop;
+ 
+-	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
++	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
+ 
+ 	return;
+ 
+ err_drop:
+ 	stats->dropped++;
++	if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
++		mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
++					     be32_to_cpu(eseg->flow_table_metadata));
+ 	dev_kfree_skb_any(skb);
+ 	mlx5e_tx_flush(sq);
+ }
+@@ -645,9 +653,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
+ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ 				 struct mlx5_wqe_eth_seg *eseg)
+ {
+-	if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+-		eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+-							ptpsq->ts_cqe_ctr_mask);
++	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
++		eseg->flow_table_metadata =
++			cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ }
+ 
+ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+@@ -766,7 +774,7 @@ void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+ {
+ 	if (netif_tx_queue_stopped(sq->txq) &&
+ 	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+-	    mlx5e_ptpsq_fifo_has_room(sq) &&
++	    !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
+ 	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ 		netif_tx_wake_queue(sq->txq);
+ 		sq->stats->wake++;
+@@ -1031,7 +1039,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	if (unlikely(num_dma < 0))
+ 		goto err_drop;
+ 
+-	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
++	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
+ 
+ 	return;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index aa29f09e83564..0c83ef174275a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ 
+ static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+ {
+-	return mlx5_ptp_adjtime(ptp, delta);
++	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
++	struct mlx5_core_dev *mdev;
++
++	mdev = container_of(clock, struct mlx5_core_dev, clock);
++
++	return mlx5_ptp_adjtime_real_time(mdev, delta);
+ }
+ 
+ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index cba2a4afb5fda..235e170c65bb7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -28,7 +28,7 @@
+ struct mlx5_irq {
+ 	struct atomic_notifier_head nh;
+ 	cpumask_var_t mask;
+-	char name[MLX5_MAX_IRQ_NAME];
++	char name[MLX5_MAX_IRQ_FORMATTED_NAME];
+ 	struct mlx5_irq_pool *pool;
+ 	int refcount;
+ 	struct msi_map map;
+@@ -289,8 +289,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ 	else
+ 		irq_sf_set_name(pool, name, i);
+ 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+-	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+-		 "%s@pci:%s", name, pci_name(dev->pdev));
++	snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
++		 MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
+ 	err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
+ 			  &irq->nh);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index d3a77a0ab8488..c4d377f8df308 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -7,6 +7,9 @@
+ #include <linux/mlx5/driver.h>
+ 
+ #define MLX5_MAX_IRQ_NAME (32)
++#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
++#define MLX5_MAX_IRQ_FORMATTED_NAME \
++	(MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
+ /* max irq_index is 2047, so four chars */
+ #define MLX5_MAX_IRQ_IDX_CHARS (4)
+ #define MLX5_EQ_REFS_PER_IRQ (2)
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 0c76c162b8a9f..295366a85c630 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -624,6 +624,7 @@ struct rtl8169_private {
+ 
+ 	unsigned supports_gmii:1;
+ 	unsigned aspm_manageable:1;
++	unsigned dash_enabled:1;
+ 	dma_addr_t counters_phys_addr;
+ 	struct rtl8169_counters *counters;
+ 	struct rtl8169_tc_offsets tc_offset;
+@@ -1253,14 +1254,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
+ 	return r8168ep_ocp_read(tp, 0x128) & BIT(0);
+ }
+ 
+-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
++static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
++{
++	switch (tp->dash_type) {
++	case RTL_DASH_DP:
++		return r8168dp_check_dash(tp);
++	case RTL_DASH_EP:
++		return r8168ep_check_dash(tp);
++	default:
++		return false;
++	}
++}
++
++static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
+ {
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_28:
+ 	case RTL_GIGA_MAC_VER_31:
+-		return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
++		return RTL_DASH_DP;
+ 	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+-		return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
++		return RTL_DASH_EP;
+ 	default:
+ 		return RTL_DASH_NONE;
+ 	}
+@@ -1453,7 +1466,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ 
+ 	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ 
+-	if (tp->dash_type == RTL_DASH_NONE) {
++	if (!tp->dash_enabled) {
+ 		rtl_set_d3_pll_down(tp, !wolopts);
+ 		tp->dev->wol_enabled = wolopts ? 1 : 0;
+ 	}
+@@ -2512,7 +2525,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+ 
+ static void rtl_prepare_power_down(struct rtl8169_private *tp)
+ {
+-	if (tp->dash_type != RTL_DASH_NONE)
++	if (tp->dash_enabled)
+ 		return;
+ 
+ 	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+@@ -2586,9 +2599,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ 		rx_mode &= ~AcceptMulticast;
+ 	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ 		   dev->flags & IFF_ALLMULTI ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_48) {
++		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ 		/* accept all multicasts */
+ 	} else if (netdev_mc_empty(dev)) {
+ 		rx_mode &= ~AcceptMulticast;
+@@ -4648,10 +4659,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ 	rtl8169_cleanup(tp);
+ 	rtl_disable_exit_l1(tp);
+ 	rtl_prepare_power_down(tp);
++
++	if (tp->dash_type != RTL_DASH_NONE)
++		rtl8168_driver_stop(tp);
+ }
+ 
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++	if (tp->dash_type != RTL_DASH_NONE)
++		rtl8168_driver_start(tp);
++
+ 	pci_set_master(tp->pci_dev);
+ 	phy_init_hw(tp->phydev);
+ 	phy_resume(tp->phydev);
+@@ -4869,7 +4886,7 @@ static int rtl8169_runtime_idle(struct device *device)
+ {
+ 	struct rtl8169_private *tp = dev_get_drvdata(device);
+ 
+-	if (tp->dash_type != RTL_DASH_NONE)
++	if (tp->dash_enabled)
+ 		return -EBUSY;
+ 
+ 	if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
+@@ -4895,8 +4912,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ 	/* Restore original MAC address */
+ 	rtl_rar_set(tp, tp->dev->perm_addr);
+ 
+-	if (system_state == SYSTEM_POWER_OFF &&
+-	    tp->dash_type == RTL_DASH_NONE) {
++	if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
+ 		pci_wake_from_d3(pdev, tp->saved_wolopts);
+ 		pci_set_power_state(pdev, PCI_D3hot);
+ 	}
+@@ -5254,7 +5270,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ 	tp->aspm_manageable = !rc;
+ 
+-	tp->dash_type = rtl_check_dash(tp);
++	tp->dash_type = rtl_get_dash_type(tp);
++	tp->dash_enabled = rtl_dash_is_enabled(tp);
+ 
+ 	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
+ 
+@@ -5325,7 +5342,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* configure chip for default features */
+ 	rtl8169_set_features(dev, dev->features);
+ 
+-	if (tp->dash_type == RTL_DASH_NONE) {
++	if (!tp->dash_enabled) {
+ 		rtl_set_d3_pll_down(tp, true);
+ 	} else {
+ 		rtl_set_d3_pll_down(tp, false);
+@@ -5365,7 +5382,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			    "ok" : "ko");
+ 
+ 	if (tp->dash_type != RTL_DASH_NONE) {
+-		netdev_info(dev, "DASH enabled\n");
++		netdev_info(dev, "DASH %s\n",
++			    tp->dash_enabled ? "enabled" : "disabled");
+ 		rtl8168_driver_start(tp);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e840cadb2d75a..86ff015fba354 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5223,6 +5223,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 
+ 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
++	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+ 
+ 	if (netif_msg_rx_status(priv)) {
+ 		void *rx_head;
+@@ -5258,10 +5259,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 			len = 0;
+ 		}
+ 
++read_again:
+ 		if (count >= limit)
+ 			break;
+ 
+-read_again:
+ 		buf1_len = 0;
+ 		buf2_len = 0;
+ 		entry = next_entry;
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 21e9cac731218..2d5b021b4ea60 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ 	return addr;
+ }
+ 
+-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
++static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+ 	const struct iphdr *ip4h = ip_hdr(skb);
+ 	struct net_device *dev = skb->dev;
+@@ -453,13 +453,11 @@ out:
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++
++static noinline_for_stack int
++ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+-	struct net_device *dev = skb->dev;
+-	struct net *net = dev_net(dev);
+-	struct dst_entry *dst;
+-	int err, ret = NET_XMIT_DROP;
+ 	struct flowi6 fl6 = {
+ 		.flowi6_oif = dev->ifindex,
+ 		.daddr = ip6h->daddr,
+@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 		.flowi6_mark = skb->mark,
+ 		.flowi6_proto = ip6h->nexthdr,
+ 	};
++	struct dst_entry *dst;
++	int err;
+ 
+-	dst = ip6_route_output(net, NULL, &fl6);
+-	if (dst->error) {
+-		ret = dst->error;
++	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
++	err = dst->error;
++	if (err) {
+ 		dst_release(dst);
+-		goto err;
++		return err;
+ 	}
+ 	skb_dst_set(skb, dst);
++	return 0;
++}
++
++static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++{
++	struct net_device *dev = skb->dev;
++	int err, ret = NET_XMIT_DROP;
++
++	err = ipvlan_route_v6_outbound(dev, skb);
++	if (unlikely(err)) {
++		DEV_STATS_INC(dev, tx_errors);
++		kfree_skb(skb);
++		return err;
++	}
+ 
+ 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ 
+-	err = ip6_local_out(net, skb->sk, skb);
++	err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+-	goto out;
+-err:
+-	DEV_STATS_INC(dev, tx_errors);
+-	kfree_skb(skb);
+-out:
+ 	return ret;
+ }
+ #else
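
The ipvlan rework is a stack-size fix: the large struct flowi6 now lives in a helper tagged noinline_for_stack, so it occupies stack only for the duration of the route lookup instead of across the whole transmit path, and the compiler cannot inline it back. A minimal sketch of the annotation (do_lookup is a hypothetical stand-in):

#include <linux/compiler.h>	/* noinline_for_stack */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/flow.h>		/* struct flowi6 */

int do_lookup(struct flowi6 *fl6, struct sk_buff *skb);	/* hypothetical */

/* the big automatic variable stays confined to this frame */
static noinline_for_stack int my_route_v6(struct net_device *dev,
					  struct sk_buff *skb)
{
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
	};

	/* ... fill the rest of fl6 from the packet headers ... */
	return do_lookup(&fl6, skb);
}
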
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index ed908165a8b4e..347f288350619 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ 	if (dev->flags & IFF_UP) {
+ 		if (change & IFF_ALLMULTI)
+ 			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+-		if (change & IFF_PROMISC)
++		if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
+ 			dev_set_promiscuity(lowerdev,
+ 					    dev->flags & IFF_PROMISC ? 1 : -1);
+ 
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index d0aaa5cad8533..24ae13ea03b0b 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1568,6 +1568,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ 	pl->config = config;
+ 	if (config->type == PHYLINK_NETDEV) {
+ 		pl->netdev = to_net_dev(config->dev);
++		netif_carrier_off(pl->netdev);
+ 	} else if (config->type == PHYLINK_DEV) {
+ 		pl->dev = config->dev;
+ 	} else {
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index d855a18308d78..f411ded5344a8 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	// Rollball protocol to talk to the PHY.
+ 	SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ 
++	// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but reports
++	// 1.2GBd NRZ in its EEPROM
++	SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
++		  sfp_fixup_ignore_tx_fault),
++
+ 	SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ 
+ 	// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ 		  sfp_fixup_ignore_tx_fault),
+ 
++	// FS 2.5G Base-T
++	SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++
+ 	// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ 	// 2500MBd NRZ in their EEPROM
+ 	SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 18283b7b94bcd..94ef6f9ca5103 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -462,6 +462,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ 	case PPPIOCSMRU:
+ 		if (get_user(val, (int __user *) argp))
+ 			break;
++		if (val > U16_MAX) {
++			err = -EINVAL;
++			break;
++		}
+ 		if (val < PPP_MRU)
+ 			val = PPP_MRU;
+ 		ap->mru = val;
+@@ -697,7 +701,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+ 
+ 	/* strip address/control field if present */
+ 	p = skb->data;
+-	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
++	if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ 		/* chop off address/control */
+ 		if (skb->len < 3)
+ 			goto err;
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index f9518e1c99039..fe89bc61e5317 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ 				 u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath10k_gstrings_stats,
++		memcpy(data, ath10k_gstrings_stats,
+ 		       sizeof(ath10k_gstrings_stats));
+ }
+ 
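
The ath10k one-character fix is about object sizes, not addresses: for a 2-D array, ath10k_gstrings_stats and *ath10k_gstrings_stats decay to the same address, but the latter names only the first ETH_GSTRING_LEN-byte row, so a memcpy of sizeof(the whole array) from it reads past that row as far as FORTIFY_SOURCE is concerned. A small demonstration:

#include <stdio.h>
#include <string.h>

static const char names[3][8] = { "rx", "tx", "drops" };

int main(void)
{
	char out[sizeof(names)];

	/*
	 * names  -> pointer to the whole 3x8 array (24 bytes visible)
	 * *names -> names[0], a single 8-byte row
	 * Copying sizeof(names) from *names overruns that row's bounds in
	 * the compiler's eyes, even though both addresses are equal.
	 */
	memcpy(out, names, sizeof(names));
	printf("%s %s %s\n", out, out + 8, out + 16);	/* rx tx drops */
	return 0;
}
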
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 26214c00cd0d7..2c39bad7ebfb9 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+ 
+ static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+ {
+-	ath10k_ce_disable_interrupts(ar);
++	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++	int id;
++
++	for (id = 0; id < CE_COUNT_MAX; id++)
++		disable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+ 
+ static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+ {
+-	ath10k_ce_enable_interrupts(ar);
++	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++	int id;
++
++	for (id = 0; id < CE_COUNT_MAX; id++)
++		enable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+ 
+ static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+@@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ 		goto err_free_rri;
+ 	}
+ 
++	ath10k_ce_enable_interrupts(ar);
++
+ 	return 0;
+ 
+ err_free_rri:
+@@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
+ 
+ 	for (id = 0; id < CE_COUNT_MAX; id++) {
+ 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+-				  ath10k_snoc_per_engine_handler, 0,
+-				  ce_name[id], ar);
++				  ath10k_snoc_per_engine_handler,
++				  IRQF_NO_AUTOEN, ce_name[id], ar);
+ 		if (ret) {
+ 			ath10k_err(ar,
+ 				   "failed to register IRQ handler for CE %d: %d\n",
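
Requesting the copy-engine interrupts with IRQF_NO_AUTOEN installs the handler while leaving the line masked; enable_irq()/disable_irq() then gate delivery at the interrupt controller, so the driver no longer has to touch copy-engine registers while the device may not be ready, and nothing can fire before power-up completes. The pattern in isolation (names illustrative):

#include <linux/interrupt.h>

static irqreturn_t my_ce_handler(int irq, void *data)
{
	/* ... service the copy engine ... */
	return IRQ_HANDLED;
}

static int my_ce_setup(int irq, void *ctx)
{
	int ret;

	/* handler registered, line still masked */
	ret = request_irq(irq, my_ce_handler, IRQF_NO_AUTOEN, "my-ce", ctx);
	if (ret)
		return ret;

	/* ... finish bringing the device up ... */

	enable_irq(irq);	/* only now can the handler run */
	return 0;
}
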
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 1e488eed282b5..8ed7d3b7f049f 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1621,14 +1621,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+ 	u8 pdev_id;
+ 
+ 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
++
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ 	if (!ar) {
+ 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+-		return;
++		goto out;
+ 	}
+ 
+ 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ 				ar->ab->pktlog_defs_checksum);
++
++out:
++	rcu_read_unlock();
+ }
+ 
+ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
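
ath11k_mac_get_ar_by_pdev_id dereferences RCU-protected pdev state, so the lookup and every use of its result must sit inside one rcu_read_lock() section, which is what this hunk and the wmi.c hunks below add, funneling the error paths through a common unlock label. The generic shape, with illustrative opaque types and helpers:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct base;
struct radio;

struct radio *lookup_radio(struct base *ab, u8 pdev_id);	/* rcu_dereference()s internally */
void handle_radio_event(struct radio *ar);

static void my_event_handler(struct base *ab, u8 pdev_id)
{
	struct radio *ar;

	rcu_read_lock();
	ar = lookup_radio(ab, pdev_id);
	if (!ar)
		goto out;		/* still must unlock */

	handle_radio_event(ar);		/* no sleeping inside the section */
out:
	rcu_read_unlock();
}
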
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 23ad6825e5be5..1c07f55c25e67 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -8337,6 +8337,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ 		   ev->freq_offset, ev->sidx);
+ 
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 
+ 	if (!ar) {
+@@ -8354,6 +8356,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ 		ieee80211_radar_detected(ar->hw);
+ 
+ exit:
++	rcu_read_unlock();
++
+ 	kfree(tb);
+ }
+ 
+@@ -8383,15 +8387,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ 		   ev->temp, ev->pdev_id);
+ 
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 	if (!ar) {
+ 		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+-		kfree(tb);
+-		return;
++		goto exit;
+ 	}
+ 
+ 	ath11k_thermal_event_temperature(ar, ev->temp);
+ 
++exit:
++	rcu_read_unlock();
++
+ 	kfree(tb);
+ }
+ 
+@@ -8611,12 +8619,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ 		return;
+ 	}
+ 
++	rcu_read_lock();
++
+ 	arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ 	if (!arvif) {
+ 		ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ 			    ev->vdev_id);
+-		kfree(tb);
+-		return;
++		goto exit;
+ 	}
+ 
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+@@ -8633,6 +8642,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ 
+ 	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ 				   (void *)&replay_ctr_be, GFP_ATOMIC);
++exit:
++	rcu_read_unlock();
+ 
+ 	kfree(tb);
+ }
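
Editorial note: the ath11k handlers fixed above all share one shape: ath11k_mac_get_ar_by_pdev_id() and ath11k_mac_get_arvif_by_vdev_id() return RCU-protected pointers, so the lookup and every use of the result must sit inside a single read-side critical section, with one unlock point shared by the error and success paths. A schematic of the pattern; the demo_* names are placeholders, not driver API, and this is not meant to compile standalone:

    #include <linux/rcupdate.h>

    static void demo_handle_event(int pdev_id)
    {
            struct demo_pdev *pdev;

            rcu_read_lock();

            pdev = demo_lookup_pdev(pdev_id);   /* RCU-protected, may be NULL */
            if (!pdev)
                    goto out;

            demo_use_pdev(pdev);                /* must not sleep here */

    out:
            rcu_read_unlock();
    }
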
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index f933896f2a68d..6893466f61f04 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+ 
+ 	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ 	crypto_free_shash(peer->tfm_mmic);
++	peer->dp_setup_done = false;
+ 	spin_unlock_bh(&ab->base_lock);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index fcb91b8ef00e3..71d12c28b3a76 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ 
+ 	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ 	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
++	if (len > (skb->len - struct_size(msg, data, 0))) {
++		ath12k_warn(ab,
++			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
++			    len, skb->len);
++		return -EINVAL;
++	}
++
+ 	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ 	ppdu_id = le32_to_cpu(msg->ppdu_id);
+ 
+@@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ 		goto exit;
+ 	}
+ 
++	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
++		spin_unlock_bh(&ar->data_lock);
++		ath12k_warn(ab,
++			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
++			    ppdu_info->ppdu_stats.common.num_users,
++			    HTT_PPDU_STATS_MAX_USERS);
++		ret = -EINVAL;
++		goto exit;
++	}
++
+ 	/* back up data rate tlv for all peers */
+ 	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+@@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ 	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ 	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ 			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+-	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ 
++	rcu_read_lock();
++	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ 	if (!ar) {
+ 		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+-		return;
++		goto exit;
+ 	}
+ 
+ 	spin_lock_bh(&ar->data_lock);
+@@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ 	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+ 
+ 	spin_unlock_bh(&ar->data_lock);
++exit:
++	rcu_read_unlock();
+ }
+ 
+ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+@@ -2747,6 +2767,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
+ 	}
+ 
+ 	peer->tfm_mmic = tfm;
++	peer->dp_setup_done = true;
+ 	spin_unlock_bh(&ab->base_lock);
+ 
+ 	return 0;
+@@ -3213,6 +3234,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ 		ret = -ENOENT;
+ 		goto out_unlock;
+ 	}
++
++	if (!peer->dp_setup_done) {
++		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
++			    peer->addr, peer_id);
++		ret = -ENOENT;
++		goto out_unlock;
++	}
++
+ 	rx_tid = &peer->rx_tid[tid];
+ 
+ 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
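
Editorial note: the two checks added above validate firmware-supplied sizes before trusting them; struct_size(msg, data, 0) is the header size up to the flexible payload. A userspace analogue of the length check, runnable as-is (the message layout is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct msg {
            uint32_t info;
            uint32_t ppdu_id;
            uint8_t  data[];    /* flexible payload */
    };

    /* claimed_len comes from the peer and is untrusted */
    static int parse_msg(size_t buf_len, uint32_t claimed_len)
    {
            /* sizeof(struct msg) plays the role of struct_size(msg, data, 0) */
            if (buf_len < sizeof(struct msg) ||
                claimed_len > buf_len - sizeof(struct msg)) {
                    fprintf(stderr, "bogus payload size %u (buffer %zu)\n",
                            claimed_len, buf_len);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", parse_msg(64, 32));    /* ok: 0 */
            printf("%d\n", parse_msg(64, 1000));  /* rejected: -1 */
            return 0;
    }
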
+diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
+index 42f1140baa4fe..f83d3e09ae366 100644
+--- a/drivers/net/wireless/ath/ath12k/mhi.c
++++ b/drivers/net/wireless/ath/ath12k/mhi.c
+@@ -370,8 +370,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ 	ret = ath12k_mhi_get_msi(ab_pci);
+ 	if (ret) {
+ 		ath12k_err(ab, "failed to get msi for mhi\n");
+-		mhi_free_controller(mhi_ctrl);
+-		return ret;
++		goto free_controller;
+ 	}
+ 
+ 	mhi_ctrl->iova_start = 0;
+@@ -388,11 +387,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ 	ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ 	if (ret) {
+ 		ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+-		mhi_free_controller(mhi_ctrl);
+-		return ret;
++		goto free_controller;
+ 	}
+ 
+ 	return 0;
++
++free_controller:
++	mhi_free_controller(mhi_ctrl);
++	ab_pci->mhi_ctrl = NULL;
++	return ret;
+ }
+ 
+ void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
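
Editorial note: the mhi.c change above is a standard conversion to a single unwind label, with the extra detail that the owner's pointer is cleared so nothing can later use the freed controller. The same shape in plain, runnable userspace C; struct owner is made up for the example:

    #include <stdio.h>
    #include <stdlib.h>

    struct owner {
            char *ctrl;
    };

    static int owner_register(struct owner *o)
    {
            int ret;

            o->ctrl = malloc(64);
            if (!o->ctrl)
                    return -1;

            ret = -1;    /* pretend a later init step failed */
            if (ret)
                    goto free_controller;

            return 0;

    free_controller:
            free(o->ctrl);
            o->ctrl = NULL;    /* no dangling pointer left in the owner */
            return ret;
    }

    int main(void)
    {
            struct owner o = { 0 };

            printf("register: %d, ctrl: %p\n", owner_register(&o), (void *)o.ctrl);
            return 0;
    }
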
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index b296dc0e2f671..c6edb24cbedd8 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -44,6 +44,9 @@ struct ath12k_peer {
+ 	struct ppdu_user_delayba ppdu_stats_delayba;
+ 	bool delayba_flag;
+ 	bool is_authorized;
++
++	/* protected by ab->data_lock */
++	bool dp_setup_done;
+ };
+ 
+ void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index eebc5a65ce3b4..491d23ddabf06 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -3799,6 +3799,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ 			return ret;
+ 		}
++
++		if (reg_cap.phy_id >= MAX_RADIOS) {
++			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
++			return -EINVAL;
++		}
++
+ 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ 	}
+ 	return 0;
+@@ -6228,6 +6234,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ 		   ev->freq_offset, ev->sidx);
+ 
++	rcu_read_lock();
++
+ 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+ 
+ 	if (!ar) {
+@@ -6245,6 +6253,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ 		ieee80211_radar_detected(ar->hw);
+ 
+ exit:
++	rcu_read_unlock();
++
+ 	kfree(tb);
+ }
+ 
+@@ -6263,11 +6273,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ 	ath12k_dbg(ab, ATH12K_DBG_WMI,
+ 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+ 
++	rcu_read_lock();
++
+ 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ 	if (!ar) {
+ 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+-		return;
++		goto exit;
+ 	}
++
++exit:
++	rcu_read_unlock();
+ }
+ 
+ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index fb7a2952d0ce8..d9bac1c343490 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1333,7 +1333,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ 			  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath9k_gstrings_stats,
++		memcpy(data, ath9k_gstrings_stats,
+ 		       sizeof(ath9k_gstrings_stats));
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index c55aab01fff5d..e79bbcd3279af 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ 			      u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath9k_htc_gstrings_stats,
++		memcpy(data, ath9k_htc_gstrings_stats,
+ 		       sizeof(ath9k_htc_gstrings_stats));
+ }
+ 
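
Editorial note: the memcpy fixes here (and the identical ones in ath9k/debug.c above and in plfxlc and mac80211_hwsim below) all drop a dereference of a 2-D string array. 'tbl' and '*tbl' have the same address, but '*tbl' denotes only the first ETH_GSTRING_LEN-byte row, so copying sizeof(tbl) from it looks like an overread to FORTIFY_SOURCE and -Warray-bounds. A runnable demonstration, with invented stat names:

    #include <stdio.h>
    #include <string.h>

    #define GSTRING_LEN 32

    static const char stats_strings[][GSTRING_LEN] = {
            "tx_pkts_nic", "rx_pkts_nic", "tx_bytes_nic",
    };

    int main(void)
    {
            char out[sizeof(stats_strings)];

            /* Copy from the whole array (96 bytes), not from its 32-byte
             * first row: same address, but the declared object size of
             * '*stats_strings' is only one row. */
            memcpy(out, stats_strings, sizeof(stats_strings));

            printf("copied %zu bytes, first entry: %s\n",
                   sizeof(stats_strings), out);
            return 0;
    }
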
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+index 6e1ad65527d12..4ab55a1fcbf04 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+@@ -60,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ 		link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ 								    mvmvif);
+-		if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
++		if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
+ 			return -EINVAL;
+ 
+ 		rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+@@ -243,7 +243,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	int ret;
+ 
+ 	if (WARN_ON(!link_info ||
+-		    link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
++		    link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ 		return -EINVAL;
+ 
+ 	RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 2ede69132fee9..177a4628a913e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -536,16 +536,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+ 
+ 		/*
+-		 * For data packets rate info comes from the fw. Only
+-		 * set rate/antenna during connection establishment or in case
+-		 * no station is given.
++		 * For data and mgmt packets rate info comes from the fw. Only
++		 * set rate/antenna for injected frames with fixed rate, or
++		 * when no sta is given.
+ 		 */
+-		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+-		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++		if (unlikely(!sta ||
++			     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ 			flags |= IWL_TX_FLAGS_CMD_RATE;
+ 			rate_n_flags =
+ 				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ 							    hdr->frame_control);
++		} else if (!ieee80211_is_data(hdr->frame_control) ||
++			   mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++			/* These are important frames */
++			flags |= IWL_TX_FLAGS_HIGH_PRI;
+ 		}
+ 
+ 		if (mvm->trans->trans_cfg->device_family >=
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 95610a117d2f0..ed5a220763ce6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -17,6 +17,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
+ 		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
+ 		.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
++	{ PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922),
++		.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ 		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 58bbf50081e47..9eb115c79c90a 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
+ 	}
+ 
+ 	if (!wilc->vmm_table)
+-		wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++		wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ 
+ 	if (!wilc->vmm_table) {
+ 		ret = -ENOBUFS;
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 94ee831b5de35..506d2f31efb5a 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ 				  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *et_strings, sizeof(et_strings));
++		memcpy(data, et_strings, sizeof(et_strings));
+ }
+ 
+ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 23307c8baea21..6dc153a267872 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -3170,7 +3170,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ 					  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *mac80211_hwsim_gstrings_stats,
++		memcpy(data, mac80211_hwsim_gstrings_stats,
+ 		       sizeof(mac80211_hwsim_gstrings_stats));
+ }
+ 
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index e692809ff8227..3219c51777507 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -100,6 +100,32 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+ 	return IORESOURCE_MEM;
+ }
+ 
++static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
++				    int ns, int pna)
++{
++	u64 cp, s, da;
++
++	/* Check that flags match */
++	if (*addr != *range)
++		return OF_BAD_ADDR;
++
++	/* Read address values, skipping high cell */
++	cp = of_read_number(range + 1, na - 1);
++	s  = of_read_number(range + na + pna, ns);
++	da = of_read_number(addr + 1, na - 1);
++
++	pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
++
++	if (da < cp || da >= (cp + s))
++		return OF_BAD_ADDR;
++	return da - cp;
++}
++
++static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na)
++{
++	/* Keep "flags" part (high cell) in translated address */
++	return of_bus_default_translate(addr + 1, offset, na - 1);
++}
+ 
+ #ifdef CONFIG_PCI
+ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+@@ -374,8 +400,8 @@ static struct of_bus of_busses[] = {
+ 		.addresses = "reg",
+ 		.match = of_bus_default_flags_match,
+ 		.count_cells = of_bus_default_count_cells,
+-		.map = of_bus_default_map,
+-		.translate = of_bus_default_translate,
++		.map = of_bus_default_flags_map,
++		.translate = of_bus_default_flags_translate,
+ 		.has_flags = true,
+ 		.get_flags = of_bus_default_flags_get_flags,
+ 	},
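
Editorial note: the two new callbacks treat the high address cell as a flags word: it must match the range exactly, it is skipped when computing the offset, and it is re-attached on translation. A runnable worked example of the map step with na=2 (one flags cell plus one address cell), pna=1, ns=1; the cell values are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* big-endian cell concatenation, as of_read_number() does */
    static uint64_t read_cells(const uint32_t *cells, int n)
    {
            uint64_t v = 0;

            while (n--)
                    v = (v << 32) | *cells++;
            return v;
    }

    int main(void)
    {
            int na = 2, pna = 1, ns = 1;
            /* range = <flags child-addr parent-addr size> */
            uint32_t range[] = { 0x42, 0x1000, 0x80000000, 0x2000 };
            uint32_t addr[]  = { 0x42, 0x1800 };

            if (addr[0] != range[0]) {    /* flags must match exactly */
                    puts("OF_BAD_ADDR");
                    return 1;
            }

            uint64_t cp = read_cells(range + 1, na - 1);
            uint64_t s  = read_cells(range + na + pna, ns);
            uint64_t da = read_cells(addr + 1, na - 1);

            if (da < cp || da >= cp + s) {
                    puts("OF_BAD_ADDR");
                    return 1;
            }
            printf("offset = 0x%llx\n", (unsigned long long)(da - cp));
            return 0;
    }

This prints offset = 0x800, i.e. the child address 0x1800 landed 0x800 bytes into the 0x2000-byte range starting at 0x1000.
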
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 6f5e5f0230d39..332bcc0053a5e 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
+ 	.priority	= INT_MAX,
+ };
+ 
++/* qemu soft power-off function */
++static int qemu_power_off(struct sys_off_data *data)
++{
++	/* this turns the system off via SeaBIOS */
++	gsc_writel(0, (unsigned long) data->cb_data);
++	pdc_soft_power_button(1);
++	return NOTIFY_DONE;
++}
+ 
+ static int __init power_init(void)
+ {
+@@ -226,7 +234,13 @@ static int __init power_init(void)
+ 				soft_power_reg);
+ 	}
+ 
+-	power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
++	power_task = NULL;
++	if (running_on_qemu && soft_power_reg)
++		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
++					qemu_power_off, (void *)soft_power_reg);
++	else
++		power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
++					KTHREAD_NAME);
+ 	if (IS_ERR(power_task)) {
+ 		printk(KERN_ERR DRIVER_NAME ": thread creation failed.  Driver not loaded.\n");
+ 		pdc_soft_power_button(0);
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index ec56110055665..e5519978ba475 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -375,7 +375,7 @@ fail_probe:
+ 	return ret;
+ }
+ 
+-static int __exit exynos_pcie_remove(struct platform_device *pdev)
++static int exynos_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct exynos_pcie *ep = platform_get_drvdata(pdev);
+ 
+@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
+ 
+ static struct platform_driver exynos_pcie_driver = {
+ 	.probe		= exynos_pcie_probe,
+-	.remove		= __exit_p(exynos_pcie_remove),
++	.remove		= exynos_pcie_remove,
+ 	.driver = {
+ 		.name	= "exynos-pcie",
+ 		.of_match_table = exynos_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 78818853af9e4..d2634dafb68e5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1101,7 +1101,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
+ 	{ },
+ };
+ 
+-static int __init ks_pcie_probe(struct platform_device *pdev)
++static int ks_pcie_probe(struct platform_device *pdev)
+ {
+ 	const struct dw_pcie_host_ops *host_ops;
+ 	const struct dw_pcie_ep_ops *ep_ops;
+@@ -1303,7 +1303,7 @@ err_link:
+ 	return ret;
+ }
+ 
+-static int __exit ks_pcie_remove(struct platform_device *pdev)
++static int ks_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ 	struct device_link **link = ks_pcie->link;
+@@ -1319,9 +1319,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct platform_driver ks_pcie_driver __refdata = {
++static struct platform_driver ks_pcie_driver = {
+ 	.probe  = ks_pcie_probe,
+-	.remove = __exit_p(ks_pcie_remove),
++	.remove = ks_pcie_remove,
+ 	.driver = {
+ 		.name	= "keystone-pcie",
+ 		.of_match_table = ks_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 1f2ee71da4da2..8e6f6ac42dc96 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -732,6 +732,53 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+ 
+ }
+ 
++static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
++{
++	u32 lnkcap, lwsc, plc;
++	u8 cap;
++
++	if (!num_lanes)
++		return;
++
++	/* Set the number of lanes */
++	plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++	plc &= ~PORT_LINK_FAST_LINK_MODE;
++	plc &= ~PORT_LINK_MODE_MASK;
++
++	/* Set link width speed control register */
++	lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
++	lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++	switch (num_lanes) {
++	case 1:
++		plc |= PORT_LINK_MODE_1_LANES;
++		lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
++		break;
++	case 2:
++		plc |= PORT_LINK_MODE_2_LANES;
++		lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
++		break;
++	case 4:
++		plc |= PORT_LINK_MODE_4_LANES;
++		lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
++		break;
++	case 8:
++		plc |= PORT_LINK_MODE_8_LANES;
++		lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
++		break;
++	default:
++		dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
++		return;
++	}
++	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
++	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
++
++	cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++	lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
++	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
++	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
++	dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
++}
++
+ void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+ 	int max_region, ob, ib;
+@@ -1013,49 +1060,5 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 	val |= PORT_LINK_DLL_LINK_EN;
+ 	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ 
+-	if (!pci->num_lanes) {
+-		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+-		return;
+-	}
+-
+-	/* Set the number of lanes */
+-	val &= ~PORT_LINK_FAST_LINK_MODE;
+-	val &= ~PORT_LINK_MODE_MASK;
+-	switch (pci->num_lanes) {
+-	case 1:
+-		val |= PORT_LINK_MODE_1_LANES;
+-		break;
+-	case 2:
+-		val |= PORT_LINK_MODE_2_LANES;
+-		break;
+-	case 4:
+-		val |= PORT_LINK_MODE_4_LANES;
+-		break;
+-	case 8:
+-		val |= PORT_LINK_MODE_8_LANES;
+-		break;
+-	default:
+-		dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
+-		return;
+-	}
+-	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+-	/* Set link width speed control register */
+-	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+-	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+-	switch (pci->num_lanes) {
+-	case 1:
+-		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+-		break;
+-	case 2:
+-		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+-		break;
+-	case 4:
+-		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+-		break;
+-	case 8:
+-		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+-		break;
+-	}
+-	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
++	dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
+index d09507f822a7d..a824d8e8edb9d 100644
+--- a/drivers/pci/controller/dwc/pcie-kirin.c
++++ b/drivers/pci/controller/dwc/pcie-kirin.c
+@@ -742,7 +742,7 @@ err:
+ 	return ret;
+ }
+ 
+-static int __exit kirin_pcie_remove(struct platform_device *pdev)
++static int kirin_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+ 
+@@ -819,7 +819,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+ 
+ static struct platform_driver kirin_pcie_driver = {
+ 	.probe			= kirin_pcie_probe,
+-	.remove	        	= __exit_p(kirin_pcie_remove),
++	.remove	        	= kirin_pcie_remove,
+ 	.driver			= {
+ 		.name			= "kirin-pcie",
+ 		.of_match_table		= kirin_pcie_match,
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 267e1247d548f..4a9741428619f 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -121,6 +121,7 @@
+ 
+ /* ELBI registers */
+ #define ELBI_SYS_STTS				0x08
++#define ELBI_CS2_ENABLE				0xa4
+ 
+ /* DBI registers */
+ #define DBI_CON_STATUS				0x44
+@@ -253,6 +254,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ 	disable_irq(pcie_ep->perst_irq);
+ }
+ 
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++				    u32 reg, size_t size, u32 val)
++{
++	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++	int ret;
++
++	writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++	if (ret)
++		dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++	writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+ {
+ 	int ret;
+@@ -451,6 +467,7 @@ static const struct dw_pcie_ops pci_ops = {
+ 	.link_up = qcom_pcie_dw_link_up,
+ 	.start_link = qcom_pcie_dw_start_link,
+ 	.stop_link = qcom_pcie_dw_stop_link,
++	.write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+ 
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index ccff8cde5cff6..07cb0818a5138 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -9,6 +9,7 @@
+  * Author: Vidya Sagar <vidyas@nvidia.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+@@ -347,8 +348,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+ 	 */
+ 	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ 	if (val & PCI_EXP_LNKSTA_LBMS) {
+-		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+-				     PCI_EXP_LNKSTA_NLW_SHIFT;
++		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ 		if (pcie->init_link_width > current_link_width) {
+ 			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ 			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+@@ -761,8 +761,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+ 
+ 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ 				  PCI_EXP_LNKSTA);
+-	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
+-				PCI_EXP_LNKSTA_NLW_SHIFT;
++	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+ 
+ 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ 				  PCI_EXP_LNKCTL);
+@@ -921,7 +920,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ 	/* Configure Max lane width from DT */
+ 	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ 	val &= ~PCI_EXP_LNKCAP_MLW;
+-	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
++	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ 	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+ 
+ 	/* Clear Slot Clock Configuration bit if SRNS configuration */
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index c931b1b07b1d8..0cacd58f6c05e 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -265,7 +265,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+ 	 */
+ 	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ 	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+-	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
++	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ 	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ 
+ 	/* Disable Root Bridge I/O space, memory space and bus mastering. */
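
Editorial note: this hunk and several PCI changes below replace open-coded mask-and-shift arithmetic with FIELD_GET()/FIELD_PREP(). A self-contained reimplementation of the two macros for constant masks, runnable in userspace, using the real PCI_EXP_LNKCAP_MLW mask (bits 9:4):

    #include <stdint.h>
    #include <stdio.h>

    /* shift = index of the mask's lowest set bit */
    #define FIELD_SHIFT(mask)     (__builtin_ctz(mask))
    #define FIELD_GET(mask, reg)  (((reg) & (mask)) >> FIELD_SHIFT(mask))
    #define FIELD_PREP(mask, val) (((val) << FIELD_SHIFT(mask)) & (mask))

    #define PCI_EXP_LNKCAP_MLW 0x000003f0    /* Maximum Link Width */

    int main(void)
    {
            uint32_t lnkcap = 0;

            /* encode a x4 link, as the mvebu hunk above now does */
            lnkcap &= ~PCI_EXP_LNKCAP_MLW;
            lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, 4);

            printf("lnkcap = 0x%08x, width = x%u\n",
                   lnkcap, FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap));
            return 0;
    }

The macro derives the shift from the mask itself, which also explains the constants change in the pci_rebar_get_possible_sizes() hunk further down: FIELD_GET() already shifts right by 4, so the quirk values 0x7000/0x3f000 become 0x700/0x3f00.
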
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a05350a4e49cb..05b7357bd2586 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+ {
+ 	int acpi_state, d_max;
+ 
+-	if (pdev->no_d3cold)
++	if (pdev->no_d3cold || !pdev->d3cold_allowed)
+ 		d_max = ACPI_STATE_D3_HOT;
+ 	else
+ 		d_max = ACPI_STATE_D3_COLD;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index ab32a91f287b4..e1e53d1b88a46 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -12,7 +12,7 @@
+  * Modeled after usb's driverfs.c
+  */
+ 
+-
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/pci.h>
+@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
+ 	if (err)
+ 		return -EINVAL;
+ 
+-	return sysfs_emit(buf, "%u\n",
+-		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
++	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
+ }
+ static DEVICE_ATTR_RO(current_link_width);
+ 
+@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	pdev->d3cold_allowed = !!val;
+-	if (pdev->d3cold_allowed)
+-		pci_d3cold_enable(pdev);
+-	else
+-		pci_d3cold_disable(pdev);
++	pci_bridge_d3_update(pdev);
+ 
+ 	pm_runtime_resume(dev);
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 702fe577089b4..a7793abdd74ee 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -732,15 +732,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+ {
+ 	u16 vsec = 0;
+ 	u32 header;
++	int ret;
+ 
+ 	if (vendor != dev->vendor)
+ 		return 0;
+ 
+ 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ 						     PCI_EXT_CAP_ID_VNDR))) {
+-		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+-					  &header) == PCIBIOS_SUCCESSFUL &&
+-		    PCI_VNDR_HEADER_ID(header) == cap)
++		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
++		if (ret != PCIBIOS_SUCCESSFUL)
++			continue;
++
++		if (PCI_VNDR_HEADER_ID(header) == cap)
+ 			return vsec;
+ 	}
+ 
+@@ -3743,14 +3746,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ 		return 0;
+ 
+ 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+-	cap &= PCI_REBAR_CAP_SIZES;
++	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+ 
+ 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+-	    bar == 0 && cap == 0x7000)
+-		cap = 0x3f000;
++	    bar == 0 && cap == 0x700)
++		return 0x3f00;
+ 
+-	return cap >> 4;
++	return cap;
+ }
+ EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
+ 
+@@ -6252,8 +6255,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ 
+ 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+-		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+-			PCI_EXP_LNKSTA_NLW_SHIFT;
++		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ 
+ 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+ 
+@@ -6325,7 +6327,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+ 
+ 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ 	if (lnkcap)
+-		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+ 
+ 	return PCIE_LNK_WIDTH_UNKNOWN;
+ }
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index f6c24ded134cd..67025ee2b7454 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -29,6 +29,7 @@
+ #include <linux/kfifo.h>
+ #include <linux/slab.h>
+ #include <acpi/apei.h>
++#include <acpi/ghes.h>
+ #include <ras/ras_event.h>
+ 
+ #include "../pci.h"
+@@ -1010,6 +1011,15 @@ static void aer_recover_work_func(struct work_struct *work)
+ 			continue;
+ 		}
+ 		cper_print_aer(pdev, entry.severity, entry.regs);
++		/*
++		 * Memory for aer_capability_regs (entry.regs) is allocated from the
++		 * ghes_estatus_pool so that it is not overwritten when multiple
++		 * sections are present in the error status. Free it here, once the
++		 * data has been processed.
++		 */
++		ghes_estatus_pool_region_free((unsigned long)entry.regs,
++					      sizeof(struct aer_capability_regs));
++
+ 		if (entry.severity == AER_NONFATAL)
+ 			pcie_do_recovery(pdev, pci_channel_io_normal,
+ 					 aer_root_reset);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 530c3bb5708c5..fc18e42f0a6ed 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1248,6 +1248,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
+ 			link->aspm_disable &= ~ASPM_STATE_L1;
+ 	} else {
+ 		link->aspm_disable |= state;
++		if (state & ASPM_STATE_L1)
++			link->aspm_disable |= ASPM_STATE_L1SS;
+ 	}
+ 
+ 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 24a83cf5ace8c..cd08d39fdb1ff 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1653,15 +1653,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+ {
+ #ifdef CONFIG_PCI_QUIRKS
+-	int pos;
++	int pos, ret;
+ 	u32 header, tmp;
+ 
+ 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+ 
+ 	for (pos = PCI_CFG_SPACE_SIZE;
+ 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+-		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+-		    || header != tmp)
++		ret = pci_read_config_dword(dev, pos, &tmp);
++		if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
+ 			return false;
+ 	}
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index eb65170b97ff0..d78c75fedf112 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5383,7 +5383,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+  */
+ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ {
+-	int pos, i = 0;
++	int pos, i = 0, ret;
+ 	u8 next_cap;
+ 	u16 reg16, *cap;
+ 	struct pci_cap_saved_state *state;
+@@ -5429,8 +5429,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ 		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ 
+ 		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+-		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+-		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
++		ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
++		if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
+ 			pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+ 
+ 		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+@@ -5507,6 +5507,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ 
+ #ifdef CONFIG_PCI_ATS
++static void quirk_no_ats(struct pci_dev *pdev)
++{
++	pci_info(pdev, "disabling ATS\n");
++	pdev->ats_cap = 0;
++}
++
+ /*
+  * Some devices require additional driver setup to enable ATS.  Don't use
+  * ATS for those devices as ATS will be enabled before the driver has had a
+@@ -5520,14 +5526,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ 		    (pdev->subsystem_device == 0xce19 ||
+ 		     pdev->subsystem_device == 0xcc10 ||
+ 		     pdev->subsystem_device == 0xcc08))
+-			goto no_ats;
+-		else
+-			return;
++			quirk_no_ats(pdev);
++	} else {
++		quirk_no_ats(pdev);
+ 	}
+-
+-no_ats:
+-	pci_info(pdev, "disabling ATS\n");
+-	pdev->ats_cap = 0;
+ }
+ 
+ /* AMD Stoney platform GPU */
+@@ -5550,6 +5552,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
++
++/*
++ * Intel IPU E2000 revisions before C0 implement incorrect endianness
++ * in ATS Invalidate Request message body. Disable ATS for those devices.
++ */
++static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
++{
++	if (pdev->revision < 0x20)
++		quirk_no_ats(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+ 
+ /* Freescale PCIe doesn't support MSI in RC mode */
+@@ -6140,3 +6161,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++	pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
+index e2b7827c45635..9363c31f31b89 100644
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -635,6 +635,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
+ 
+ 	cspmu = to_arm_cspmu(event->pmu);
+ 
++	if (event->attr.type != event->pmu->type)
++		return -ENOENT;
++
+ 	/*
+ 	 * Following other "uncore" PMUs, we do not support sampling mode or
+ 	 * attach to a task (per-process mode).
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 4163ff5174715..6225239b64550 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -629,6 +629,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+ 
+ 	/* Firmware counters don't support overflow yet */
+ 	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
++	if (fidx == RISCV_MAX_COUNTERS) {
++		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
++		return IRQ_NONE;
++	}
++
+ 	event = cpu_hw_evt->events[fidx];
+ 	if (!event) {
+ 		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
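
Editorial note: the guard added above covers a documented property of find_first_bit(): when no bit is set it returns the bitmap size, not an error code, so indexing events[fidx] without the check reads past the array. A runnable userspace stand-in for the same logic:

    #include <stdio.h>

    #define MAX_COUNTERS 64

    /* returns the index of the first set bit, or size if none is set */
    static unsigned int first_bit(unsigned long long bits, unsigned int size)
    {
            for (unsigned int i = 0; i < size; i++)
                    if (bits & (1ULL << i))
                            return i;
            return size;
    }

    int main(void)
    {
            unsigned long long used = 0;    /* no counters in use */
            unsigned int fidx = first_bit(used, MAX_COUNTERS);

            if (fidx == MAX_COUNTERS) {
                    puts("spurious interrupt: no counter active, bail out");
                    return 0;
            }
            printf("first active counter: %u\n", fidx);
            return 0;
    }
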
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 90f8543ba265b..6777532dd4dc9 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -25,23 +25,73 @@
+ #define EUSB2_FORCE_VAL_5		0xeD
+ #define V_CLK_19P2M_EN			BIT(6)
+ 
++#define EUSB2_TUNE_USB2_CROSSOVER	0x50
+ #define EUSB2_TUNE_IUSB2		0x51
++#define EUSB2_TUNE_RES_FSDIF		0x52
++#define EUSB2_TUNE_HSDISC		0x53
+ #define EUSB2_TUNE_SQUELCH_U		0x54
++#define EUSB2_TUNE_USB2_SLEW		0x55
++#define EUSB2_TUNE_USB2_EQU		0x56
+ #define EUSB2_TUNE_USB2_PREEM		0x57
++#define EUSB2_TUNE_USB2_HS_COMP_CUR	0x58
++#define EUSB2_TUNE_EUSB_SLEW		0x59
++#define EUSB2_TUNE_EUSB_EQU		0x5A
++#define EUSB2_TUNE_EUSB_HS_COMP_CUR	0x5B
+ 
+-#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v)	\
++#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v)	\
+ 	{					\
+-		.offset = o,			\
++		.reg = r,			\
+ 		.val = v,			\
+ 	}
+ 
+-struct eusb2_repeater_init_tbl {
+-	unsigned int offset;
+-	unsigned int val;
++enum reg_fields {
++	F_TUNE_EUSB_HS_COMP_CUR,
++	F_TUNE_EUSB_EQU,
++	F_TUNE_EUSB_SLEW,
++	F_TUNE_USB2_HS_COMP_CUR,
++	F_TUNE_USB2_PREEM,
++	F_TUNE_USB2_EQU,
++	F_TUNE_USB2_SLEW,
++	F_TUNE_SQUELCH_U,
++	F_TUNE_HSDISC,
++	F_TUNE_RES_FSDIF,
++	F_TUNE_IUSB2,
++	F_TUNE_USB2_CROSSOVER,
++	F_NUM_TUNE_FIELDS,
++
++	F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
++	F_FORCE_EN_5,
++
++	F_EN_CTL1,
++
++	F_RPTR_STATUS,
++	F_NUM_FIELDS,
++};
++
++static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
++	[F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
++	[F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
++	[F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
++	[F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
++	[F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
++	[F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
++	[F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
++	[F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
++	[F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
++	[F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
++	[F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
++	[F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
++
++	[F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
++	[F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
++
++	[F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
++
++	[F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+ };
+ 
+ struct eusb2_repeater_cfg {
+-	const struct eusb2_repeater_init_tbl *init_tbl;
++	const u32 *init_tbl;
+ 	int init_tbl_num;
+ 	const char * const *vreg_list;
+ 	int num_vregs;
+@@ -49,11 +99,10 @@ struct eusb2_repeater_cfg {
+ 
+ struct eusb2_repeater {
+ 	struct device *dev;
+-	struct regmap *regmap;
++	struct regmap_field *regs[F_NUM_FIELDS];
+ 	struct phy *phy;
+ 	struct regulator_bulk_data *vregs;
+ 	const struct eusb2_repeater_cfg *cfg;
+-	u16 base;
+ 	enum phy_mode mode;
+ };
+ 
+@@ -61,10 +110,10 @@ static const char * const pm8550b_vreg_l[] = {
+ 	"vdd18", "vdd3",
+ };
+ 
+-static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
+-	QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
+-	QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
+-	QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
++static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
++	[F_TUNE_IUSB2] = 0x8,
++	[F_TUNE_SQUELCH_U] = 0x3,
++	[F_TUNE_USB2_PREEM] = 0x5,
+ };
+ 
+ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+@@ -92,9 +141,9 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
+ 
+ static int eusb2_repeater_init(struct phy *phy)
+ {
++	struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
+ 	struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+-	const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
+-	int num = rptr->cfg->init_tbl_num;
++	const u32 *init_tbl = rptr->cfg->init_tbl;
+ 	u32 val;
+ 	int ret;
+ 	int i;
+@@ -103,17 +152,21 @@ static int eusb2_repeater_init(struct phy *phy)
+ 	if (ret)
+ 		return ret;
+ 
+-	regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
+-			   EUSB2_RPTR_EN, EUSB2_RPTR_EN);
++	regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ 
+-	for (i = 0; i < num; i++)
+-		regmap_update_bits(rptr->regmap,
+-				   rptr->base + init_tbl[i].offset,
+-				   init_tbl[i].val, init_tbl[i].val);
++	for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
++		if (init_tbl[i]) {
++			regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
++		} else {
++			/* Write 0 if there's no value set */
++			u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
++
++			regmap_field_update_bits(rptr->regs[i], mask, 0);
++		}
++	}
+ 
+-	ret = regmap_read_poll_timeout(rptr->regmap,
+-				       rptr->base + EUSB2_RPTR_STATUS, val,
+-				       val & RPTR_OK, 10, 5);
++	ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
++					     val, val & RPTR_OK, 10, 5);
+ 	if (ret)
+ 		dev_err(rptr->dev, "initialization timed-out\n");
+ 
+@@ -132,10 +185,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ 		 * per eUSB 1.2 Spec. The workaround below is applied in software
+ 		 * until the PHY and controller fix the observed behavior.
+ 		 */
+-		regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+-				   F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+-		regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+-				   V_CLK_19P2M_EN, V_CLK_19P2M_EN);
++		regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++					 F_CLK_19P2M_EN, F_CLK_19P2M_EN);
++		regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++					 V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ 		break;
+ 	case PHY_MODE_USB_DEVICE:
+ 		/*
+@@ -144,10 +197,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ 		 * repeater doesn't clear previous value due to shared
+ 		 * regulators (say host <-> device mode switch).
+ 		 */
+-		regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+-				   F_CLK_19P2M_EN, 0);
+-		regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+-				   V_CLK_19P2M_EN, 0);
++		regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++					 F_CLK_19P2M_EN, 0);
++		regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++					 V_CLK_19P2M_EN, 0);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -176,8 +229,9 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	struct phy_provider *phy_provider;
+ 	struct device_node *np = dev->of_node;
++	struct regmap *regmap;
++	int i, ret;
+ 	u32 res;
+-	int ret;
+ 
+ 	rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
+ 	if (!rptr)
+@@ -190,15 +244,22 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ 	if (!rptr->cfg)
+ 		return -EINVAL;
+ 
+-	rptr->regmap = dev_get_regmap(dev->parent, NULL);
+-	if (!rptr->regmap)
++	regmap = dev_get_regmap(dev->parent, NULL);
++	if (!regmap)
+ 		return -ENODEV;
+ 
+ 	ret = of_property_read_u32(np, "reg", &res);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	rptr->base = res;
++	for (i = 0; i < F_NUM_FIELDS; i++)
++		eusb2_repeater_tune_reg_fields[i].reg += res;
++
++	ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
++					   eusb2_repeater_tune_reg_fields,
++					   F_NUM_FIELDS);
++	if (ret)
++		return ret;
+ 
+ 	ret = eusb2_repeater_init_vregs(rptr);
+ 	if (ret < 0) {
+diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
+index 5b9748e0463bc..63e38671e95a6 100644
+--- a/drivers/platform/chrome/cros_ec_proto_test.c
++++ b/drivers/platform/chrome/cros_ec_proto_test.c
+@@ -2668,6 +2668,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
+ 	ec_dev->dev->release = cros_ec_proto_test_release;
+ 	ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ 	ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
++	mutex_init(&ec_dev->lock);
+ 
+ 	priv->msg = (struct cros_ec_command *)priv->_msg;
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index ad460417f901a..4b13d3e704bf3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9810,6 +9810,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ 	 * Individual addressing is broken on models that expose the
+ 	 * primary battery as BAT1.
+ 	 */
++	TPACPI_Q_LNV('8', 'F', true),       /* Thinkpad X120e */
+ 	TPACPI_Q_LNV('J', '7', true),       /* B5400 */
+ 	TPACPI_Q_LNV('J', 'I', true),       /* Thinkpad 11e */
+ 	TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index e618ed5aa8caa..fd37831dc98f8 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -892,7 +892,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ 		return -EINVAL;
+ 
+ 	if (rd->rpl[pl].locked) {
+-		pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
++		pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+ 		return -EACCES;
+ 	}
+ 
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 362bf756e6b78..5a3a4cc0bec82 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -490,7 +490,8 @@ ssize_t ptp_read(struct posix_clock *pc,
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		event[i] = queue->buf[queue->head];
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		/* Paired with READ_ONCE() in queue_cnt() */
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 80f74e38c2da4..9a50bfb56453c 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ 	dst->t.sec = seconds;
+ 	dst->t.nsec = remainder;
+ 
++	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ 	if (!queue_free(queue))
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 
+-	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
++	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+ 
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 75f58fc468a71..b8d4f61f14be4 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -76,9 +76,13 @@ struct ptp_vclock {
+  * that a writer might concurrently increment the tail does not
+  * matter, since the queue remains nonempty nonetheless.
+  */
+-static inline int queue_cnt(struct timestamp_event_queue *q)
++static inline int queue_cnt(const struct timestamp_event_queue *q)
+ {
+-	int cnt = q->tail - q->head;
++	/*
++	 * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
++	 * ptp_read(), extts_fifo_show().
++	 */
++	int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ 	return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+ }
+ 
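
Editorial note: the PTP hunks make every head/tail update a WRITE_ONCE() paired with the READ_ONCE() in queue_cnt(), so a lockless reader can never observe a torn or compiler-cached index. The closest portable analogue is relaxed C11 atomics; a runnable sketch of the same counting logic:

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_TS 128

    struct ts_queue {
            _Atomic int head;   /* consumer index */
            _Atomic int tail;   /* producer index */
    };

    /* relaxed loads stand in for READ_ONCE() */
    static int queue_cnt(const struct ts_queue *q)
    {
            int cnt = atomic_load_explicit(&q->tail, memory_order_relaxed) -
                      atomic_load_explicit(&q->head, memory_order_relaxed);

            /* modulo wraparound can make the difference negative */
            return cnt < 0 ? MAX_TS + cnt : cnt;
    }

    /* relaxed store stands in for WRITE_ONCE() */
    static void enqueue(struct ts_queue *q)
    {
            int tail = atomic_load_explicit(&q->tail, memory_order_relaxed);

            atomic_store_explicit(&q->tail, (tail + 1) % MAX_TS,
                                  memory_order_relaxed);
    }

    int main(void)
    {
            struct ts_queue q = { 0, 0 };

            enqueue(&q);
            enqueue(&q);
            printf("pending events: %d\n", queue_cnt(&q));  /* 2 */
            return 0;
    }
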
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 6e4d5456a8851..34ea5c16123a1 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -90,7 +90,8 @@ static ssize_t extts_fifo_show(struct device *dev,
+ 	qcnt = queue_cnt(queue);
+ 	if (qcnt) {
+ 		event = queue->buf[queue->head];
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		/* Paired with READ_ONCE() in queue_cnt() */
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 	}
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ 
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 8028f76923b24..ea3c19156c25e 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1030,6 +1030,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
+ 
+ void ap_bus_force_rescan(void)
+ {
++	/* Only trigger AP bus scans after the initial scan is done */
++	if (atomic64_read(&ap_scan_bus_count) <= 0)
++		return;
++
+ 	/* processing an asynchronous bus rescan */
+ 	del_timer(&ap_config_timer);
+ 	queue_work(system_long_wq, &ap_scan_work);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 2f33e6b4a92fb..9285ae508afa6 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -4861,6 +4861,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+ 
++static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
++{
++	debugfs_remove_recursive(hisi_hba->debugfs_dir);
++	hisi_hba->debugfs_dir = NULL;
++}
++
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ 	struct device *dev = hisi_hba->dev;
+@@ -4884,18 +4890,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ 
+ 	for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ 		if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+-			debugfs_remove_recursive(hisi_hba->debugfs_dir);
++			debugfs_exit_v3_hw(hisi_hba);
+ 			dev_dbg(dev, "failed to init debugfs!\n");
+ 			break;
+ 		}
+ 	}
+ }
+ 
+-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+-{
+-	debugfs_remove_recursive(hisi_hba->debugfs_dir);
+-}
+-
+ static int
+ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 470e8e6c41b62..c98346e464b48 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -1518,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&queue->l_lock, flags);
+-	BUG_ON(list_empty(&queue->free));
++	if (list_empty(&queue->free)) {
++		ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
++		spin_unlock_irqrestore(&queue->l_lock, flags);
++		return NULL;
++	}
+ 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ 	atomic_set(&evt->free, 0);
+ 	list_del(&evt->queue_list);
+@@ -1947,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ 	if (vhost->using_channels) {
+ 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
++		if (!evt)
++			return SCSI_MLQUEUE_HOST_BUSY;
++
+ 		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+-	} else
++	} else {
+ 		evt = ibmvfc_get_event(&vhost->crq);
++		if (!evt)
++			return SCSI_MLQUEUE_HOST_BUSY;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ 	evt->cmnd = cmnd;
+@@ -2037,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
+ 
+ 	vhost->aborting_passthru = 1;
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		return -ENOMEM;
++	}
++
+ 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+ 
+ 	tmf = &evt->iu.tmf;
+@@ -2095,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+ 		goto unlock_out;
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		rc = -ENOMEM;
++		goto unlock_out;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 	plogi = &evt->iu.plogi;
+ 	memset(plogi, 0, sizeof(*plogi));
+@@ -2213,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
+ 	}
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		rc = -ENOMEM;
++		goto out;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.passthru;
+ 
+@@ -2301,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+ 		else
+ 			evt = ibmvfc_get_event(&vhost->crq);
+ 
++		if (!evt) {
++			spin_unlock_irqrestore(vhost->host->host_lock, flags);
++			return -ENOMEM;
++		}
++
+ 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -2504,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ 	struct ibmvfc_tmf *tmf;
+ 
+ 	evt = ibmvfc_get_event(queue);
++	if (!evt)
++		return NULL;
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 
+ 	tmf = &evt->iu.tmf;
+@@ -2560,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+ 
+ 		if (found_evt && vhost->logged_in) {
+ 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
++			if (!evt) {
++				spin_unlock(queues[i].q_lock);
++				spin_unlock_irqrestore(vhost->host->host_lock, flags);
++				return -ENOMEM;
++			}
+ 			evt->sync_iu = &queues[i].cancel_rsp;
+ 			ibmvfc_send_event(evt, vhost, default_timeout);
+ 			list_add_tail(&evt->cancel, &cancelq);
+@@ -2773,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+ 
+ 	if (vhost->state == IBMVFC_ACTIVE) {
+ 		evt = ibmvfc_get_event(&vhost->crq);
++		if (!evt) {
++			spin_unlock_irqrestore(vhost->host->host_lock, flags);
++			return -ENOMEM;
++		}
+ 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -4031,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+@@ -4138,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+ 	kref_get(&tgt->kref);
+ 	tgt->logo_rcvd = 0;
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+@@ -4214,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt)
++		return NULL;
+ 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+ 	mad = &evt->iu.implicit_logout;
+@@ -4241,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ 	vhost->discovery_threads++;
+ 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ 						   ibmvfc_tgt_implicit_logout_done);
++	if (!evt) {
++		vhost->discovery_threads--;
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+@@ -4380,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+@@ -4546,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
+ 	vhost->abort_threads++;
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
++		vhost->abort_threads--;
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		return;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+ 
+ 	evt->tgt = tgt;
+@@ -4596,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+@@ -4699,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	evt->tgt = tgt;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+@@ -4871,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+ {
+ 	struct ibmvfc_discover_targets *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.discover_targets;
+@@ -4948,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ 	unsigned int num_channels =
+ 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ 	int i;
+ 
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	memset(setup_buf, 0, sizeof(*setup_buf));
+ 	if (num_channels == 0)
+ 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+@@ -5011,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+ {
+ 	struct ibmvfc_channel_enquiry *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.channel_enquiry;
+@@ -5133,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_npiv_login_mad *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ 
++	if (!evt) {
++		ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	ibmvfc_gather_partition_info(vhost);
+ 	ibmvfc_set_login_info(vhost);
+ 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+@@ -5197,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_event *evt;
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+ 
+ 	mad = &evt->iu.npiv_logout;
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 9c02c9523c4d4..ab06e9aeb613e 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
+ 	}
+ 	mutex_lock(&lport->disc.disc_mutex);
+ 	lport->ptp_rdata = fc_rport_create(lport, remote_fid);
++	if (!lport->ptp_rdata) {
++		printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
++			lport->port_id);
++		mutex_unlock(&lport->disc.disc_mutex);
++		return;
++	}
+ 	kref_get(&lport->ptp_rdata->kref);
+ 	lport->ptp_rdata->ids.port_name = remote_wwpn;
+ 	lport->ptp_rdata->ids.node_name = remote_wwnn;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index c0d47141f6d38..2a3279b902d60 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
+ 	 * Fusion registers could intermittently return all zeroes.
+ 	 * This behavior is transient in nature and subsequent reads will
+ 	 * return valid value. As a workaround in driver, retry readl for
+-	 * upto three times until a non-zero value is read.
++	 * up to thirty times until a non-zero value is read.
+ 	 */
+ 	if (instance->adapter_type == AERO_SERIES) {
+ 		do {
+ 			ret_val = readl(addr);
+ 			i++;
+-		} while (ret_val == 0 && i < 3);
++		} while (ret_val == 0 && i < 30);
+ 		return ret_val;
+ 	} else {
+ 		return readl(addr);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 5284584e4cd2b..2fa56ef7f6594 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -223,8 +223,8 @@ _base_readl_ext_retry(const volatile void __iomem *addr)
+ 
+ 	for (i = 0 ; i < 30 ; i++) {
+ 		ret_val = readl(addr);
+-		if (ret_val == 0)
+-			continue;
++		if (ret_val != 0)
++			break;
+ 	}
+ 
+ 	return ret_val;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 67176be79dffd..6d6b4ed49612d 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1835,8 +1835,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ 		}
+ 
+ 		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+-		if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+-			sp->done(sp, res);
++		switch (sp->type) {
++		case SRB_SCSI_CMD:
++			if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
++				sp->done(sp, res);
++			break;
++		default:
++			if (ret_cmd)
++				sp->done(sp, res);
++			break;
++		}
+ 	} else {
+ 		sp->done(sp, res);
+ 	}
+diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
+index f54acffc83f9f..f2b24361c8cac 100644
+--- a/drivers/soc/amlogic/meson-ee-pwrc.c
++++ b/drivers/soc/amlogic/meson-ee-pwrc.c
+@@ -229,7 +229,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+ 
+ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+ 	{ G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
+-	{ G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
++	{ G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
+ };
+ 
+ #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks)	\
+diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
+index 1a179d4e011cf..d2f0233cb6206 100644
+--- a/drivers/soc/bcm/bcm2835-power.c
++++ b/drivers/soc/bcm/bcm2835-power.c
+@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
+ 	}
+ 	writel(PM_PASSWORD | val, base + reg);
+ 
+-	while (readl(base + reg) & ASB_ACK) {
++	while (!!(readl(base + reg) & ASB_ACK) == enable) {
+ 		cpu_relax();
+ 		if (ktime_get_ns() - start >= 1000)
+ 			return -ETIMEDOUT;
+diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
+index 90a8b2c0676ff..419ed15cc10c4 100644
+--- a/drivers/soc/imx/gpc.c
++++ b/drivers/soc/imx/gpc.c
+@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
+ 
+ 			pd_pdev->dev.parent = &pdev->dev;
+ 			pd_pdev->dev.of_node = np;
++			pd_pdev->dev.fwnode = of_fwnode_handle(np);
+ 
+ 			ret = platform_device_add(pd_pdev);
+ 			if (ret) {
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 2a1096dab63d3..9ebdd0cd0b1cf 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
+ 		},
+ 		.driver_data = (void *)hp_omen_16,
+ 	},
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index 36243a3972fd7..5ac5cb60bae67 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -256,7 +256,7 @@ skip_limit_set:
+ 
+ static const struct kernel_param_ops max_idle_ops = {
+ 	.set = max_idle_set,
+-	.get = param_get_int,
++	.get = param_get_byte,
+ };
+ 
+ module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 488138a28ae13..e6bfa63b40aee 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ {
+ 	struct tb_port *port;
+ 
++	if (tb_switch_is_icm(sw))
++		return;
++
+ 	tb_switch_for_each_port(sw, port) {
+ 		if (!tb_port_is_usb3_down(port))
+ 			continue;
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 98764e740c078..34c01874f45be 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -377,18 +377,21 @@ void xen_console_resume(void)
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+-	if (info->irq > 0)
+-		unbind_from_irqhandler(info->irq, NULL);
+-	info->irq = 0;
++	if (info->hvc != NULL)
++		hvc_remove(info->hvc);
++	info->hvc = NULL;
++	if (info->irq > 0) {
++		evtchn_put(info->evtchn);
++		info->irq = 0;
++		info->evtchn = 0;
++	}
++	/* evtchn_put() will also close it so this is only an error path */
+ 	if (info->evtchn > 0)
+ 		xenbus_free_evtchn(info->xbdev, info->evtchn);
+ 	info->evtchn = 0;
+ 	if (info->gntref > 0)
+ 		gnttab_free_grant_references(info->gntref);
+ 	info->gntref = 0;
+-	if (info->hvc != NULL)
+-		hvc_remove(info->hvc);
+-	info->hvc = NULL;
+ }
+ 
+ static void xencons_free(struct xencons_info *info)
+@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
+ 	if (ret)
+ 		return ret;
+ 	info->evtchn = evtchn;
+-	irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
++	irq = bind_evtchn_to_irq_lateeoi(evtchn);
+ 	if (irq < 0)
+ 		return irq;
+ 	info->irq = irq;
+@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
+ 		if (dev->state == XenbusStateClosed)
+ 			break;
+ 		fallthrough;	/* Missed the backend's CLOSING state */
+-	case XenbusStateClosing:
++	case XenbusStateClosing: {
++		struct xencons_info *info = dev_get_drvdata(&dev->dev);
++
++		/*
++		 * Don't tear down the evtchn and grant ref before the other
++		 * end has disconnected, but do stop userspace from trying
++		 * to use the device before we allow the backend to close.
++		 */
++		if (info->hvc) {
++			hvc_remove(info->hvc);
++			info->hvc = NULL;
++		}
++
+ 		xenbus_frontend_closed(dev);
+ 		break;
+ 	}
++	}
+ }
+ 
+ static const struct xenbus_device_id xencons_ids[] = {
+@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
+ 		ops = &dom0_hvc_ops;
+ 		r = xen_initial_domain_console_init();
+ 		if (r < 0)
+-			return r;
++			goto register_fe;
+ 		info = vtermno_to_xencons(HVC_COOKIE);
+ 	} else {
+ 		ops = &domU_hvc_ops;
+@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
+ 		else
+ 			r = xen_pv_console_init();
+ 		if (r < 0)
+-			return r;
++			goto register_fe;
+ 
+ 		info = vtermno_to_xencons(HVC_COOKIE);
+ 		info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
+ 		list_del(&info->list);
+ 		spin_unlock_irqrestore(&xencons_lock, flags);
+ 		if (info->irq)
+-			unbind_from_irqhandler(info->irq, NULL);
++			evtchn_put(info->evtchn);
+ 		kfree(info);
+ 		return r;
+ 	}
+ 
+ 	r = 0;
++ register_fe:
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ 	r = xenbus_register_frontend(&xencons_driver);
+ #endif
+diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
+index 2501db5a7aaf7..677584cab57e4 100644
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -379,10 +379,14 @@ static void meson_uart_set_termios(struct uart_port *port,
+ 	else
+ 		val |= AML_UART_STOP_BIT_1SB;
+ 
+-	if (cflags & CRTSCTS)
+-		val &= ~AML_UART_TWO_WIRE_EN;
+-	else
++	if (cflags & CRTSCTS) {
++		if (port->flags & UPF_HARD_FLOW)
++			val &= ~AML_UART_TWO_WIRE_EN;
++		else
++			termios->c_cflag &= ~CRTSCTS;
++	} else {
+ 		val |= AML_UART_TWO_WIRE_EN;
++	}
+ 
+ 	writel(val, port->membase + AML_UART_CONTROL);
+ 
+@@ -697,6 +701,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 	u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
+ 	int ret = 0;
+ 	int irq;
++	bool has_rtscts;
+ 
+ 	if (pdev->dev.of_node)
+ 		pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+@@ -724,6 +729,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 		return irq;
+ 
+ 	of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
++	has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
+ 
+ 	if (meson_ports[pdev->id]) {
+ 		dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
+@@ -743,6 +749,8 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 	port->mapsize = resource_size(res_mem);
+ 	port->irq = irq;
+ 	port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
++	if (has_rtscts)
++		port->flags |= UPF_HARD_FLOW;
+ 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
+ 	port->dev = &pdev->dev;
+ 	port->line = pdev->id;
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index b6e70c5cfa174..88f594d369487 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -263,13 +263,14 @@ static void sysrq_handle_showallcpus(int key)
+ 		if (in_hardirq())
+ 			regs = get_irq_regs();
+ 
+-		pr_info("CPU%d:\n", smp_processor_id());
++		pr_info("CPU%d:\n", get_cpu());
+ 		if (regs)
+ 			show_regs(regs);
+ 		else
+ 			show_stack(NULL, NULL, KERN_INFO);
+ 
+ 		schedule_work(&sysrq_showallcpus);
++		put_cpu();
+ 	}
+ }
+ 
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index 34ba6e54789a7..b8b832c75b856 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 		return -ENOMEM;
+ 
+ 	name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
++	if (!name) {
++		rv = -ENOMEM;
++		goto free_port;
++	}
+ 
+ 	rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
+ 			     ARRAY_SIZE(vcc_versions), NULL, name);
+ 	if (rv)
+-		goto free_port;
++		goto free_name;
+ 
+ 	port->vio.debug = vcc_dbg_vio;
+ 	vcc_ldc_cfg.debug = vcc_dbg_ldc;
+ 
+ 	rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
+ 	if (rv)
+-		goto free_port;
++		goto free_name;
+ 
+ 	spin_lock_init(&port->lock);
+ 
+@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 		goto unreg_tty;
+ 	}
+ 	port->domain = kstrdup(domain, GFP_KERNEL);
++	if (!port->domain) {
++		rv = -ENOMEM;
++		goto unreg_tty;
++	}
++
+ 
+ 	mdesc_release(hp);
+ 
+@@ -653,8 +662,9 @@ free_table:
+ 	vcc_table_remove(port->index);
+ free_ldc:
+ 	vio_ldc_free(&port->vio);
+-free_port:
++free_name:
+ 	kfree(name);
++free_port:
+ 	kfree(port);
+ 
+ 	return rv;
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 386674ead7f0d..08ff0dd73f1ef 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -433,7 +433,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
+ 
+ 	for (i = 0; i < hba->nr_hw_queues; i++) {
+ 		hwq = &hba->uhq[i];
+-		hwq->max_entries = hba->nutrs;
++		hwq->max_entries = hba->nutrs + 1;
+ 		spin_lock_init(&hwq->sq_lock);
+ 		spin_lock_init(&hwq->cq_lock);
+ 		mutex_init(&hwq->sq_mutex);
+@@ -632,6 +632,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	int tag = scsi_cmd_to_rq(cmd)->tag;
+ 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ 	struct ufs_hw_queue *hwq;
++	unsigned long flags;
+ 	int err = FAILED;
+ 
+ 	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+@@ -672,8 +673,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	}
+ 
+ 	err = SUCCESS;
++	spin_lock_irqsave(&hwq->cq_lock, flags);
+ 	if (ufshcd_cmd_inflight(lrbp->cmd))
+ 		ufshcd_release_scsi_cmd(hba, lrbp);
++	spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ 
+ out:
+ 	return err;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 13cd0f1207bf1..dbc3bfa98863a 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8798,7 +8798,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ 	if (ret)
+ 		goto out;
+ 
+-	if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
++	if (!hba->pm_op_in_progress &&
++	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
+ 		/* Reset the device and controller before doing reinit */
+ 		ufshcd_device_reset(hba);
+ 		ufshcd_hba_stop(hba);
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index c1557d21b027e..1748ead49b05f 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -820,8 +820,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ 			return ret;
+ 		}
+ 
+-		/* Use the agreed gear */
+-		host->hs_gear = dev_req_params->gear_tx;
++		/*
++	 * Update hs_gear only when the gears are scaled to a higher value. This is because
++		 * the PHY gear settings are backwards compatible and we only need to change the PHY
++		 * settings while scaling to higher gears.
++		 */
++		if (dev_req_params->gear_tx > host->hs_gear)
++			host->hs_gear = dev_req_params->gear_tx;
+ 
+ 		/* enable the device ref clock before changing to HS mode */
+ 		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 343d2570189ff..d25490965b27f 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1094,6 +1094,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+ 	}
+ }
+ 
++static void dwc3_config_threshold(struct dwc3 *dwc)
++{
++	u32 reg;
++	u8 rx_thr_num;
++	u8 rx_maxburst;
++	u8 tx_thr_num;
++	u8 tx_maxburst;
++
++	/*
++	 * Must config both number of packets and max burst settings to enable
++	 * RX and/or TX threshold.
++	 */
++	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
++		rx_thr_num = dwc->rx_thr_num_pkt_prd;
++		rx_maxburst = dwc->rx_max_burst_prd;
++		tx_thr_num = dwc->tx_thr_num_pkt_prd;
++		tx_maxburst = dwc->tx_max_burst_prd;
++
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
++
++			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
++			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
++
++			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
++			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
++
++			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
++			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
++
++			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
++			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	}
++
++	rx_thr_num = dwc->rx_thr_num_pkt;
++	rx_maxburst = dwc->rx_max_burst;
++	tx_thr_num = dwc->tx_thr_num_pkt;
++	tx_maxburst = dwc->tx_max_burst;
++
++	if (DWC3_IP_IS(DWC3)) {
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
++			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
++			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	} else {
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
++			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
++			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	}
++}
++
+ /**
+  * dwc3_core_init - Low-level initialization of DWC3 Core
+  * @dwc: Pointer to our controller context structure
+@@ -1246,42 +1351,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ 	}
+ 
+-	/*
+-	 * Must config both number of packets and max burst settings to enable
+-	 * RX and/or TX threshold.
+-	 */
+-	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+-		u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
+-		u8 rx_maxburst = dwc->rx_max_burst_prd;
+-		u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
+-		u8 tx_maxburst = dwc->tx_max_burst_prd;
+-
+-		if (rx_thr_num && rx_maxburst) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+-			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+-
+-			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+-			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+-
+-			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+-			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+-
+-			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+-		}
+-
+-		if (tx_thr_num && tx_maxburst) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+-			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+-
+-			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+-			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+-
+-			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+-			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+-
+-			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+-		}
+-	}
++	dwc3_config_threshold(dwc);
+ 
+ 	return 0;
+ 
+@@ -1417,6 +1487,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	u8			lpm_nyet_threshold;
+ 	u8			tx_de_emphasis;
+ 	u8			hird_threshold;
++	u8			rx_thr_num_pkt = 0;
++	u8			rx_max_burst = 0;
++	u8			tx_thr_num_pkt = 0;
++	u8			tx_max_burst = 0;
+ 	u8			rx_thr_num_pkt_prd = 0;
+ 	u8			rx_max_burst_prd = 0;
+ 	u8			tx_thr_num_pkt_prd = 0;
+@@ -1479,6 +1553,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 				"snps,usb2-lpm-disable");
+ 	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ 				"snps,usb2-gadget-lpm-disable");
++	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
++				&rx_thr_num_pkt);
++	device_property_read_u8(dev, "snps,rx-max-burst",
++				&rx_max_burst);
++	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
++				&tx_thr_num_pkt);
++	device_property_read_u8(dev, "snps,tx-max-burst",
++				&tx_max_burst);
+ 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ 				&rx_thr_num_pkt_prd);
+ 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1560,6 +1642,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 
+ 	dwc->hird_threshold = hird_threshold;
+ 
++	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
++	dwc->rx_max_burst = rx_max_burst;
++
++	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
++	dwc->tx_max_burst = tx_max_burst;
++
+ 	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ 	dwc->rx_max_burst_prd = rx_max_burst_prd;
+ 
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index a69ac67d89fe6..6782ec8bfd64c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -211,6 +211,11 @@
+ #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+ #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+ 
++/* Global TX Threshold Configuration Register */
++#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
++#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
++#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
++
+ /* Global RX Threshold Configuration Register for DWC_usb31 only */
+ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n)	(((n) & 0x1f) << 16)
+ #define DWC31_GRXTHRCFG_RXPKTCNT(n)		(((n) & 0x1f) << 21)
+@@ -1045,6 +1050,10 @@ struct dwc3_scratchpad_array {
+  * @test_mode_nr: test feature selector
+  * @lpm_nyet_threshold: LPM NYET response threshold
+  * @hird_threshold: HIRD threshold
++ * @rx_thr_num_pkt: USB receive packet count
++ * @rx_max_burst: max USB receive burst size
++ * @tx_thr_num_pkt: USB transmit packet count
++ * @tx_max_burst: max USB transmit burst size
+  * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+  * @rx_max_burst_prd: max periodic ESS receive burst size
+  * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+@@ -1273,6 +1282,10 @@ struct dwc3 {
+ 	u8			test_mode_nr;
+ 	u8			lpm_nyet_threshold;
+ 	u8			hird_threshold;
++	u8			rx_thr_num_pkt;
++	u8			rx_max_burst;
++	u8			tx_thr_num_pkt;
++	u8			tx_max_burst;
+ 	u8			rx_thr_num_pkt_prd;
+ 	u8			rx_max_burst_prd;
+ 	u8			tx_thr_num_pkt_prd;
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index faf90a2174194..bbb6ff6b11aa1 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1425,7 +1425,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_composite_dev *cdev = c->cdev;
+ 	struct f_ncm		*ncm = func_to_ncm(f);
+ 	struct usb_string	*us;
+-	int			status;
++	int			status = 0;
+ 	struct usb_ep		*ep;
+ 	struct f_ncm_opts	*ncm_opts;
+ 
+@@ -1443,22 +1443,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 		f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ 	}
+ 
+-	/*
+-	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+-	 * configurations are bound in sequence with list_for_each_entry,
+-	 * in each configuration its functions are bound in sequence
+-	 * with list_for_each_entry, so we assume no race condition
+-	 * with regard to ncm_opts->bound access
+-	 */
+-	if (!ncm_opts->bound) {
+-		mutex_lock(&ncm_opts->lock);
+-		gether_set_gadget(ncm_opts->net, cdev->gadget);
++	mutex_lock(&ncm_opts->lock);
++	gether_set_gadget(ncm_opts->net, cdev->gadget);
++	if (!ncm_opts->bound)
+ 		status = gether_register_netdev(ncm_opts->net);
+-		mutex_unlock(&ncm_opts->lock);
+-		if (status)
+-			goto fail;
+-		ncm_opts->bound = true;
+-	}
++	mutex_unlock(&ncm_opts->lock);
++
++	if (status)
++		goto fail;
++
++	ncm_opts->bound = true;
++
+ 	us = usb_gstrings_attach(cdev, ncm_strings,
+ 				 ARRAY_SIZE(ncm_string_defs));
+ 	if (IS_ERR(us)) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index bde43cef8846c..95ed9404f6f85 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -695,7 +695,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+ 
+-	if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
++	if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
++		pm_runtime_forbid(&dev->dev);
++	else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ 		pm_runtime_allow(&dev->dev);
+ 
+ 	dma_set_max_seg_size(&dev->dev, UINT_MAX);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index fae994f679d45..82aab2f9adbb8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -968,6 +968,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 	int			retval = 0;
+ 	bool			comp_timer_running = false;
+ 	bool			pending_portevent = false;
++	bool			suspended_usb3_devs = false;
+ 	bool			reinit_xhc = false;
+ 
+ 	if (!hcd->state)
+@@ -1115,10 +1116,17 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 		/*
+ 		 * Resume roothubs only if there are pending events.
+ 		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+-		 * the first wake signalling failed, give it that chance.
++		 * the first wake signalling failed, give it that chance if
++		 * there are suspended USB 3 devices.
+ 		 */
++		if (xhci->usb3_rhub.bus_state.suspended_ports ||
++		    xhci->usb3_rhub.bus_state.bus_suspended)
++			suspended_usb3_devs = true;
++
+ 		pending_portevent = xhci_pending_portevent(xhci);
+-		if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
++
++		if (suspended_usb3_devs && !pending_portevent &&
++		    msg.event == PM_EVENT_AUTO_RESUME) {
+ 			msleep(120);
+ 			pending_portevent = xhci_pending_portevent(xhci);
+ 		}
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 1fe9cb5b6bd96..a2d862eebcecb 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -9,9 +9,13 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/soc/qcom/pdr.h>
++#include <linux/usb/typec_mux.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/soc/qcom/pmic_glink.h>
+ #include "ucsi.h"
+ 
++#define PMIC_GLINK_MAX_PORTS	2
++
+ #define UCSI_BUF_SIZE                   48
+ 
+ #define MSG_TYPE_REQ_RESP               1
+@@ -53,6 +57,9 @@ struct ucsi_notify_ind_msg {
+ struct pmic_glink_ucsi {
+ 	struct device *dev;
+ 
++	struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
++	struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
++
+ 	struct pmic_glink_client *client;
+ 
+ 	struct ucsi *ucsi;
+@@ -221,8 +228,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ 	}
+ 
+ 	con_num = UCSI_CCI_CONNECTOR(cci);
+-	if (con_num)
++	if (con_num) {
++		if (con_num < PMIC_GLINK_MAX_PORTS &&
++		    ucsi->port_orientation[con_num - 1]) {
++			int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
++
++			if (orientation >= 0) {
++				typec_switch_set(ucsi->port_switch[con_num - 1],
++						 orientation ? TYPEC_ORIENTATION_REVERSE
++							     : TYPEC_ORIENTATION_NORMAL);
++			}
++		}
++
+ 		ucsi_connector_change(ucsi->ucsi, con_num);
++	}
+ 
+ 	if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
+ 		ucsi->sync_val = -EBUSY;
+@@ -283,6 +302,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ {
+ 	struct pmic_glink_ucsi *ucsi;
+ 	struct device *dev = &adev->dev;
++	struct fwnode_handle *fwnode;
+ 	int ret;
+ 
+ 	ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
+@@ -310,6 +330,38 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ 
+ 	ucsi_set_drvdata(ucsi->ucsi, ucsi);
+ 
++	device_for_each_child_node(dev, fwnode) {
++		struct gpio_desc *desc;
++		u32 port;
++
++		ret = fwnode_property_read_u32(fwnode, "reg", &port);
++		if (ret < 0) {
++			dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++			return ret;
++		}
++
++		if (port >= PMIC_GLINK_MAX_PORTS) {
++			dev_warn(dev, "invalid connector number, ignoring\n");
++			continue;
++		}
++
++		desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
++
++		/* If GPIO isn't found, continue */
++		if (!desc)
++			continue;
++
++		if (IS_ERR(desc))
++			return dev_err_probe(dev, PTR_ERR(desc),
++					     "unable to acquire orientation gpio\n");
++		ucsi->port_orientation[port] = desc;
++
++		ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
++		if (IS_ERR(ucsi->port_switch[port]))
++			return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
++					"failed to acquire orientation-switch\n");
++	}
++
+ 	ucsi->client = devm_pmic_glink_register_client(dev,
+ 						       PMIC_GLINK_OWNER_USBC,
+ 						       pmic_glink_ucsi_callback,
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index b3a3cb1657955..b137f36793439 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ 	if (blk->shared_backend) {
+ 		blk->buffer = shared_buffer;
+ 	} else {
+-		blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++		blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ 				       GFP_KERNEL);
+ 		if (!blk->buffer) {
+ 			ret = -ENOMEM;
+@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
+ 		goto parent_err;
+ 
+ 	if (shared_backend) {
+-		shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++		shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ 					 GFP_KERNEL);
+ 		if (!shared_buffer) {
+ 			ret = -ENOMEM;
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index b43e8680eee8d..48357c403867f 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1498,7 +1498,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+ 
+ err:
+ 	put_device(&v->dev);
+-	ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ 	return r;
+ }
+ 
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index fd3cfdda49491..76527324b63c1 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -153,14 +153,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ 	timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+ 
+ 	if (action)
+-		sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
++		sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
+ 	else
+ 		/*
+ 		 * In the single stage mode, The first signal (WS0) is ignored,
+ 		 * the timeout is (WOR * 2), so the WOR should be configured
+ 		 * to half value of timeout.
+ 		 */
+-		sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
++		sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index c803714d0f0d1..87482b3428bf6 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
+ 
+ /* IRQ <-> IPI mapping */
+ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
++/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
++static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
+ 
+ /* Event channel distribution data */
+ static atomic_t channels_on_cpu[NR_CPUS];
+@@ -366,6 +368,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
+ 	info->u.ipi = ipi;
+ 
+ 	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+ 
+ 	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+ }
+@@ -601,7 +604,9 @@ static void lateeoi_list_add(struct irq_info *info)
+ 
+ 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ 
+-	if (list_empty(&eoi->eoi_list)) {
++	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++					eoi_list);
++	if (!elem || info->eoi_time < elem->eoi_time) {
+ 		list_add(&info->eoi_list, &eoi->eoi_list);
+ 		mod_delayed_work_on(info->eoi_cpu, system_wq,
+ 				    &eoi->delayed, delay);
+@@ -981,6 +986,7 @@ static void __unbind_from_irq(unsigned int irq)
+ 			break;
+ 		case IRQT_IPI:
+ 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
++			per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
+ 			break;
+ 		case IRQT_EVTCHN:
+ 			dev = info->u.interdomain;
+@@ -1631,7 +1637,7 @@ EXPORT_SYMBOL_GPL(evtchn_put);
+ 
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ {
+-	int irq;
++	evtchn_port_t evtchn;
+ 
+ #ifdef CONFIG_X86
+ 	if (unlikely(vector == XEN_NMI_VECTOR)) {
+@@ -1642,9 +1648,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ 		return;
+ 	}
+ #endif
+-	irq = per_cpu(ipi_to_irq, cpu)[vector];
+-	BUG_ON(irq < 0);
+-	notify_remote_via_irq(irq);
++	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
++	BUG_ON(evtchn == 0);
++	notify_remote_via_evtchn(evtchn);
+ }
+ 
+ struct evtchn_loop_ctrl {
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index e00cf8109b3f3..3c4572ef3a488 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ 	struct p9_fid *fid;
+ 	int ret;
+ 
+-	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
++	p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
+ 		 name, buffer_size);
+ 	fid = v9fs_fid_lookup(dentry);
+ 	if (IS_ERR(fid))
+@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+ 
+ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+-	return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
++	/* Txattrwalk with an empty string lists xattrs instead */
++	return v9fs_xattr_get(dentry, "", buffer, buffer_size);
+ }
+ 
+ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 5e7a19fca79c4..bf65f801d8439 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2587,7 +2587,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
+ 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+ 
+ 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -3011,7 +3011,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
+ 						   cache->global_root_id);
+ 	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ 	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ fail:
+ 	btrfs_release_path(path);
+ 	/*
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 617d4827eec26..118ad4d2cbbe2 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -359,7 +359,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ 		return ret;
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(cow);
++	btrfs_mark_buffer_dirty(trans, cow);
+ 	*cow_ret = cow;
+ 	return 0;
+ }
+@@ -627,7 +627,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 					cow->start);
+ 		btrfs_set_node_ptr_generation(parent, parent_slot,
+ 					      trans->transid);
+-		btrfs_mark_buffer_dirty(parent);
++		btrfs_mark_buffer_dirty(trans, parent);
+ 		if (last_ref) {
+ 			ret = btrfs_tree_mod_log_free_eb(buf);
+ 			if (ret) {
+@@ -643,7 +643,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	if (unlock_orig)
+ 		btrfs_tree_unlock(buf);
+ 	free_extent_buffer_stale(buf);
+-	btrfs_mark_buffer_dirty(cow);
++	btrfs_mark_buffer_dirty(trans, cow);
+ 	*cow_ret = cow;
+ 	return 0;
+ }
+@@ -1197,7 +1197,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ 				goto out;
+ 			}
+ 			btrfs_set_node_key(parent, &right_key, pslot + 1);
+-			btrfs_mark_buffer_dirty(parent);
++			btrfs_mark_buffer_dirty(trans, parent);
+ 		}
+ 	}
+ 	if (btrfs_header_nritems(mid) == 1) {
+@@ -1255,7 +1255,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ 			goto out;
+ 		}
+ 		btrfs_set_node_key(parent, &mid_key, pslot);
+-		btrfs_mark_buffer_dirty(parent);
++		btrfs_mark_buffer_dirty(trans, parent);
+ 	}
+ 
+ 	/* update the path */
+@@ -1362,7 +1362,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ 				return ret;
+ 			}
+ 			btrfs_set_node_key(parent, &disk_key, pslot);
+-			btrfs_mark_buffer_dirty(parent);
++			btrfs_mark_buffer_dirty(trans, parent);
+ 			if (btrfs_header_nritems(left) > orig_slot) {
+ 				path->nodes[level] = left;
+ 				path->slots[level + 1] -= 1;
+@@ -1422,7 +1422,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ 				return ret;
+ 			}
+ 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
+-			btrfs_mark_buffer_dirty(parent);
++			btrfs_mark_buffer_dirty(trans, parent);
+ 
+ 			if (btrfs_header_nritems(mid) <= orig_slot) {
+ 				path->nodes[level] = right;
+@@ -2678,7 +2678,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+  * higher levels
+  *
+  */
+-static void fixup_low_keys(struct btrfs_path *path,
++static void fixup_low_keys(struct btrfs_trans_handle *trans,
++			   struct btrfs_path *path,
+ 			   struct btrfs_disk_key *key, int level)
+ {
+ 	int i;
+@@ -2695,7 +2696,7 @@ static void fixup_low_keys(struct btrfs_path *path,
+ 						    BTRFS_MOD_LOG_KEY_REPLACE);
+ 		BUG_ON(ret < 0);
+ 		btrfs_set_node_key(t, key, tslot);
+-		btrfs_mark_buffer_dirty(path->nodes[i]);
++		btrfs_mark_buffer_dirty(trans, path->nodes[i]);
+ 		if (tslot != 0)
+ 			break;
+ 	}
+@@ -2707,10 +2708,11 @@ static void fixup_low_keys(struct btrfs_path *path,
+  * This function isn't completely safe. It's the caller's responsibility
+  * that the new key won't break the order
+  */
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ 			     struct btrfs_path *path,
+ 			     const struct btrfs_key *new_key)
+ {
++	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_disk_key disk_key;
+ 	struct extent_buffer *eb;
+ 	int slot;
+@@ -2748,9 +2750,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+ 
+ 	btrfs_cpu_key_to_disk(&disk_key, new_key);
+ 	btrfs_set_item_key(eb, &disk_key, slot);
+-	btrfs_mark_buffer_dirty(eb);
++	btrfs_mark_buffer_dirty(trans, eb);
+ 	if (slot == 0)
+-		fixup_low_keys(path, &disk_key, 1);
++		fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ 
+ /*
+@@ -2881,8 +2883,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
+ 	}
+ 	btrfs_set_header_nritems(src, src_nritems - push_items);
+ 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
+-	btrfs_mark_buffer_dirty(src);
+-	btrfs_mark_buffer_dirty(dst);
++	btrfs_mark_buffer_dirty(trans, src);
++	btrfs_mark_buffer_dirty(trans, dst);
+ 
+ 	return ret;
+ }
+@@ -2957,8 +2959,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ 	btrfs_set_header_nritems(src, src_nritems - push_items);
+ 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
+ 
+-	btrfs_mark_buffer_dirty(src);
+-	btrfs_mark_buffer_dirty(dst);
++	btrfs_mark_buffer_dirty(trans, src);
++	btrfs_mark_buffer_dirty(trans, dst);
+ 
+ 	return ret;
+ }
+@@ -3007,7 +3009,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+ 
+ 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
+ 
+-	btrfs_mark_buffer_dirty(c);
++	btrfs_mark_buffer_dirty(trans, c);
+ 
+ 	old = root->node;
+ 	ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
+@@ -3079,7 +3081,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
+ 	WARN_ON(trans->transid == 0);
+ 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
+ 	btrfs_set_header_nritems(lower, nritems + 1);
+-	btrfs_mark_buffer_dirty(lower);
++	btrfs_mark_buffer_dirty(trans, lower);
+ 
+ 	return 0;
+ }
+@@ -3158,8 +3160,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ 	btrfs_set_header_nritems(split, c_nritems - mid);
+ 	btrfs_set_header_nritems(c, mid);
+ 
+-	btrfs_mark_buffer_dirty(c);
+-	btrfs_mark_buffer_dirty(split);
++	btrfs_mark_buffer_dirty(trans, c);
++	btrfs_mark_buffer_dirty(trans, split);
+ 
+ 	ret = insert_ptr(trans, path, &disk_key, split->start,
+ 			 path->slots[level + 1] + 1, level + 1);
+@@ -3325,15 +3327,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
+ 	btrfs_set_header_nritems(left, left_nritems);
+ 
+ 	if (left_nritems)
+-		btrfs_mark_buffer_dirty(left);
++		btrfs_mark_buffer_dirty(trans, left);
+ 	else
+ 		btrfs_clear_buffer_dirty(trans, left);
+ 
+-	btrfs_mark_buffer_dirty(right);
++	btrfs_mark_buffer_dirty(trans, right);
+ 
+ 	btrfs_item_key(right, &disk_key, 0);
+ 	btrfs_set_node_key(upper, &disk_key, slot + 1);
+-	btrfs_mark_buffer_dirty(upper);
++	btrfs_mark_buffer_dirty(trans, upper);
+ 
+ 	/* then fixup the leaf pointer in the path */
+ 	if (path->slots[0] >= left_nritems) {
+@@ -3545,14 +3547,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
+ 		btrfs_set_token_item_offset(&token, i, push_space);
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(left);
++	btrfs_mark_buffer_dirty(trans, left);
+ 	if (right_nritems)
+-		btrfs_mark_buffer_dirty(right);
++		btrfs_mark_buffer_dirty(trans, right);
+ 	else
+ 		btrfs_clear_buffer_dirty(trans, right);
+ 
+ 	btrfs_item_key(right, &disk_key, 0);
+-	fixup_low_keys(path, &disk_key, 1);
++	fixup_low_keys(trans, path, &disk_key, 1);
+ 
+ 	/* then fixup the leaf pointer in the path */
+ 	if (path->slots[0] < push_items) {
+@@ -3683,8 +3685,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	btrfs_mark_buffer_dirty(right);
+-	btrfs_mark_buffer_dirty(l);
++	btrfs_mark_buffer_dirty(trans, right);
++	btrfs_mark_buffer_dirty(trans, l);
+ 	BUG_ON(path->slots[0] != slot);
+ 
+ 	if (mid <= slot) {
+@@ -3925,7 +3927,7 @@ again:
+ 			path->nodes[0] = right;
+ 			path->slots[0] = 0;
+ 			if (path->slots[1] == 0)
+-				fixup_low_keys(path, &disk_key, 1);
++				fixup_low_keys(trans, path, &disk_key, 1);
+ 		}
+ 		/*
+ 		 * We create a new leaf 'right' for the required ins_len and
+@@ -4024,7 +4026,8 @@ err:
+ 	return ret;
+ }
+ 
+-static noinline int split_item(struct btrfs_path *path,
++static noinline int split_item(struct btrfs_trans_handle *trans,
++			       struct btrfs_path *path,
+ 			       const struct btrfs_key *new_key,
+ 			       unsigned long split_offset)
+ {
+@@ -4083,7 +4086,7 @@ static noinline int split_item(struct btrfs_path *path,
+ 	write_extent_buffer(leaf, buf + split_offset,
+ 			    btrfs_item_ptr_offset(leaf, slot),
+ 			    item_size - split_offset);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
+ 	kfree(buf);
+@@ -4117,7 +4120,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = split_item(path, new_key, split_offset);
++	ret = split_item(trans, path, new_key, split_offset);
+ 	return ret;
+ }
+ 
+@@ -4127,7 +4130,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+  * off the end of the item or if we shift the item to chop bytes off
+  * the front.
+  */
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++			 struct btrfs_path *path, u32 new_size, int from_end)
+ {
+ 	int slot;
+ 	struct extent_buffer *leaf;
+@@ -4203,11 +4207,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
+ 		btrfs_set_item_key(leaf, &disk_key, slot);
+ 		if (slot == 0)
+-			fixup_low_keys(path, &disk_key, 1);
++			fixup_low_keys(trans, path, &disk_key, 1);
+ 	}
+ 
+ 	btrfs_set_item_size(leaf, slot, new_size);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	if (btrfs_leaf_free_space(leaf) < 0) {
+ 		btrfs_print_leaf(leaf);
+@@ -4218,7 +4222,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ /*
+  * make the item pointed to by the path bigger, data_size is the added size.
+  */
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++		       struct btrfs_path *path, u32 data_size)
+ {
+ 	int slot;
+ 	struct extent_buffer *leaf;
+@@ -4268,7 +4273,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ 	data_end = old_data;
+ 	old_size = btrfs_item_size(leaf, slot);
+ 	btrfs_set_item_size(leaf, slot, old_size + data_size);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	if (btrfs_leaf_free_space(leaf) < 0) {
+ 		btrfs_print_leaf(leaf);
+@@ -4279,6 +4284,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ /*
+  * Make space in the node before inserting one or more items.
+  *
++ * @trans:	transaction handle
+  * @root:	root we are inserting items to
+  * @path:	points to the leaf/slot where we are going to insert new items
+  * @batch:      information about the batch of items to insert
+@@ -4286,7 +4292,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+  * Main purpose is to save stack depth by doing the bulk of the work in a
+  * function that doesn't call btrfs_search_slot
+  */
+-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
++static void setup_items_for_insert(struct btrfs_trans_handle *trans,
++				   struct btrfs_root *root, struct btrfs_path *path,
+ 				   const struct btrfs_item_batch *batch)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -4306,7 +4313,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ 	 */
+ 	if (path->slots[0] == 0) {
+ 		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
+-		fixup_low_keys(path, &disk_key, 1);
++		fixup_low_keys(trans, path, &disk_key, 1);
+ 	}
+ 	btrfs_unlock_up_safe(path, 1);
+ 
+@@ -4365,7 +4372,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ 	}
+ 
+ 	btrfs_set_header_nritems(leaf, nritems + batch->nr);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	if (btrfs_leaf_free_space(leaf) < 0) {
+ 		btrfs_print_leaf(leaf);
+@@ -4376,12 +4383,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ /*
+  * Insert a new item into a leaf.
+  *
++ * @trans:     Transaction handle.
+  * @root:      The root of the btree.
+  * @path:      A path pointing to the target leaf and slot.
+  * @key:       The key of the new item.
+  * @data_size: The size of the data associated with the new key.
+  */
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++				 struct btrfs_root *root,
+ 				 struct btrfs_path *path,
+ 				 const struct btrfs_key *key,
+ 				 u32 data_size)
+@@ -4393,7 +4402,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
+ 	batch.total_data_size = data_size;
+ 	batch.nr = 1;
+ 
+-	setup_items_for_insert(root, path, &batch);
++	setup_items_for_insert(trans, root, path, &batch);
+ }
+ 
+ /*
+@@ -4419,7 +4428,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+ 	slot = path->slots[0];
+ 	BUG_ON(slot < 0);
+ 
+-	setup_items_for_insert(root, path, batch);
++	setup_items_for_insert(trans, root, path, batch);
+ 	return 0;
+ }
+ 
+@@ -4444,7 +4453,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		leaf = path->nodes[0];
+ 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 		write_extent_buffer(leaf, data, ptr, data_size);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 	}
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -4475,7 +4484,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ 		return ret;
+ 
+ 	path->slots[0]++;
+-	btrfs_setup_item_for_insert(root, path, new_key, item_size);
++	btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
+ 	leaf = path->nodes[0];
+ 	memcpy_extent_buffer(leaf,
+ 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
+@@ -4533,9 +4542,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		struct btrfs_disk_key disk_key;
+ 
+ 		btrfs_node_key(parent, &disk_key, 0);
+-		fixup_low_keys(path, &disk_key, level + 1);
++		fixup_low_keys(trans, path, &disk_key, level + 1);
+ 	}
+-	btrfs_mark_buffer_dirty(parent);
++	btrfs_mark_buffer_dirty(trans, parent);
+ 	return 0;
+ }
+ 
+@@ -4632,7 +4641,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 			struct btrfs_disk_key disk_key;
+ 
+ 			btrfs_item_key(leaf, &disk_key, 0);
+-			fixup_low_keys(path, &disk_key, 1);
++			fixup_low_keys(trans, path, &disk_key, 1);
+ 		}
+ 
+ 		/*
+@@ -4697,11 +4706,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 				 * dirtied this buffer
+ 				 */
+ 				if (path->nodes[0] == leaf)
+-					btrfs_mark_buffer_dirty(leaf);
++					btrfs_mark_buffer_dirty(trans, leaf);
+ 				free_extent_buffer(leaf);
+ 			}
+ 		} else {
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 		}
+ 	}
+ 	return ret;
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ff40acd63a374..06333a74d6c4c 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -518,7 +518,7 @@ int btrfs_previous_item(struct btrfs_root *root,
+ 			int type);
+ int btrfs_previous_extent_item(struct btrfs_root *root,
+ 			struct btrfs_path *path, u64 min_objectid);
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ 			     struct btrfs_path *path,
+ 			     const struct btrfs_key *new_key);
+ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
+@@ -545,8 +545,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ 			      struct extent_buffer *buf);
+ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		  struct btrfs_path *path, int level, int slot);
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++		       struct btrfs_path *path, u32 data_size);
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++			 struct btrfs_path *path, u32 new_size, int from_end);
+ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ 		     struct btrfs_root *root,
+ 		     struct btrfs_path *path,
+@@ -610,7 +612,8 @@ struct btrfs_item_batch {
+ 	int nr;
+ };
+ 
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++				 struct btrfs_root *root,
+ 				 struct btrfs_path *path,
+ 				 const struct btrfs_key *key,
+ 				 u32 data_size);
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 427abaf608b8c..0d105ed1b8def 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ 	} else {
+ 		if (current->journal_info)
+ 			flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+-		if (btrfs_transaction_in_commit(fs_info))
+-			schedule_timeout(1);
+ 	}
+ 
+ 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 142e0a0f6a9fe..5d3229b42b3e2 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1030,7 +1030,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ 				    struct btrfs_inode_item);
+ 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ 			    sizeof(struct btrfs_inode_item));
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+ 		goto out;
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 5f10965fd72bf..5549cbd9bdf6a 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -442,7 +442,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
+ 	dev_replace->item_needs_writeback = 0;
+ 	up_write(&dev_replace->rwsem);
+ 
+-	btrfs_mark_buffer_dirty(eb);
++	btrfs_mark_buffer_dirty(trans, eb);
+ 
+ out:
+ 	btrfs_free_path(path);
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 082eb0e195981..9c07d5c3e5ad2 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
+ 		di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ 		if (di)
+ 			return ERR_PTR(-EEXIST);
+-		btrfs_extend_item(path, data_size);
++		btrfs_extend_item(trans, path, data_size);
+ 	} else if (ret < 0)
+ 		return ERR_PTR(ret);
+ 	WARN_ON(ret > 0);
+@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+ 
+ 	write_extent_buffer(leaf, name, name_ptr, name_len);
+ 	write_extent_buffer(leaf, data, data_ptr, data_len);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 
+ 	return ret;
+ }
+@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ 	name_ptr = (unsigned long)(dir_item + 1);
+ 
+ 	write_extent_buffer(leaf, name->name, name_ptr, name->len);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ second_insert:
+ 	/* FIXME, use some real flag for selecting the extra index */
+@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ 		start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ 			item_len - (ptr + sub_item_len - start));
+-		btrfs_truncate_item(path, item_len - sub_item_len, 1);
++		btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 681594df7334f..1ae781f533582 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -872,7 +872,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	root->node = leaf;
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	root->commit_root = btrfs_root_node(root);
+ 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+@@ -947,7 +947,7 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+ 
+ 	root->node = leaf;
+ 
+-	btrfs_mark_buffer_dirty(root->node);
++	btrfs_mark_buffer_dirty(trans, root->node);
+ 	btrfs_tree_unlock(root->node);
+ 
+ 	return 0;
+@@ -4426,7 +4426,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	btrfs_close_devices(fs_info->fs_devices);
+ }
+ 
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++			     struct extent_buffer *buf)
+ {
+ 	struct btrfs_fs_info *fs_info = buf->fs_info;
+ 	u64 transid = btrfs_header_generation(buf);
+@@ -4440,10 +4441,14 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+ 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
+ 		return;
+ #endif
++	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
++	ASSERT(trans->transid == fs_info->generation);
+ 	btrfs_assert_tree_write_locked(buf);
+-	if (transid != fs_info->generation)
++	if (transid != fs_info->generation) {
+ 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
+ 			buf->start, transid, fs_info->generation);
++		btrfs_abort_transaction(trans, -EUCLEAN);
++	}
+ 	set_extent_buffer_dirty(buf);
+ #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+ 	/*
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index b03767f4d7edf..e5bdb96912438 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -105,7 +105,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
+ }
+ 
+ void btrfs_put_root(struct btrfs_root *root);
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++			     struct extent_buffer *buf);
+ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+ 			  int atomic);
+ int btrfs_read_extent_buffer(struct extent_buffer *buf,
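
These two hunks (disk-io.c and disk-io.h) carry the core change of the btrfs series: btrfs_mark_buffer_dirty() now takes the transaction handle, and a generation mismatch aborts the transaction with -EUCLEAN instead of continuing after a WARN. The helpers touched in the surrounding hunks (btrfs_extend_item(), btrfs_truncate_item(), btrfs_set_item_key_safe(), btrfs_setup_item_for_insert(), setup_inline_extent_backref(), ...) grow a trans parameter purely to thread the handle down to this point. Condensed, the new failure handling is:

	ASSERT(trans->transid == fs_info->generation);	/* active transaction */
	if (btrfs_header_generation(buf) != fs_info->generation) {
		WARN(1, "btrfs transid mismatch ...");
		btrfs_abort_transaction(trans, -EUCLEAN);	/* new: was WARN-only */
	}
	set_extent_buffer_dirty(buf);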
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 14ea6b587e97b..118c56c512bd8 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -596,7 +596,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
+ 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
+ 		}
+ 	}
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	ret = 0;
+ fail:
+ 	btrfs_release_path(path);
+@@ -644,7 +644,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
+ 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
+ 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 	}
+ 	return ret;
+ }
+@@ -997,7 +997,7 @@ out:
+  * helper to add new inline back ref
+  */
+ static noinline_for_stack
+-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
++void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ 				 struct btrfs_path *path,
+ 				 struct btrfs_extent_inline_ref *iref,
+ 				 u64 parent, u64 root_objectid,
+@@ -1020,7 +1020,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ 	type = extent_ref_type(parent, owner);
+ 	size = btrfs_extent_inline_ref_size(type);
+ 
+-	btrfs_extend_item(path, size);
++	btrfs_extend_item(trans, path, size);
+ 
+ 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ 	refs = btrfs_extent_refs(leaf, ei);
+@@ -1054,7 +1054,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ 	} else {
+ 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
+ 	}
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ 
+ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+@@ -1087,7 +1087,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+ /*
+  * helper to update/remove inline back ref
+  */
+-static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
++static noinline_for_stack int update_inline_extent_backref(
++				  struct btrfs_trans_handle *trans,
++				  struct btrfs_path *path,
+ 				  struct btrfs_extent_inline_ref *iref,
+ 				  int refs_to_mod,
+ 				  struct btrfs_delayed_extent_op *extent_op)
+@@ -1195,9 +1197,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
+ 			memmove_extent_buffer(leaf, ptr, ptr + size,
+ 					      end - ptr - size);
+ 		item_size -= size;
+-		btrfs_truncate_item(path, item_size, 1);
++		btrfs_truncate_item(trans, path, item_size, 1);
+ 	}
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	return 0;
+ }
+ 
+@@ -1227,9 +1229,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
+ 				   bytenr, num_bytes, root_objectid, path->slots[0]);
+ 			return -EUCLEAN;
+ 		}
+-		ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
++		ret = update_inline_extent_backref(trans, path, iref,
++						   refs_to_add, extent_op);
+ 	} else if (ret == -ENOENT) {
+-		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
++		setup_inline_extent_backref(trans, path, iref, parent,
+ 					    root_objectid, owner, offset,
+ 					    refs_to_add, extent_op);
+ 		ret = 0;
+@@ -1247,7 +1250,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
+ 
+ 	BUG_ON(!is_data && refs_to_drop != 1);
+ 	if (iref)
+-		ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
++		ret = update_inline_extent_backref(trans, path, iref,
++						   -refs_to_drop, NULL);
+ 	else if (is_data)
+ 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ 	else
+@@ -1531,7 +1535,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ 	if (extent_op)
+ 		__run_delayed_extent_op(extent_op, leaf, item);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	/* now insert the actual backref */
+@@ -1697,7 +1701,7 @@ again:
+ 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ 	__run_delayed_extent_op(extent_op, leaf, ei);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ 	btrfs_free_path(path);
+ 	return err;
+@@ -3171,7 +3175,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ 			}
+ 		} else {
+ 			btrfs_set_extent_refs(leaf, ei, refs);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 		}
+ 		if (found_extent) {
+ 			ret = remove_extent_backref(trans, extent_root, path,
+@@ -4679,7 +4683,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	btrfs_free_path(path);
+ 
+ 	return alloc_reserved_extent(trans, ins->objectid, ins->offset);
+@@ -4754,7 +4758,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ 		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_free_path(path);
+ 
+ 	return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 1ce5dd1544995..45cae356e89ba 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
+ 	btrfs_set_file_extent_encryption(leaf, item, 0);
+ 	btrfs_set_file_extent_other_encoding(leaf, item, 0);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+  * This calls btrfs_truncate_item with the correct args based on the overlap,
+  * and fixes up the key as required.
+  */
+-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
++static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+ 				       struct btrfs_path *path,
+ 				       struct btrfs_key *key,
+ 				       u64 bytenr, u64 len)
+ {
++	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct extent_buffer *leaf;
+ 	const u32 csum_size = fs_info->csum_size;
+ 	u64 csum_end;
+@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ 		 */
+ 		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
+ 		new_size *= csum_size;
+-		btrfs_truncate_item(path, new_size, 1);
++		btrfs_truncate_item(trans, path, new_size, 1);
+ 	} else if (key->offset >= bytenr && csum_end > end_byte &&
+ 		   end_byte > key->offset) {
+ 		/*
+@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ 		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
+ 		new_size *= csum_size;
+ 
+-		btrfs_truncate_item(path, new_size, 0);
++		btrfs_truncate_item(trans, path, new_size, 0);
+ 
+ 		key->offset = end_byte;
+-		btrfs_set_item_key_safe(fs_info, path, key);
++		btrfs_set_item_key_safe(trans, path, key);
+ 	} else {
+ 		BUG();
+ 	}
+@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ 
+ 			key.offset = end_byte - 1;
+ 		} else {
+-			truncate_one_csum(fs_info, path, &key, bytenr, len);
++			truncate_one_csum(trans, path, &key, bytenr, len);
+ 			if (key.offset < bytenr)
+ 				break;
+ 		}
+@@ -1202,7 +1203,7 @@ extend_csum:
+ 		diff /= csum_size;
+ 		diff *= csum_size;
+ 
+-		btrfs_extend_item(path, diff);
++		btrfs_extend_item(trans, path, diff);
+ 		ret = 0;
+ 		goto csum;
+ 	}
+@@ -1249,7 +1250,7 @@ found:
+ 	ins_size /= csum_size;
+ 	total_bytes += ins_size * fs_info->sectorsize;
+ 
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	if (total_bytes < sums->len) {
+ 		btrfs_release_path(path);
+ 		cond_resched();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index eae9175f2c29b..a407af38a9237 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -368,7 +368,7 @@ next_slot:
+ 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ 			btrfs_set_file_extent_num_bytes(leaf, fi,
+ 							extent_end - args->start);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 			if (update_refs && disk_bytenr > 0) {
+ 				btrfs_init_generic_ref(&ref,
+@@ -405,13 +405,13 @@ next_slot:
+ 
+ 			memcpy(&new_key, &key, sizeof(new_key));
+ 			new_key.offset = args->end;
+-			btrfs_set_item_key_safe(fs_info, path, &new_key);
++			btrfs_set_item_key_safe(trans, path, &new_key);
+ 
+ 			extent_offset += args->end - key.offset;
+ 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ 			btrfs_set_file_extent_num_bytes(leaf, fi,
+ 							extent_end - args->end);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 			if (update_refs && disk_bytenr > 0)
+ 				args->bytes_found += args->end - key.offset;
+ 			break;
+@@ -431,7 +431,7 @@ next_slot:
+ 
+ 			btrfs_set_file_extent_num_bytes(leaf, fi,
+ 							args->start - key.offset);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 			if (update_refs && disk_bytenr > 0)
+ 				args->bytes_found += extent_end - args->start;
+ 			if (args->end == extent_end)
+@@ -536,7 +536,8 @@ delete_extent_item:
+ 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
+ 				path->slots[0]++;
+ 		}
+-		btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
++		btrfs_setup_item_for_insert(trans, root, path, &key,
++					    args->extent_item_size);
+ 		args->extent_inserted = true;
+ 	}
+ 
+@@ -593,7 +594,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
+ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ 			      struct btrfs_inode *inode, u64 start, u64 end)
+ {
+-	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_root *root = inode->root;
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_path *path;
+@@ -664,7 +664,7 @@ again:
+ 				     ino, bytenr, orig_offset,
+ 				     &other_start, &other_end)) {
+ 			new_key.offset = end;
+-			btrfs_set_item_key_safe(fs_info, path, &new_key);
++			btrfs_set_item_key_safe(trans, path, &new_key);
+ 			fi = btrfs_item_ptr(leaf, path->slots[0],
+ 					    struct btrfs_file_extent_item);
+ 			btrfs_set_file_extent_generation(leaf, fi,
+@@ -679,7 +679,7 @@ again:
+ 							 trans->transid);
+ 			btrfs_set_file_extent_num_bytes(leaf, fi,
+ 							end - other_start);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 			goto out;
+ 		}
+ 	}
+@@ -698,7 +698,7 @@ again:
+ 							 trans->transid);
+ 			path->slots[0]++;
+ 			new_key.offset = start;
+-			btrfs_set_item_key_safe(fs_info, path, &new_key);
++			btrfs_set_item_key_safe(trans, path, &new_key);
+ 
+ 			fi = btrfs_item_ptr(leaf, path->slots[0],
+ 					    struct btrfs_file_extent_item);
+@@ -708,7 +708,7 @@ again:
+ 							other_end - start);
+ 			btrfs_set_file_extent_offset(leaf, fi,
+ 						     start - orig_offset);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 			goto out;
+ 		}
+ 	}
+@@ -742,7 +742,7 @@ again:
+ 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ 		btrfs_set_file_extent_num_bytes(leaf, fi,
+ 						extent_end - split);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
+ 				       num_bytes, 0);
+@@ -814,7 +814,7 @@ again:
+ 		btrfs_set_file_extent_type(leaf, fi,
+ 					   BTRFS_FILE_EXTENT_REG);
+ 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 	} else {
+ 		fi = btrfs_item_ptr(leaf, del_slot - 1,
+ 			   struct btrfs_file_extent_item);
+@@ -823,7 +823,7 @@ again:
+ 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+ 		btrfs_set_file_extent_num_bytes(leaf, fi,
+ 						extent_end - key.offset);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ 		if (ret < 0) {
+@@ -2103,7 +2103,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ 		btrfs_set_file_extent_offset(leaf, fi, 0);
+ 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 		goto out;
+ 	}
+ 
+@@ -2111,7 +2111,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ 		u64 num_bytes;
+ 
+ 		key.offset = offset;
+-		btrfs_set_item_key_safe(fs_info, path, &key);
++		btrfs_set_item_key_safe(trans, path, &key);
+ 		fi = btrfs_item_ptr(leaf, path->slots[0],
+ 				    struct btrfs_file_extent_item);
+ 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
+@@ -2120,7 +2120,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ 		btrfs_set_file_extent_offset(leaf, fi, 0);
+ 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 		goto out;
+ 	}
+ 	btrfs_release_path(path);
+@@ -2272,7 +2272,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
+ 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
+ 	if (extent_info->is_new_extent)
+ 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 8808004180759..6b7383ae5a70c 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -195,7 +195,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ 	btrfs_set_inode_nlink(leaf, inode_item, 1);
+ 	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
+ 	btrfs_set_inode_block_group(leaf, inode_item, offset);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+@@ -213,7 +213,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ 				struct btrfs_free_space_header);
+ 	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
+ 	btrfs_set_free_space_key(leaf, header, &disk_key);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	return 0;
+@@ -1185,7 +1185,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_free_space_entries(leaf, header, entries);
+ 	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
+ 	btrfs_set_free_space_generation(leaf, header, trans->transid);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	return 0;
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index f169378e2ca6e..ae060a26e1191 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
+ 			      struct btrfs_free_space_info);
+ 	btrfs_set_free_space_extent_count(leaf, info, 0);
+ 	btrfs_set_free_space_flags(leaf, info, 0);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	ret = 0;
+ out:
+@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ 	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ 	btrfs_set_free_space_flags(leaf, info, flags);
+ 	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	if (extent_count != expected_extent_count) {
+@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 		write_extent_buffer(leaf, bitmap_cursor, ptr,
+ 				    data_size);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 		btrfs_release_path(path);
+ 
+ 		i += extent_size;
+@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ 	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ 	btrfs_set_free_space_flags(leaf, info, flags);
+ 	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
+ 
+ 	extent_count += new_extents;
+ 	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	btrfs_release_path(path);
+ 
+ 	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
+@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
+ 	return !!extent_buffer_test_bit(leaf, ptr, i);
+ }
+ 
+-static void free_space_set_bits(struct btrfs_block_group *block_group,
++static void free_space_set_bits(struct btrfs_trans_handle *trans,
++				struct btrfs_block_group *block_group,
+ 				struct btrfs_path *path, u64 *start, u64 *size,
+ 				int bit)
+ {
+@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
+ 		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
+ 	else
+ 		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	*size -= end - *start;
+ 	*start = end;
+@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
+ 	cur_start = start;
+ 	cur_size = size;
+ 	while (1) {
+-		free_space_set_bits(block_group, path, &cur_start, &cur_size,
++		free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
+ 				    !remove);
+ 		if (cur_size == 0)
+ 			break;
+diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
+index 4c322b720a80a..d3ff97374d48a 100644
+--- a/fs/btrfs/inode-item.c
++++ b/fs/btrfs/inode-item.c
+@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ 	memmove_extent_buffer(leaf, ptr, ptr + del_len,
+ 			      item_size - (ptr + del_len - item_start));
+ 
+-	btrfs_truncate_item(path, item_size - del_len, 1);
++	btrfs_truncate_item(trans, path, item_size - del_len, 1);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ 			      item_size - (ptr + sub_item_len - item_start));
+-	btrfs_truncate_item(path, item_size - sub_item_len, 1);
++	btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
+ out:
+ 	btrfs_free_path(path);
+ 
+@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 						   name))
+ 			goto out;
+ 
+-		btrfs_extend_item(path, ins_len);
++		btrfs_extend_item(trans, path, ins_len);
+ 		ret = 0;
+ 	}
+ 	if (ret < 0)
+@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 
+ 	ptr = (unsigned long)&extref->name;
+ 	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 			goto out;
+ 
+ 		old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+-		btrfs_extend_item(path, ins_len);
++		btrfs_extend_item(trans, path, ins_len);
+ 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 				     struct btrfs_inode_ref);
+ 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 		ptr = (unsigned long)(ref + 1);
+ 	}
+ 	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -591,7 +591,7 @@ search_again:
+ 				num_dec = (orig_num_bytes - extent_num_bytes);
+ 				if (extent_start != 0)
+ 					control->sub_bytes += num_dec;
+-				btrfs_mark_buffer_dirty(leaf);
++				btrfs_mark_buffer_dirty(trans, leaf);
+ 			} else {
+ 				extent_num_bytes =
+ 					btrfs_file_extent_disk_num_bytes(leaf, fi);
+@@ -617,7 +617,7 @@ search_again:
+ 
+ 				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ 				size = btrfs_file_extent_calc_inline_size(size);
+-				btrfs_truncate_item(path, size, 1);
++				btrfs_truncate_item(trans, path, size, 1);
+ 			} else if (!del_item) {
+ 				/*
+ 				 * We have to bail so the last_size is set to
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0f4498dfa30c9..197c1debefed5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -573,7 +573,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
+ 		kunmap_local(kaddr);
+ 		put_page(page);
+ 	}
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	/*
+@@ -3072,7 +3072,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+ 			btrfs_item_ptr_offset(leaf, path->slots[0]),
+ 			sizeof(struct btrfs_file_extent_item));
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_release_path(path);
+ 
+ 	/*
+@@ -4134,7 +4134,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
+ 				    struct btrfs_inode_item);
+ 
+ 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_set_inode_last_trans(trans, inode);
+ 	ret = 0;
+ failed:
+@@ -6476,7 +6476,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ 		}
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	/*
+ 	 * We don't need the path anymore, plus inheriting properties, adding
+ 	 * ACLs, security xattrs, orphan item or adding the link, will result in
+@@ -7142,8 +7142,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ 	int ret;
+ 
+ 	alloc_hint = get_extent_allocation_hint(inode, start, len);
++again:
+ 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 				   0, alloc_hint, &ins, 1, 1);
++	if (ret == -EAGAIN) {
++		ASSERT(btrfs_is_zoned(fs_info));
++		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
++			       TASK_UNINTERRUPTIBLE);
++		goto again;
++	}
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+@@ -9630,7 +9637,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+ 
+ 	ptr = btrfs_file_extent_inline_start(ei);
+ 	write_extent_buffer(leaf, symname, ptr, name_len);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 	btrfs_free_path(path);
+ 
+ 	d_instantiate_new(dentry, inode);
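
One hunk in inode.c is unrelated to the trans plumbing: btrfs_new_extent_direct() now retries btrfs_reserve_extent() when it returns -EAGAIN, which on zoned filesystems signals that a zone must be finished before space becomes available (hence the ASSERT(btrfs_is_zoned(fs_info))). The shape of the loop, reduced to its essentials:

	again:
		ret = btrfs_reserve_extent(...);
		if (ret == -EAGAIN) {
			/* zoned only: block until a zone finish completes */
			wait_on_bit_io(&fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			goto again;
		}
		if (ret)
			return ERR_PTR(ret);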
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 6d0df9bc1e72b..8bdf9bed25c75 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -663,7 +663,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ 		goto out;
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	inode_item = &root_item->inode;
+ 	btrfs_set_stack_inode_generation(inode_item, 1);
+@@ -2947,7 +2947,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ 
+ 	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
+ 	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	btrfs_release_path(path);
+ 
+ 	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 2637d6b157ff9..74cabaa59be71 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -622,7 +622,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+ 
+ 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+ 
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -700,7 +700,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	btrfs_release_path(path);
+ 
+@@ -719,7 +719,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	ret = 0;
+ out:
+@@ -808,7 +808,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
+ 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
+ 
+-	btrfs_mark_buffer_dirty(l);
++	btrfs_mark_buffer_dirty(trans, l);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -854,7 +854,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+ 
+-	btrfs_mark_buffer_dirty(l);
++	btrfs_mark_buffer_dirty(trans, l);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -896,7 +896,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
+ 	btrfs_set_qgroup_status_rescan(l, ptr,
+ 				fs_info->qgroup_rescan_progress.objectid);
+ 
+-	btrfs_mark_buffer_dirty(l);
++	btrfs_mark_buffer_dirty(trans, l);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -1069,7 +1069,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
+ 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	key.objectid = 0;
+ 	key.type = BTRFS_ROOT_REF_KEY;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 62ed57551824c..31781af447553 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1181,7 +1181,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ 		}
+ 	}
+ 	if (dirty)
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 	if (inode)
+ 		btrfs_add_delayed_iput(BTRFS_I(inode));
+ 	return ret;
+@@ -1374,13 +1374,13 @@ again:
+ 		 */
+ 		btrfs_set_node_blockptr(parent, slot, new_bytenr);
+ 		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
+-		btrfs_mark_buffer_dirty(parent);
++		btrfs_mark_buffer_dirty(trans, parent);
+ 
+ 		btrfs_set_node_blockptr(path->nodes[level],
+ 					path->slots[level], old_bytenr);
+ 		btrfs_set_node_ptr_generation(path->nodes[level],
+ 					      path->slots[level], old_ptr_gen);
+-		btrfs_mark_buffer_dirty(path->nodes[level]);
++		btrfs_mark_buffer_dirty(trans, path->nodes[level]);
+ 
+ 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
+ 				       blocksize, path->nodes[level]->start);
+@@ -2517,7 +2517,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ 						node->eb->start);
+ 			btrfs_set_node_ptr_generation(upper->eb, slot,
+ 						      trans->transid);
+-			btrfs_mark_buffer_dirty(upper->eb);
++			btrfs_mark_buffer_dirty(trans, upper->eb);
+ 
+ 			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ 					       node->eb->start, blocksize,
+@@ -3833,7 +3833,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ 	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+ 	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
+ 					  BTRFS_INODE_PREALLOC);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 859874579456f..5b0f1bccc409c 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -191,7 +191,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+ 	btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+ 
+ 	write_extent_buffer(l, item, ptr, sizeof(*item));
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -438,7 +438,7 @@ again:
+ 	btrfs_set_root_ref_name_len(leaf, ref, name->len);
+ 	ptr = (unsigned long)(ref + 1);
+ 	write_extent_buffer(leaf, name->name, ptr, name->len);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ 		btrfs_release_path(path);
+diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
+index 5ef0b90e25c3b..6a43a64ba55ad 100644
+--- a/fs/btrfs/tests/extent-buffer-tests.c
++++ b/fs/btrfs/tests/extent-buffer-tests.c
+@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
+ 	key.type = BTRFS_EXTENT_CSUM_KEY;
+ 	key.offset = 0;
+ 
+-	btrfs_setup_item_for_insert(root, path, &key, value_len);
++	/*
++	 * Passing a NULL trans handle is fine here, we have a dummy root eb
++	 * and the tree is a single node (level 0).
++	 */
++	btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
+ 	write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+ 			    value_len);
+ 
+diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
+index 05b03f5eab83b..492d69d2fa737 100644
+--- a/fs/btrfs/tests/inode-tests.c
++++ b/fs/btrfs/tests/inode-tests.c
+@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+ 	key.type = BTRFS_EXTENT_DATA_KEY;
+ 	key.offset = start;
+ 
+-	btrfs_setup_item_for_insert(root, &path, &key, value_len);
++	/*
++	 * Passing a NULL trans handle is fine here, we have a dummy root eb
++	 * and the tree is a single node (level 0).
++	 */
++	btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ 	btrfs_set_file_extent_generation(leaf, fi, 1);
+ 	btrfs_set_file_extent_type(leaf, fi, type);
+@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
+ 	key.type = BTRFS_INODE_ITEM_KEY;
+ 	key.offset = 0;
+ 
+-	btrfs_setup_item_for_insert(root, &path, &key, value_len);
++	/*
++	 * Passing a NULL trans handle is fine here, we have a dummy root eb
++	 * and the tree is a single node (level 0).
++	 */
++	btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ }
+ 
+ /*
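
Both self-test files get the same comment: passing a NULL trans handle is safe there because the tests operate on dummy, unmapped extent buffers, and under CONFIG_BTRFS_FS_RUN_SANITY_TESTS btrfs_mark_buffer_dirty() bails out before ever dereferencing trans (see the disk-io.c hunk earlier in this patch):

	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;		/* dummy test buffers exit here */
	ASSERT(trans->transid == fs_info->generation);	/* not reached in tests */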
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index a00e7a0bc713d..ad0d934991741 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -504,9 +504,9 @@ insert:
+ 		found_size = btrfs_item_size(path->nodes[0],
+ 						path->slots[0]);
+ 		if (found_size > item_size)
+-			btrfs_truncate_item(path, item_size, 1);
++			btrfs_truncate_item(trans, path, item_size, 1);
+ 		else if (found_size < item_size)
+-			btrfs_extend_item(path, item_size - found_size);
++			btrfs_extend_item(trans, path, item_size - found_size);
+ 	} else if (ret) {
+ 		return ret;
+ 	}
+@@ -574,7 +574,7 @@ insert:
+ 		}
+ 	}
+ no_copy:
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	btrfs_release_path(path);
+ 	return 0;
+ }
+@@ -3530,7 +3530,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+ 		last_offset = max(last_offset, curr_end);
+ 	}
+ 	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
+-	btrfs_mark_buffer_dirty(path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ 	btrfs_release_path(path);
+ 	return 0;
+ }
+@@ -4488,7 +4488,7 @@ copy_item:
+ 		dst_index++;
+ 	}
+ 
+-	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
++	btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
+ 	btrfs_release_path(dst_path);
+ out:
+ 	kfree(ins_data);
+@@ -4693,7 +4693,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ 	write_extent_buffer(leaf, &fi,
+ 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
+ 			    sizeof(fi));
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	btrfs_release_path(path);
+ 
+diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
+index 7c7001f42b14c..5be74f9e47ebf 100644
+--- a/fs/btrfs/uuid-tree.c
++++ b/fs/btrfs/uuid-tree.c
+@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ 		 * An item with that type already exists.
+ 		 * Extend the item and store the new subid at the end.
+ 		 */
+-		btrfs_extend_item(path, sizeof(subid_le));
++		btrfs_extend_item(trans, path, sizeof(subid_le));
+ 		eb = path->nodes[0];
+ 		slot = path->slots[0];
+ 		offset = btrfs_item_ptr_offset(eb, slot);
+@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ 	ret = 0;
+ 	subid_le = cpu_to_le64(subid_cpu);
+ 	write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
+-	btrfs_mark_buffer_dirty(eb);
++	btrfs_mark_buffer_dirty(trans, eb);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ 	move_src = offset + sizeof(subid);
+ 	move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
+ 	memmove_extent_buffer(eb, move_dst, move_src, move_len);
+-	btrfs_truncate_item(path, item_size - sizeof(subid), 1);
++	btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
+ 
+ out:
+ 	btrfs_free_path(path);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 5019e9244d2d2..1df496c809376 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1908,7 +1908,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
+ 	ptr = btrfs_device_fsid(dev_item);
+ 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
+ 			    ptr, BTRFS_FSID_SIZE);
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ 	ret = 0;
+ out:
+@@ -2613,7 +2613,7 @@ next_slot:
+ 		if (device->fs_devices->seeding) {
+ 			btrfs_set_device_generation(leaf, dev_item,
+ 						    device->generation);
+-			btrfs_mark_buffer_dirty(leaf);
++			btrfs_mark_buffer_dirty(trans, leaf);
+ 		}
+ 
+ 		path->slots[0]++;
+@@ -2911,7 +2911,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ 				     btrfs_device_get_disk_total_bytes(device));
+ 	btrfs_set_device_bytes_used(leaf, dev_item,
+ 				    btrfs_device_get_bytes_used(device));
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ 
+ out:
+ 	btrfs_free_path(path);
+@@ -3499,7 +3499,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
+ 
+ 	btrfs_set_balance_flags(leaf, item, bctl->flags);
+ 
+-	btrfs_mark_buffer_dirty(leaf);
++	btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ 	btrfs_free_path(path);
+ 	err = btrfs_commit_transaction(trans);
+@@ -7513,7 +7513,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+ 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ 		btrfs_set_dev_stats_value(eb, ptr, i,
+ 					  btrfs_dev_stat_read(device, i));
+-	btrfs_mark_buffer_dirty(eb);
++	btrfs_mark_buffer_dirty(trans, eb);
+ 
+ out:
+ 	btrfs_free_path(path);
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index fc4b20c2688a0..c454b8ce6babe 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ 		if (old_data_len + name_len + sizeof(*di) == item_size) {
+ 			/* No other xattrs packed in the same leaf item. */
+ 			if (size > old_data_len)
+-				btrfs_extend_item(path, size - old_data_len);
++				btrfs_extend_item(trans, path, size - old_data_len);
+ 			else if (size < old_data_len)
+-				btrfs_truncate_item(path, data_size, 1);
++				btrfs_truncate_item(trans, path, data_size, 1);
+ 		} else {
+ 			/* There are other xattrs packed in the same item. */
+ 			ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ 			if (ret)
+ 				goto out;
+-			btrfs_extend_item(path, data_size);
++			btrfs_extend_item(trans, path, data_size);
+ 		}
+ 
+ 		ptr = btrfs_item_ptr(leaf, slot, char);
+@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ 		btrfs_set_dir_data_len(leaf, di, size);
+ 		data_ptr = ((unsigned long)(di + 1)) + name_len;
+ 		write_extent_buffer(leaf, value, data_ptr, size);
+-		btrfs_mark_buffer_dirty(leaf);
++		btrfs_mark_buffer_dirty(trans, leaf);
+ 	} else {
+ 		/*
+ 		 * Insert, and we had space for the xattr, so path->slots[0] is
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index e0ff9d156f6f5..43774693f65f5 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -351,14 +351,20 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		if (exfat_check_max_dentries(inode))
+ 			return -ENOSPC;
+ 
+-		/* we trust p_dir->size regardless of FAT type */
+-		if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+-			return -EIO;
+-
+ 		/*
+ 		 * Allocate new cluster to this directory
+ 		 */
+-		exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++		if (ei->start_clu != EXFAT_EOF_CLUSTER) {
++			/* we trust p_dir->size regardless of FAT type */
++			if (exfat_find_last_cluster(sb, p_dir, &last_clu))
++				return -EIO;
++
++			exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++		} else {
++			/* This directory is empty */
++			exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
++					ALLOC_NO_FAT_CHAIN);
++		}
+ 
+ 		/* allocate a cluster */
+ 		ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
+@@ -368,6 +374,11 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		if (exfat_zeroed_cluster(inode, clu.dir))
+ 			return -EIO;
+ 
++		if (ei->start_clu == EXFAT_EOF_CLUSTER) {
++			ei->start_clu = clu.dir;
++			p_dir->dir = clu.dir;
++		}
++
+ 		/* append to the FAT chain */
+ 		if (clu.flags != p_dir->flags) {
+ 			/* no-fat-chain bit is disabled,
+@@ -646,7 +657,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ 	info->type = exfat_get_entry_type(ep);
+ 	info->attr = le16_to_cpu(ep->dentry.file.attr);
+ 	info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+-	if ((info->type == TYPE_FILE) && (info->size == 0)) {
++	if (info->size == 0) {
+ 		info->flags = ALLOC_NO_FAT_CHAIN;
+ 		info->start_clu = EXFAT_EOF_CLUSTER;
+ 	} else {
+@@ -890,6 +901,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ 
+ 	dentries_per_clu = sbi->dentries_per_clu;
+ 
++	if (p_dir->dir == EXFAT_EOF_CLUSTER)
++		return 0;
++
+ 	exfat_chain_dup(&clu, p_dir);
+ 
+ 	while (clu.dir != EXFAT_EOF_CLUSTER) {
+@@ -1257,7 +1271,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ 		}
+ 
+ 		/* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
+-		if (new_entry_type == TYPE_DIR) {
++		if (new_entry_type == TYPE_DIR &&
++		    new_ei->start_clu != EXFAT_EOF_CLUSTER) {
+ 			/* new_ei, new_clu_to_free */
+ 			struct exfat_chain new_clu_to_free;
+ 
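
The exfat hunks let a directory exist with no cluster chain at all: an empty directory now has start_clu == EXFAT_EOF_CLUSTER (as empty files already did, hence the relaxed size check in exfat_find()), and its first cluster is only allocated when the first dentry is inserted. Condensed from exfat_find_empty_entry() above:

	if (ei->start_clu != EXFAT_EOF_CLUSTER) {
		/* existing chain: append after the last cluster */
		if (exfat_find_last_cluster(sb, p_dir, &last_clu))
			return -EIO;
		exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
	} else {
		/* empty directory: about to allocate its first cluster */
		exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0, ALLOC_NO_FAT_CHAIN);
	}
	...
	if (ei->start_clu == EXFAT_EOF_CLUSTER) {
		ei->start_clu = clu.dir;	/* wire the new cluster in */
		p_dir->dir = clu.dir;
	}

exfat_check_dir_empty() and __exfat_rename() are adjusted to match, so a clusterless directory counts as empty and frees nothing.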
+diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
+index 0c5a79c3b5d48..ef4c19e5f5706 100644
+--- a/fs/ext4/acl.h
++++ b/fs/ext4/acl.h
+@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
+ static inline int
+ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+ {
++	/* usually, the umask is applied by posix_acl_create(), but if
++	   ext4 ACL support is disabled at compile time, we need to do
++	   it here, because posix_acl_create() will never be called */
++	inode->i_mode &= ~current_umask();
++
+ 	return 0;
+ }
+ #endif  /* CONFIG_EXT4_FS_POSIX_ACL */
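
With CONFIG_EXT4_FS_POSIX_ACL disabled, posix_acl_create() never runs, so nothing strips the process umask from newly created inodes; the stub above now applies it directly. A tiny user-space analogue of the masking, just to show the arithmetic (the values are examples):

	#include <stdio.h>

	/* mode & ~umask: the same masking the ext4_init_acl() stub performs */
	static unsigned int apply_umask(unsigned int mode, unsigned int umask)
	{
		return mode & ~umask;
	}

	int main(void)
	{
		printf("%04o\n", apply_umask(0666, 0022));	/* prints 0644 */
		return 0;
	}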
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9653aab5e9f4a..733abaf805fa4 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1674,7 +1674,8 @@ struct ext4_sb_info {
+ 
+ 	/*
+ 	 * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+-	 * or EXTENTS flag.
++	 * or EXTENTS flag or between writepages ops and changing DELALLOC or
++	 * DIOREAD_NOLOCK mount options on remount.
+ 	 */
+ 	struct percpu_rw_semaphore s_writepages_rwsem;
+ 	struct dax_device *s_daxdev;
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 6f7de14c0fa86..f4b50652f0cce 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ 		       struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+-			     ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++			    ext4_lblk_t len,
++			    struct pending_reservation **prealloc);
+ 
+ int __init ext4_init_es(void)
+ {
+@@ -448,6 +449,19 @@ static void ext4_es_list_del(struct inode *inode)
+ 	spin_unlock(&sbi->s_es_lock);
+ }
+ 
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++	if (!nofail)
++		return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++	return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++	kmem_cache_free(ext4_pending_cachep, pr);
++}
++
+ /*
+  * Returns true if we cannot fail to allocate memory for this extent_status
+  * entry and cannot reclaim it until its status changes.
+@@ -836,11 +850,12 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ 	struct extent_status newes;
+ 	ext4_lblk_t end = lblk + len - 1;
+-	int err1 = 0;
+-	int err2 = 0;
++	int err1 = 0, err2 = 0, err3 = 0;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct extent_status *es1 = NULL;
+ 	struct extent_status *es2 = NULL;
++	struct pending_reservation *pr = NULL;
++	bool revise_pending = false;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return;
+@@ -868,11 +883,17 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	ext4_es_insert_extent_check(inode, &newes);
+ 
++	revise_pending = sbi->s_cluster_ratio > 1 &&
++			 test_opt(inode->i_sb, DELALLOC) &&
++			 (status & (EXTENT_STATUS_WRITTEN |
++				    EXTENT_STATUS_UNWRITTEN));
+ retry:
+ 	if (err1 && !es1)
+ 		es1 = __es_alloc_extent(true);
+ 	if ((err1 || err2) && !es2)
+ 		es2 = __es_alloc_extent(true);
++	if ((err1 || err2 || err3) && revise_pending && !pr)
++		pr = __alloc_pending(true);
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+ 
+ 	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+@@ -897,13 +918,18 @@ retry:
+ 		es2 = NULL;
+ 	}
+ 
+-	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+-	    (status & EXTENT_STATUS_WRITTEN ||
+-	     status & EXTENT_STATUS_UNWRITTEN))
+-		__revise_pending(inode, lblk, len);
++	if (revise_pending) {
++		err3 = __revise_pending(inode, lblk, len, &pr);
++		if (err3 != 0)
++			goto error;
++		if (pr) {
++			__free_pending(pr);
++			pr = NULL;
++		}
++	}
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+-	if (err1 || err2)
++	if (err1 || err2 || err3)
+ 		goto retry;
+ 
+ 	ext4_es_print_tree(inode);
+@@ -1311,7 +1337,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ 				rc->ndelonly--;
+ 				node = rb_next(&pr->rb_node);
+ 				rb_erase(&pr->rb_node, &tree->root);
+-				kmem_cache_free(ext4_pending_cachep, pr);
++				__free_pending(pr);
+ 				if (!node)
+ 					break;
+ 				pr = rb_entry(node, struct pending_reservation,
+@@ -1405,8 +1431,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ 			}
+ 		}
+ 		if (count_reserved)
+-			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+-				   &orig_es, &rc);
++			count_rsvd(inode, orig_es.es_lblk + len1,
++				   orig_es.es_len - len1 - len2, &orig_es, &rc);
+ 		goto out_get_reserved;
+ 	}
+ 
+@@ -1907,11 +1933,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+  *
+  * @inode - file containing the cluster
+  * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+  *
+  * Returns 0 on successful insertion and -ENOMEM on failure.  If the
+  * pending reservation is already in the set, returns successfully.
+  */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++			    struct pending_reservation **prealloc)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1937,10 +1965,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ 		}
+ 	}
+ 
+-	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+-	if (pr == NULL) {
+-		ret = -ENOMEM;
+-		goto out;
++	if (likely(*prealloc == NULL)) {
++		pr = __alloc_pending(false);
++		if (!pr) {
++			ret = -ENOMEM;
++			goto out;
++		}
++	} else {
++		pr = *prealloc;
++		*prealloc = NULL;
+ 	}
+ 	pr->lclu = lclu;
+ 
+@@ -1970,7 +2003,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ 	if (pr != NULL) {
+ 		tree = &EXT4_I(inode)->i_pending_tree;
+ 		rb_erase(&pr->rb_node, &tree->root);
+-		kmem_cache_free(ext4_pending_cachep, pr);
++		__free_pending(pr);
+ 	}
+ }
+ 
+@@ -2029,10 +2062,10 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ 				  bool allocated)
+ {
+ 	struct extent_status newes;
+-	int err1 = 0;
+-	int err2 = 0;
++	int err1 = 0, err2 = 0, err3 = 0;
+ 	struct extent_status *es1 = NULL;
+ 	struct extent_status *es2 = NULL;
++	struct pending_reservation *pr = NULL;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return;
+@@ -2052,6 +2085,8 @@ retry:
+ 		es1 = __es_alloc_extent(true);
+ 	if ((err1 || err2) && !es2)
+ 		es2 = __es_alloc_extent(true);
++	if ((err1 || err2 || err3) && allocated && !pr)
++		pr = __alloc_pending(true);
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+ 
+ 	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+@@ -2074,11 +2109,18 @@ retry:
+ 		es2 = NULL;
+ 	}
+ 
+-	if (allocated)
+-		__insert_pending(inode, lblk);
++	if (allocated) {
++		err3 = __insert_pending(inode, lblk, &pr);
++		if (err3 != 0)
++			goto error;
++		if (pr) {
++			__free_pending(pr);
++			pr = NULL;
++		}
++	}
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+-	if (err1 || err2)
++	if (err1 || err2 || err3)
+ 		goto retry;
+ 
+ 	ext4_es_print_tree(inode);
+@@ -2184,21 +2226,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+  * @inode - file containing the range
+  * @lblk - logical block defining the start of range
+  * @len  - length of range in blocks
++ * @prealloc - preallocated pending entry
+  *
+  * Used after a newly allocated extent is added to the extents status tree.
+  * Requires that the extents in the range have either written or unwritten
+  * status.  Must be called while holding i_es_lock.
+  */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+-			     ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++			    ext4_lblk_t len,
++			    struct pending_reservation **prealloc)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	ext4_lblk_t end = lblk + len - 1;
+ 	ext4_lblk_t first, last;
+ 	bool f_del = false, l_del = false;
++	int ret = 0;
+ 
+ 	if (len == 0)
+-		return;
++		return 0;
+ 
+ 	/*
+ 	 * Two cases - block range within single cluster and block range
+@@ -2219,7 +2264,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						first, lblk - 1);
+ 		if (f_del) {
+-			__insert_pending(inode, first);
++			ret = __insert_pending(inode, first, prealloc);
++			if (ret < 0)
++				goto out;
+ 		} else {
+ 			last = EXT4_LBLK_CMASK(sbi, end) +
+ 			       sbi->s_cluster_ratio - 1;
+@@ -2227,9 +2274,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 				l_del = __es_scan_range(inode,
+ 							&ext4_es_is_delonly,
+ 							end + 1, last);
+-			if (l_del)
+-				__insert_pending(inode, last);
+-			else
++			if (l_del) {
++				ret = __insert_pending(inode, last, prealloc);
++				if (ret < 0)
++					goto out;
++			} else
+ 				__remove_pending(inode, last);
+ 		}
+ 	} else {
+@@ -2237,18 +2286,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 		if (first != lblk)
+ 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						first, lblk - 1);
+-		if (f_del)
+-			__insert_pending(inode, first);
+-		else
++		if (f_del) {
++			ret = __insert_pending(inode, first, prealloc);
++			if (ret < 0)
++				goto out;
++		} else
+ 			__remove_pending(inode, first);
+ 
+ 		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ 		if (last != end)
+ 			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						end + 1, last);
+-		if (l_del)
+-			__insert_pending(inode, last);
+-		else
++		if (l_del) {
++			ret = __insert_pending(inode, last, prealloc);
++			if (ret < 0)
++				goto out;
++		} else
+ 			__remove_pending(inode, last);
+ 	}
++out:
++	return ret;
+ }
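
The extents_status.c rework makes __insert_pending() failures visible instead of silently dropping a pending reservation: the callers now preallocate a pending_reservation with GFP_KERNEL | __GFP_NOFAIL outside i_es_lock and loop until everything under the lock succeeds, mirroring what was already done for extent_status entries. Schematically (condensed, error paths simplified):

	retry:
		if (err && !pr)
			pr = __alloc_pending(true);	/* NOFAIL, lock not held */
		write_lock(&EXT4_I(inode)->i_es_lock);
		err = __insert_pending(inode, lblk, &pr); /* consumes *pr if set */
		if (!err && pr) {
			__free_pending(pr);		/* preallocation unused */
			pr = NULL;
		}
		write_unlock(&EXT4_I(inode)->i_es_lock);
		if (err)
			goto retry;			/* second pass cannot fail */

A separate one-line fix in __es_remove_extent() makes count_rsvd() start counting at the right block (orig_es.es_lblk + len1) when both ends of the removed range survive.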
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 73a4b711be025..a443580115896 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -306,80 +306,38 @@ out:
+ }
+ 
+ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+-					   ssize_t written, size_t count)
++					   ssize_t count)
+ {
+ 	handle_t *handle;
+-	bool truncate = false;
+-	u8 blkbits = inode->i_blkbits;
+-	ext4_lblk_t written_blk, end_blk;
+-	int ret;
+-
+-	/*
+-	 * Note that EXT4_I(inode)->i_disksize can get extended up to
+-	 * inode->i_size while the I/O was running due to writeback of delalloc
+-	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
+-	 * zeroed/unwritten extents if this is possible; thus we won't leave
+-	 * uninitialized blocks in a file even if we didn't succeed in writing
+-	 * as much as we intended.
+-	 */
+-	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+-	if (offset + count <= EXT4_I(inode)->i_disksize) {
+-		/*
+-		 * We need to ensure that the inode is removed from the orphan
+-		 * list if it has been added prematurely, due to writeback of
+-		 * delalloc blocks.
+-		 */
+-		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+-			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-
+-			if (IS_ERR(handle)) {
+-				ext4_orphan_del(NULL, inode);
+-				return PTR_ERR(handle);
+-			}
+-
+-			ext4_orphan_del(handle, inode);
+-			ext4_journal_stop(handle);
+-		}
+-
+-		return written;
+-	}
+-
+-	if (written < 0)
+-		goto truncate;
+ 
++	lockdep_assert_held_write(&inode->i_rwsem);
+ 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-	if (IS_ERR(handle)) {
+-		written = PTR_ERR(handle);
+-		goto truncate;
+-	}
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
+ 
+-	if (ext4_update_inode_size(inode, offset + written)) {
+-		ret = ext4_mark_inode_dirty(handle, inode);
++	if (ext4_update_inode_size(inode, offset + count)) {
++		int ret = ext4_mark_inode_dirty(handle, inode);
+ 		if (unlikely(ret)) {
+-			written = ret;
+ 			ext4_journal_stop(handle);
+-			goto truncate;
++			return ret;
+ 		}
+ 	}
+ 
+-	/*
+-	 * We may need to truncate allocated but not written blocks beyond EOF.
+-	 */
+-	written_blk = ALIGN(offset + written, 1 << blkbits);
+-	end_blk = ALIGN(offset + count, 1 << blkbits);
+-	if (written_blk < end_blk && ext4_can_truncate(inode))
+-		truncate = true;
+-
+-	/*
+-	 * Remove the inode from the orphan list if it has been extended and
+-	 * everything went OK.
+-	 */
+-	if (!truncate && inode->i_nlink)
++	if (inode->i_nlink)
+ 		ext4_orphan_del(handle, inode);
+ 	ext4_journal_stop(handle);
+ 
+-	if (truncate) {
+-truncate:
++	return count;
++}
++
++/*
++ * Clean up the inode after DIO or DAX extending write has completed and the
++ * inode size has been updated using ext4_handle_inode_extension().
++ */
++static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
++{
++	lockdep_assert_held_write(&inode->i_rwsem);
++	if (count < 0) {
+ 		ext4_truncate_failed_write(inode);
+ 		/*
+ 		 * If the truncate operation failed early, then the inode may
+@@ -388,9 +346,28 @@ truncate:
+ 		 */
+ 		if (inode->i_nlink)
+ 			ext4_orphan_del(NULL, inode);
++		return;
+ 	}
++	/*
++	 * If i_disksize got extended due to writeback of delalloc blocks while
++	 * the DIO was running we could fail to cleanup the orphan list in
++	 * ext4_handle_inode_extension(). Do it now.
++	 */
++	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ 
+-	return written;
++		if (IS_ERR(handle)) {
++			/*
++			 * The write has successfully completed. Not much to
++			 * do with the error here so just cleanup the orphan
++			 * list and hope for the best.
++			 */
++			ext4_orphan_del(NULL, inode);
++			return;
++		}
++		ext4_orphan_del(handle, inode);
++		ext4_journal_stop(handle);
++	}
+ }
+ 
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+@@ -399,31 +376,22 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 	loff_t pos = iocb->ki_pos;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
++	if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
++		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ 	if (error)
+ 		return error;
+-
+-	if (size && flags & IOMAP_DIO_UNWRITTEN) {
+-		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+-		if (error < 0)
+-			return error;
+-	}
+ 	/*
+-	 * If we are extending the file, we have to update i_size here before
+-	 * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+-	 * buffered reads could zero out too much from page cache pages. Update
+-	 * of on-disk size will happen later in ext4_dio_write_iter() where
+-	 * we have enough information to also perform orphan list handling etc.
+-	 * Note that we perform all extending writes synchronously under
+-	 * i_rwsem held exclusively so i_size update is safe here in that case.
+-	 * If the write was not extending, we cannot see pos > i_size here
+-	 * because operations reducing i_size like truncate wait for all
+-	 * outstanding DIO before updating i_size.
++	 * Note that EXT4_I(inode)->i_disksize can get extended up to
++	 * inode->i_size while the I/O was running due to writeback of delalloc
++	 * blocks. But the code in ext4_iomap_alloc() is careful to use
++	 * zeroed/unwritten extents if this is possible; thus we won't leave
++	 * uninitialized blocks in a file even if we didn't succeed in writing
++	 * as much as we intended.
+ 	 */
+-	pos += size;
+-	if (pos > i_size_read(inode))
+-		i_size_write(inode, pos);
+-
+-	return 0;
++	WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
++	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
++		return size;
++	return ext4_handle_inode_extension(inode, pos, size);
+ }
+ 
+ static const struct iomap_dio_ops ext4_dio_write_ops = {
+@@ -569,18 +537,20 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		return ext4_buffered_write_iter(iocb, from);
+ 	}
+ 
++	/*
++	 * Prevent inline data from being created since we are going to allocate
++	 * blocks for DIO. We know the inode does not currently have inline data
++	 * because ext4_should_use_dio() checked for it, but we have to clear
++	 * the state flag before the write checks because a lock cycle could
++	 * introduce races with other writers.
++	 */
++	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++
+ 	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
+ 				    &unwritten, &dio_flags);
+ 	if (ret <= 0)
+ 		return ret;
+ 
+-	/*
+-	 * Make sure inline data cannot be created anymore since we are going
+-	 * to allocate blocks for DIO. We know the inode does not have any
+-	 * inline data now because ext4_dio_supported() checked for that.
+-	 */
+-	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+-
+ 	offset = iocb->ki_pos;
+ 	count = ret;
+ 
+@@ -606,9 +576,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 			   dio_flags, NULL, 0);
+ 	if (ret == -ENOTBLK)
+ 		ret = 0;
+-
+-	if (extend)
+-		ret = ext4_handle_inode_extension(inode, offset, ret, count);
++	if (extend) {
++		/*
++		 * We always perform extending DIO write synchronously so by
++		 * now the IO is completed and ext4_handle_inode_extension()
++		 * was called. Cleanup the inode in case of error or race with
++		 * writeback of delalloc blocks.
++		 */
++		WARN_ON_ONCE(ret == -EIOCBQUEUED);
++		ext4_inode_extension_cleanup(inode, ret);
++	}
+ 
+ out:
+ 	if (ilock_shared)
+@@ -689,8 +666,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 
+ 	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ 
+-	if (extend)
+-		ret = ext4_handle_inode_extension(inode, offset, ret, count);
++	if (extend) {
++		ret = ext4_handle_inode_extension(inode, offset, ret);
++		ext4_inode_extension_cleanup(inode, ret);
++	}
+ out:
+ 	inode_unlock(inode);
+ 	if (ret > 0)
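The extending-write path that the hunks above split into ext4_handle_inode_extension() (the on-disk size update) and ext4_inode_extension_cleanup() (truncate/orphan-list cleanup) is reachable from ordinary user space: any aligned O_DIRECT write that starts at i_size extends the file. A minimal sketch of such a trigger, assuming a 4096-byte block size and an arbitrary file name, neither of which comes from the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT | O_DIRECT, 0644);
	void *buf;
	off_t eof;

	if (fd < 0)
		return 1;
	/* O_DIRECT needs buffer, length and offset aligned */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xab, 4096);
	eof = lseek(fd, 0, SEEK_END);
	/* write one block starting at EOF: an extending direct I/O write */
	if (pwrite(fd, buf, 4096, eof) != 4096)
		return 1;
	close(fd);
	free(buf);
	return 0;
}

Because the write begins at i_size, ext4 places the inode on the orphan list before issuing the I/O, and the end_io/cleanup pair above is what takes it off again once the size update is durable.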
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 43775a6ca5054..28a92de978f5d 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -789,10 +789,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
+ 			     struct buffer_head *bh_result, int create)
+ {
++	int ret = 0;
++
+ 	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
+ 		   inode->i_ino, create);
+-	return _ext4_get_block(inode, iblock, bh_result,
++	ret = _ext4_get_block(inode, iblock, bh_result,
+ 			       EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++
++	/*
++	 * If the buffer is marked unwritten, mark it as new to make sure it is
++	 * zeroed out correctly in case of partial writes. Otherwise, there is
++	 * a chance of stale data getting exposed.
++	 */
++	if (ret == 0 && buffer_unwritten(bh_result))
++		set_buffer_new(bh_result);
++
++	return ret;
+ }
+ 
+ /* Maximum number of blocks we map for direct IO at once. */
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0361c20910def..667381180b261 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -560,13 +560,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ 			goto handle_itb;
+ 
+-		if (meta_bg == 1) {
+-			ext4_group_t first_group;
+-			first_group = ext4_meta_bg_first_group(sb, group);
+-			if (first_group != group + 1 &&
+-			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+-				goto handle_itb;
+-		}
++		if (meta_bg == 1)
++			goto handle_itb;
+ 
+ 		block = start + ext4_bg_has_super(sb, group);
+ 		/* Copy all of the GDT blocks into the backup in this group */
+@@ -1191,8 +1186,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 			   ext4_group_first_block_no(sb, group));
+ 		BUFFER_TRACE(bh, "get_write_access");
+ 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
+-							 EXT4_JTR_NONE)))
++							 EXT4_JTR_NONE))) {
++			brelse(bh);
+ 			break;
++		}
+ 		lock_buffer(bh);
+ 		memcpy(bh->b_data, data, size);
+ 		if (rest)
+@@ -1601,6 +1598,8 @@ exit_journal:
+ 		int gdb_num_end = ((group + flex_gd->count - 1) /
+ 				   EXT4_DESC_PER_BLOCK(sb));
+ 		int meta_bg = ext4_has_feature_meta_bg(sb);
++		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
++					 ext4_group_first_block_no(sb, 0);
+ 		sector_t old_gdb = 0;
+ 
+ 		update_backups(sb, ext4_group_first_block_no(sb, 0),
+@@ -1612,8 +1611,8 @@ exit_journal:
+ 						     gdb_num);
+ 			if (old_gdb == gdb_bh->b_blocknr)
+ 				continue;
+-			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+-				       gdb_bh->b_size, meta_bg);
++			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
++				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
+ 			old_gdb = gdb_bh->b_blocknr;
+ 		}
+ 	}
+@@ -1980,9 +1979,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+ 
+ errout:
+ 	ret = ext4_journal_stop(handle);
+-	if (!err)
+-		err = ret;
+-	return ret;
++	return err ? err : ret;
+ 
+ invalid_resize_inode:
+ 	ext4_error(sb, "corrupted/inconsistent resize inode");
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c94ebf704616e..e08fa12c0cd6a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6425,6 +6425,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	struct ext4_mount_options old_opts;
+ 	ext4_group_t g;
+ 	int err = 0;
++	int alloc_ctx;
+ #ifdef CONFIG_QUOTA
+ 	int enable_quota = 0;
+ 	int i, j;
+@@ -6465,7 +6466,16 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 
+ 	}
+ 
++	/*
++	 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
++	 * two calls to ext4_should_dioread_nolock() to return inconsistent
++	 * values, triggering WARN_ON in ext4_add_complete_io(). We grab
++	 * s_writepages_rwsem here to avoid a race between writepages
++	 * operations and remount.
++	 */
++	alloc_ctx = ext4_writepages_down_write(sb);
+ 	ext4_apply_options(fc, sb);
++	ext4_writepages_up_write(sb, alloc_ctx);
+ 
+ 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ 	    test_opt(sb, JOURNAL_CHECKSUM)) {
+@@ -6683,6 +6693,8 @@ restore_opts:
+ 	if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
+ 	    sb_any_quota_suspended(sb))
+ 		dquot_resume(sb, -1);
++
++	alloc_ctx = ext4_writepages_down_write(sb);
+ 	sb->s_flags = old_sb_flags;
+ 	sbi->s_mount_opt = old_opts.s_mount_opt;
+ 	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+@@ -6691,6 +6703,8 @@ restore_opts:
+ 	sbi->s_commit_interval = old_opts.s_commit_interval;
+ 	sbi->s_min_batch_time = old_opts.s_min_batch_time;
+ 	sbi->s_max_batch_time = old_opts.s_max_batch_time;
++	ext4_writepages_up_write(sb, alloc_ctx);
++
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+ #ifdef CONFIG_QUOTA
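The s_writepages_rwsem usage added above follows the usual reader/writer discipline: the mount-option bits only change under the write side, so a reader always computes its decision from one consistent snapshot. A stand-alone pthread sketch of the same discipline (all names invented for illustration, not taken from ext4):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t opts_lock = PTHREAD_RWLOCK_INITIALIZER;
static int opt_delalloc = 1, opt_dioread_nolock = 1;

/* remount side: mutate the option set atomically w.r.t. readers */
static void apply_options(int delalloc, int dioread_nolock)
{
	pthread_rwlock_wrlock(&opts_lock);
	opt_delalloc = delalloc;
	opt_dioread_nolock = dioread_nolock;
	pthread_rwlock_unlock(&opts_lock);
}

/* writepages side: both options are read under one lock hold */
static int should_dioread_nolock(void)
{
	int v;

	pthread_rwlock_rdlock(&opts_lock);
	v = opt_delalloc && opt_dioread_nolock;
	pthread_rwlock_unlock(&opts_lock);
	return v;
}

int main(void)
{
	apply_options(0, 1);
	printf("dioread_nolock in effect: %d\n", should_dioread_nolock());
	return 0;
}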
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 236d890f560b0..4d1d41143d5f8 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1988,7 +1988,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+ {
+ 	dev_t dev = sbi->sb->s_bdev->bd_dev;
+-	char slab_name[32];
++	char slab_name[35];
+ 
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return 0;
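The slab_name widening above accounts for the widest device numbers the name can carry. Assuming the format string f2fs uses for this cache ("f2fs_page_array_entry-%u:%u" with MAJOR/MINOR of the block device), a 12-bit major and 20-bit minor need exactly 35 bytes including the NUL, which a quick user-space check confirms:

#include <stdio.h>

int main(void)
{
	char slab_name[35];
	unsigned int major = 4095, minor = 1048575; /* widest dev_t parts */
	int n = snprintf(slab_name, sizeof(slab_name),
			 "f2fs_page_array_entry-%u:%u", major, minor);

	/* prints: 34 chars + NUL -> needs 35 bytes */
	printf("%d chars + NUL -> needs %d bytes\n", n, n + 1);
	return 0;
}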
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 0e2d49140c07f..ad8dfac73bd44 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
+ 	}
+ }
+ 
+-static bool __may_read_extent_tree(struct inode *inode)
+-{
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+-	if (!test_opt(sbi, READ_EXTENT_CACHE))
+-		return false;
+-	if (is_inode_flag_set(inode, FI_NO_EXTENT))
+-		return false;
+-	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+-			 !f2fs_sb_has_readonly(sbi))
+-		return false;
+-	return S_ISREG(inode->i_mode);
+-}
+-
+-static bool __may_age_extent_tree(struct inode *inode)
+-{
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+-	if (!test_opt(sbi, AGE_EXTENT_CACHE))
+-		return false;
+-	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+-		return false;
+-	if (file_is_cold(inode))
+-		return false;
+-
+-	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+-}
+-
+ static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ 	if (type == EX_READ)
+-		return __may_read_extent_tree(inode);
+-	else if (type == EX_BLOCK_AGE)
+-		return __may_age_extent_tree(inode);
++		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
++			S_ISREG(inode->i_mode);
++	if (type == EX_BLOCK_AGE)
++		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
++			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
+ 	return false;
+ }
+ 
+@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+ 	if (list_empty(&F2FS_I_SB(inode)->s_list))
+ 		return false;
+ 
+-	return __init_may_extent_tree(inode, type);
++	if (!__init_may_extent_tree(inode, type))
++		return false;
++
++	if (type == EX_READ) {
++		if (is_inode_flag_set(inode, FI_NO_EXTENT))
++			return false;
++		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++				 !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
++			return false;
++	} else if (type == EX_BLOCK_AGE) {
++		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
++			return false;
++		if (file_is_cold(inode))
++			return false;
++	}
++	return true;
+ }
+ 
+ static void __try_update_largest_extent(struct extent_tree *et,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index e53a429bd4c4c..6f08aaf0ea340 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -4006,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ 	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+ 	F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
++	/* Set default level */
++	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
++		F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
++	else
++		F2FS_I(inode)->i_compress_level = 0;
++	/* Adjust mount option level */
++	if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
++	    F2FS_OPTION(sbi).compress_level)
++		F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ 	f2fs_mark_inode_dirty_sync(inode, true);
+ 
+ 	if (!f2fs_is_compress_backend_ready(inode))
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index ee2e1dd64f256..8b30f11f37b46 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1467,7 +1467,8 @@ page_hit:
+ 			  ofs_of_node(page), cpver_of_node(page),
+ 			  next_blkaddr_of_node(page));
+ 	set_sbi_flag(sbi, SBI_NEED_FSCK);
+-	err = -EINVAL;
++	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
++	err = -EFSCORRUPTED;
+ out_err:
+ 	ClearPageUptodate(page);
+ out_put_err:
+@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
+ 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
+ 
+ 		if (blk_addr == NEW_ADDR)
+-			return -EINVAL;
++			return -EFSCORRUPTED;
+ 
+ 		if (blk_addr == NULL_ADDR) {
+ 			add_free_nid(sbi, start_nid, true, true);
+@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+ 
+ 			if (ret) {
+ 				f2fs_up_read(&nm_i->nat_tree_lock);
+-				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++
++				if (ret == -EFSCORRUPTED) {
++					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++					set_sbi_flag(sbi, SBI_NEED_FSCK);
++					f2fs_handle_error(sbi,
++						ERROR_INCONSISTENT_NAT);
++				}
++
+ 				return ret;
+ 			}
+ 		}
+@@ -2743,7 +2751,9 @@ recover_xnid:
+ 	f2fs_update_inode_page(inode);
+ 
+ 	/* 3: update and set xattr node page dirty */
+-	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
++	if (page)
++		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
++				VALID_XATTR_BLOCK_SIZE);
+ 
+ 	set_page_dirty(xpage);
+ 	f2fs_put_page(xpage, 1);
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index 476b186b90a6c..26217fa578727 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ 
+ 	*xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
+ 	if (!*xe) {
+-		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++		f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
+ 								inode->i_ino);
+ 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+-		err = -EFSCORRUPTED;
++		err = -ENODATA;
+ 		f2fs_handle_error(F2FS_I_SB(inode),
+ 					ERROR_CORRUPTED_XATTR);
+ 		goto out;
+@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 
+ 		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ 			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+-			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++			f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
+ 						inode->i_ino);
+ 			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+-			error = -EFSCORRUPTED;
+ 			f2fs_handle_error(F2FS_I_SB(inode),
+ 						ERROR_CORRUPTED_XATTR);
+-			goto cleanup;
++			break;
+ 		}
+ 
+ 		if (!prefix)
+@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ 
+ 	if (size > MAX_VALUE_LEN(inode))
+ 		return -E2BIG;
+-
++retry:
+ 	error = read_all_xattrs(inode, ipage, &base_addr);
+ 	if (error)
+ 		return error;
+@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ 	/* find entry with wanted name. */
+ 	here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ 	if (!here) {
+-		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++		if (!F2FS_I(inode)->i_xattr_nid) {
++			f2fs_notice(F2FS_I_SB(inode),
++				"recover xattr in inode (%lu)", inode->i_ino);
++			f2fs_recover_xattr_data(inode, NULL);
++			kfree(base_addr);
++			goto retry;
++		}
++		f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ 								inode->i_ino);
+ 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ 		error = -EFSCORRUPTED;
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 17c994a0c0d09..28c3711628805 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1860,16 +1860,24 @@ out:
+ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
+ 		    int mask)
+ {
++	int may_not_block = mask & MAY_NOT_BLOCK;
+ 	struct gfs2_inode *ip;
+ 	struct gfs2_holder i_gh;
++	struct gfs2_glock *gl;
+ 	int error;
+ 
+ 	gfs2_holder_mark_uninitialized(&i_gh);
+ 	ip = GFS2_I(inode);
+-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+-		if (mask & MAY_NOT_BLOCK)
++	gl = rcu_dereference_check(ip->i_gl, !may_not_block);
++	if (unlikely(!gl)) {
++		/* inode is getting torn down, must be RCU mode */
++		WARN_ON_ONCE(!may_not_block);
++		return -ECHILD;
++	}
++	if (gfs2_glock_is_locked_by_me(gl) == NULL) {
++		if (may_not_block)
+ 			return -ECHILD;
+-		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
++		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ 		if (error)
+ 			return error;
+ 	}
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 8a27957dbfeed..825d7c8afa3a7 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1261,10 +1261,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	if (!sb_rdonly(sb)) {
+ 		error = init_threads(sdp);
+-		if (error) {
+-			gfs2_withdraw_delayed(sdp);
++		if (error)
+ 			goto fail_per_node;
+-		}
+ 	}
+ 
+ 	error = gfs2_freeze_lock_shared(sdp);
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 704192b736050..ccecb79eeaf8e 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -441,6 +441,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ 	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ 		return 0;
+ 
++	/*
++	 * If qd_change is 0 it means a pending quota change was negated.
++	 * We should not sync it, but we still have a qd reference and slot
++	 * reference taken by gfs2_quota_change -> do_qc that need to be put.
++	 */
++	if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
++		slot_put(qd);
++		qd_put(qd);
++		return 0;
++	}
++
+ 	if (!lockref_get_not_dead(&qd->qd_lockref))
+ 		return 0;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9f4d5d6549ee6..f98ddb9d19a21 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1558,7 +1558,7 @@ out:
+ 		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ 		gfs2_glock_add_to_lru(ip->i_gl);
+ 		gfs2_glock_put_eventually(ip->i_gl);
+-		ip->i_gl = NULL;
++		rcu_assign_pointer(ip->i_gl, NULL);
+ 	}
+ }
+ 
+diff --git a/fs/inode.c b/fs/inode.c
+index 67611a360031b..f11b4173d3f41 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -2498,6 +2498,22 @@ struct timespec64 current_time(struct inode *inode)
+ }
+ EXPORT_SYMBOL(current_time);
+ 
++/**
++ * inode_set_ctime_current - set the ctime to current_time
++ * @inode: inode
++ *
++ * Set inode->i_ctime to the current time for the inode. Returns the
++ * value that was assigned to i_ctime.
++ */
++struct timespec64 inode_set_ctime_current(struct inode *inode)
++{
++	struct timespec64 now = current_time(inode);
++
++	inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
++	return now;
++}
++EXPORT_SYMBOL(inode_set_ctime_current);
++
+ /**
+  * in_group_or_capable - check whether caller is CAP_FSETID privileged
+  * @idmap:	idmap of the mount @inode was found from
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index c269a7d29a465..5b771a3d8d9ae 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -289,6 +289,8 @@ int jbd2_journal_recover(journal_t *journal)
+ 	journal_superblock_t *	sb;
+ 
+ 	struct recovery_info	info;
++	errseq_t		wb_err;
++	struct address_space	*mapping;
+ 
+ 	memset(&info, 0, sizeof(info));
+ 	sb = journal->j_superblock;
+@@ -306,6 +308,9 @@ int jbd2_journal_recover(journal_t *journal)
+ 		return 0;
+ 	}
+ 
++	wb_err = 0;
++	mapping = journal->j_fs_dev->bd_inode->i_mapping;
++	errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ 	err = do_one_pass(journal, &info, PASS_SCAN);
+ 	if (!err)
+ 		err = do_one_pass(journal, &info, PASS_REVOKE);
+@@ -327,6 +332,9 @@ int jbd2_journal_recover(journal_t *journal)
+ 
+ 	jbd2_journal_clear_revoke(journal);
+ 	err2 = sync_blockdev(journal->j_fs_dev);
++	if (!err)
++		err = err2;
++	err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ 	if (!err)
+ 		err = err2;
+ 	/* Make sure all replayed data is on permanent storage */
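The recovery change above samples the block device's writeback error state before replay begins and checks it again afterwards, so an I/O error that strikes while the journal is being replayed is not silently lost. A toy user-space sketch of that sample-then-check pattern, with a plain counter standing in for the kernel's errseq_t:

#include <stdio.h>

static unsigned int wb_err_seq;	/* bumped whenever writeback fails */

static unsigned int errseq_sample(void)
{
	return wb_err_seq;
}

static int errseq_check(unsigned int since)
{
	return wb_err_seq != since ? -5 /* -EIO */ : 0;
}

int main(void)
{
	unsigned int since = errseq_sample();

	/* ... journal replay would run here ... */
	wb_err_seq++;	/* simulate a writeback error during replay */
	printf("recovery result: %d\n", errseq_check(since));
	return 0;
}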
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 88afd108c2dd2..11c77757ead9e 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+ static int dbFindBits(u32 word, int l2nb);
+ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
+ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 		      int nblocks);
+ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+@@ -180,7 +180,8 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+ 
+ 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+-	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
++		bmp->db_l2nbperpage < 0) {
+ 		err = -EINVAL;
+ 		goto err_release_metapage;
+ 	}
+@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ 	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ 	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
++	if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
++		bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
++
+ 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ 	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+@@ -1710,7 +1717,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+ 		 * dbFindLeaf() returns the index of the leaf at which
+ 		 * free space was found.
+ 		 */
+-		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
++		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
+ 
+ 		/* release the buffer.
+ 		 */
+@@ -1957,7 +1964,7 @@ dbAllocDmapLev(struct bmap * bmp,
+ 	 * free space.  if sufficient free space is found, dbFindLeaf()
+ 	 * returns the index of the leaf at which free space was found.
+ 	 */
+-	if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
++	if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
+ 		return -ENOSPC;
+ 
+ 	if (leafidx < 0)
+@@ -2921,14 +2928,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+  *	leafidx	- return pointer to be set to the index of the leaf
+  *		  describing at least l2nb free blocks if sufficient
+  *		  free blocks are found.
++ *	is_ctl	- determines if the tree is of type ctl
+  *
+  * RETURN VALUES:
+  *	0	- success
+  *	-ENOSPC	- insufficient free blocks.
+  */
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
+ {
+ 	int ti, n = 0, k, x = 0;
++	int max_size;
++
++	max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+ 
+ 	/* first check the root of the tree to see if there is
+ 	 * sufficient free space.
+@@ -2949,6 +2960,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ 			/* sufficient free space found.  move to the next
+ 			 * level (or quit if this is the last level).
+ 			 */
++			if (x + n > max_size)
++				return -ENOSPC;
+ 			if (l2nb <= tp->dmt_stree[x + n])
+ 				break;
+ 		}
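The dbFindLeaf() change above refuses to walk past the end of the in-memory tree when the on-disk summary data is corrupted, instead of indexing dmt_stree[] out of bounds. A self-contained sketch of the same guard; the array size and contents here are made up and only the bounds check mirrors the fix:

#include <stdio.h>

#define TREESIZE 8 /* stand-in for the real jfs constant */

static int find_leaf(const signed char *stree, int nleaves, int x, int n)
{
	/* corrupted tree: fail instead of reading out of bounds */
	if (x + n > nleaves)
		return -1;
	return stree[x + n - 1];
}

int main(void)
{
	signed char stree[TREESIZE] = { 3, 2, 1, 0, 3, 3, 2, 1 };

	printf("%d\n", find_leaf(stree, TREESIZE, 4, 4));	/* ok */
	printf("%d\n", find_leaf(stree, TREESIZE, 6, 4));	/* rejected */
	return 0;
}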
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 6fb28572cb2c6..34f1358264e23 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -1320,7 +1320,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ {
+ 	int rc, ino, iagno, addext, extno, bitno, sword;
+-	int nwords, rem, i, agno;
++	int nwords, rem, i, agno, dn_numag;
+ 	u32 mask, inosmap, extsmap;
+ 	struct inode *ipimap;
+ 	struct metapage *mp;
+@@ -1356,6 +1356,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ 
+ 	/* get the ag number of this iag */
+ 	agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
++	dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
++	if (agno < 0 || agno > dn_numag)
++		return -EIO;
+ 
+ 	if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+ 		/*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5f088e3eeca1d..8374fa230ba5a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5622,7 +5622,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+ 
+ 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+-	nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
++	nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
+ }
+ 
+ static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+@@ -5663,7 +5663,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
+ 	data->res.server = server;
+ 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+-	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
++	nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
++			NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ }
+ 
+ static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+@@ -8934,6 +8935,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ 
+ 	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+ 
++try_again:
+ 	/* Test connection for session trunking. Async exchange_id call */
+ 	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+ 	if (IS_ERR(task))
+@@ -8946,11 +8948,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ 
+ 	if (status == 0)
+ 		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+-	else if (rpc_clnt_xprt_switch_has_addr(clnt,
++	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ 				(struct sockaddr *)&xprt->addr))
+ 		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
+ 
+ 	rpc_put_task(task);
++	if (status == -NFS4ERR_DELAY) {
++		ssleep(1);
++		goto try_again;
++	}
+ }
+ EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index daf305daa7516..2a493cbaf453d 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2785,7 +2785,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
+ 
+ 	/* XXX: alternatively, we could get/drop in seq start/stop */
+ 	drop_client(clp);
+-	return 0;
++	return seq_release(inode, file);
+ }
+ 
+ static const struct file_operations client_states_fops = {
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index a8eda1c85829e..1ad4f30d5f855 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -582,24 +582,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+ 	return;
+ }
+ 
+-/*
+- * Copy cached reply to current reply buffer. Should always fit.
+- * FIXME as reply is in a page, we should just attach the page, and
+- * keep a refcount....
+- */
+ static int
+ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+ {
+-	struct kvec	*vec = &rqstp->rq_res.head[0];
+-
+-	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
+-		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
+-				data->iov_len);
+-		return 0;
+-	}
+-	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
+-	vec->iov_len += data->iov_len;
+-	return 1;
++	__be32 *p;
++
++	p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
++	if (unlikely(!p))
++		return false;
++	memcpy(p, data->iov_base, data->iov_len);
++	xdr_commit_encode(&rqstp->rq_res_stream);
++	return true;
+ }
+ 
+ /*
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 80a70eaa30d90..1ef8c0d8871ed 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1467,7 +1467,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		ovl_trusted_xattr_handlers;
+ 	sb->s_fs_info = ofs;
+ 	sb->s_flags |= SB_POSIXACL;
+-	sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++	sb->s_iflags |= SB_I_SKIP_SYNC;
+ 
+ 	err = -ENOMEM;
+ 	root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 5ea42653126eb..800d34c3a3cca 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1574,7 +1574,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
+ 	{"hung_task_panic",			"kernel.hung_task_panic" },
+ 	{"numa_zonelist_order",			"vm.numa_zonelist_order" },
+ 	{"softlockup_all_cpu_backtrace",	"kernel.softlockup_all_cpu_backtrace" },
+-	{"softlockup_panic",			"kernel.softlockup_panic" },
+ 	{ }
+ };
+ 
+@@ -1590,6 +1589,13 @@ static const char *sysctl_find_alias(char *param)
+ 	return NULL;
+ }
+ 
++bool sysctl_is_alias(char *param)
++{
++	const char *alias = sysctl_find_alias(param);
++
++	return alias != NULL;
++}
++
+ /* Set sysctl value passed on kernel command line. */
+ static int process_sysctl_arg(char *param, char *val,
+ 			       const char *unused, void *arg)
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index bfa423ae16e3d..188984b0af66f 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2403,6 +2403,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ 	if (sb_has_quota_loaded(sb, type))
+ 		return -EBUSY;
+ 
++	/*
++	 * Quota files should never be encrypted.  They should be thought of as
++	 * filesystem metadata, not user data.  New-style internal quota files
++	 * cannot be encrypted by users anyway, but old-style external quota
++	 * files could potentially be incorrectly created in an encrypted
++	 * directory, hence this explicit check.  Some reasons why encrypted
++	 * quota files don't work include: (1) some filesystems that support
++	 * encryption don't handle it in their quota_read and quota_write, and
++	 * (2) cleaning up encrypted quota files at unmount would need special
++	 * consideration, as quota files are cleaned up later than user files.
++	 */
++	if (IS_ENCRYPTED(inode))
++		return -EINVAL;
++
+ 	dqopt->files[type] = igrab(inode);
+ 	if (!dqopt->files[type])
+ 		return -EIO;
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fe1bf5b6e0cb3..59f6b8e32cc97 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 			 * fully cached or it may be in the process of
+ 			 * being deleted due to a lease break.
+ 			 */
+-			if (!cfid->has_lease) {
++			if (!cfid->time || !cfid->has_lease) {
+ 				spin_unlock(&cfids->cfid_list_lock);
+ 				return NULL;
+ 			}
+@@ -193,10 +193,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	npath = path_no_prefix(cifs_sb, path);
+ 	if (IS_ERR(npath)) {
+ 		rc = PTR_ERR(npath);
+-		kfree(utf16_path);
+-		return rc;
++		goto out;
+ 	}
+ 
++	if (!npath[0]) {
++		dentry = dget(cifs_sb->root);
++	} else {
++		dentry = path_to_dentry(cifs_sb, npath);
++		if (IS_ERR(dentry)) {
++			rc = -ENOENT;
++			goto out;
++		}
++	}
++	cfid->dentry = dentry;
++
+ 	/*
+ 	 * We do not hold the lock for the open because in case
+ 	 * SMB2_open needs to reconnect.
+@@ -249,6 +259,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	smb2_set_related(&rqst[1]);
+ 
++	/*
++	 * Set @cfid->has_lease to true before sending out compounded request so
++	 * its lease reference can be put in cached_dir_lease_break() due to a
++	 * potential lease break right after the request is sent or while @cfid
++	 * is still being cached.  Concurrent processes won't be able to use
++	 * it yet due to @cfid->time being zero.
++	 */
++	cfid->has_lease = true;
++
+ 	rc = compound_send_recv(xid, ses, server,
+ 				flags, 2, rqst,
+ 				resp_buftype, rsp_iov);
+@@ -263,6 +282,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	cfid->tcon = tcon;
+ 	cfid->is_open = true;
+ 
++	spin_lock(&cfids->cfid_list_lock);
++
+ 	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ 	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ 	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+@@ -270,18 +291,25 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+ #endif /* CIFS_DEBUG2 */
+ 
+-	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++	rc = -EINVAL;
++	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
++		spin_unlock(&cfids->cfid_list_lock);
+ 		goto oshr_free;
++	}
+ 
+ 	smb2_parse_contexts(server, o_rsp,
+ 			    &oparms.fid->epoch,
+ 			    oparms.fid->lease_key, &oplock,
+ 			    NULL, NULL);
+-	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
++		spin_unlock(&cfids->cfid_list_lock);
+ 		goto oshr_free;
++	}
+ 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+-	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
++	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
++		spin_unlock(&cfids->cfid_list_lock);
+ 		goto oshr_free;
++	}
+ 	if (!smb2_validate_and_copy_iov(
+ 				le16_to_cpu(qi_rsp->OutputBufferOffset),
+ 				sizeof(struct smb2_file_all_info),
+@@ -289,37 +317,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 				(char *)&cfid->file_all_info))
+ 		cfid->file_all_info_is_valid = true;
+ 
+-	if (!npath[0])
+-		dentry = dget(cifs_sb->root);
+-	else {
+-		dentry = path_to_dentry(cifs_sb, npath);
+-		if (IS_ERR(dentry)) {
+-			rc = -ENOENT;
+-			goto oshr_free;
+-		}
+-	}
+-	spin_lock(&cfids->cfid_list_lock);
+-	cfid->dentry = dentry;
+ 	cfid->time = jiffies;
+-	cfid->has_lease = true;
+ 	spin_unlock(&cfids->cfid_list_lock);
++	/* At this point the directory handle is fully cached */
++	rc = 0;
+ 
+ oshr_free:
+-	kfree(utf16_path);
+ 	SMB2_open_free(&rqst[0]);
+ 	SMB2_query_info_free(&rqst[1]);
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	spin_lock(&cfids->cfid_list_lock);
+-	if (!cfid->has_lease) {
+-		if (rc) {
+-			if (cfid->on_list) {
+-				list_del(&cfid->entry);
+-				cfid->on_list = false;
+-				cfids->num_entries--;
+-			}
+-			rc = -ENOENT;
+-		} else {
++	if (rc) {
++		spin_lock(&cfids->cfid_list_lock);
++		if (cfid->on_list) {
++			list_del(&cfid->entry);
++			cfid->on_list = false;
++			cfids->num_entries--;
++		}
++		if (cfid->has_lease) {
+ 			/*
+ 			 * We are guaranteed to have two references at this
+ 			 * point. One for the caller and one for a potential
+ 			 * lease. Release the Lease-ref so that the directory
+ 			 * will be closed when the caller closes the cached
+ 			 * will be closed when the caller closes the cached
+ 			 * handle.
+ 			 */
++			cfid->has_lease = false;
+ 			spin_unlock(&cfids->cfid_list_lock);
+ 			kref_put(&cfid->refcount, smb2_close_cached_fid);
+ 			goto out;
+ 		}
++		spin_unlock(&cfids->cfid_list_lock);
+ 	}
+-	spin_unlock(&cfids->cfid_list_lock);
++out:
+ 	if (rc) {
+ 		if (cfid->is_open)
+ 			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ 				   cfid->fid.volatile_fid);
+ 		free_cached_dir(cfid);
+-		cfid = NULL;
+-	}
+-out:
+-	if (rc == 0) {
++	} else {
+ 		*ret_cfid = cfid;
+ 		atomic_inc(&tcon->num_remote_opens);
+ 	}
+-
++	kfree(utf16_path);
+ 	return rc;
+ }
+ 
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index aec6e91374742..e59505eff75ca 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -452,6 +452,11 @@ skip_rdma:
+ 		seq_printf(m, "\n\n\tSessions: ");
+ 		i = 0;
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			spin_lock(&ses->ses_lock);
++			if (ses->ses_status == SES_EXITING) {
++				spin_unlock(&ses->ses_lock);
++				continue;
++			}
+ 			i++;
+ 			if ((ses->serverDomain == NULL) ||
+ 				(ses->serverOS == NULL) ||
+@@ -472,6 +477,7 @@ skip_rdma:
+ 				ses->ses_count, ses->serverOS, ses->serverNOS,
+ 				ses->capabilities, ses->ses_status);
+ 			}
++			spin_unlock(&ses->ses_lock);
+ 
+ 			seq_printf(m, "\n\tSecurity type: %s ",
+ 				get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index 332588e77c311..26327442e383b 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ 	__u64   cifs_posix_caps;
+ } __packed;
+ 
++struct smb_mnt_tcon_info {
++	__u32	tid;
++	__u64	session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ 	__u32	number_of_snapshots;
+ 	__u32	number_of_snapshots_returned;
+@@ -108,6 +113,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
+ #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+ 
+ /*
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 6f3285f1dfee5..af7849e5974ff 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
+  * strlen(";sec=ntlmsspi") */
+ #define MAX_MECH_STR_LEN	13
+ 
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN		5
++/* strlen of ";host=" */
++#define HOST_KEY_LEN		6
+ 
+ /* strlen of ";ip4=" or ";ip6=" */
+ #define IP_KEY_LEN		5
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index e19df244ea7ea..3e2cdcaa9c1db 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1191,6 +1191,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+ 
+ const struct inode_operations cifs_symlink_inode_ops = {
+ 	.get_link = cifs_get_link,
++	.setattr = cifs_setattr,
+ 	.permission = cifs_permission,
+ 	.listxattr = cifs_listxattr,
+ };
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index ac68fed5ad28a..64dce1081d007 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -2113,6 +2113,7 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ 	unsigned int len, skip;
+ 	unsigned int nents = 0;
+ 	unsigned long addr;
++	size_t data_size;
+ 	int i, j;
+ 
+ 	/*
+@@ -2128,17 +2129,21 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ 	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ 	 */
+ 	for (i = 0; i < num_rqst; i++) {
++		data_size = iov_iter_count(&rqst[i].rq_iter);
++
+ 		/* We really don't want a mixture of pinned and unpinned pages
+ 		 * in the sglist.  It's hard to keep track of which is what.
+ 		 * Instead, we convert to a BVEC-type iterator higher up.
+ 		 */
+-		if (WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
++		if (data_size &&
++		    WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
+ 			return -EIO;
+ 
+ 		/* We also don't want to have any extra refs or pins to clean
+ 		 * up in the sglist.
+ 		 */
+-		if (WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
++		if (data_size &&
++		    WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
+ 			return -EIO;
+ 
+ 		for (j = 0; j < rqst[i].rq_nvec; j++) {
+@@ -2154,7 +2159,8 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ 			}
+ 			skip = 0;
+ 		}
+-		nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
++		if (data_size)
++			nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
+ 	}
+ 	nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+ 	return nents;
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index e17222fec9d29..a75220db5c1e1 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -2570,7 +2570,7 @@ typedef struct {
+ 
+ 
+ struct win_dev {
+-	unsigned char type[8]; /* IntxCHR or IntxBLK */
++	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
+ 	__le64 major;
+ 	__le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index bd0a1505719a4..a01ee1b0a66ce 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
+ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+ char *cifs_build_devname(char *nodename, const char *prepath);
+ extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
++void __release_mid(struct kref *refcount);
+ extern void cifs_wake_up_task(struct mid_q_entry *mid);
+ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ 				struct mid_q_entry *mid);
+@@ -741,4 +741,9 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+ 	return true;
+ }
+ 
++static inline void release_mid(struct mid_q_entry *mid)
++{
++	kref_put(&mid->refcount, __release_mid);
++}
++
+ #endif			/* _CIFSPROTO_H */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index bd33661dcb57f..024f54a0be052 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -156,13 +156,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 	/* If server is a channel, select the primary channel */
+ 	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+ 
+-	spin_lock(&pserver->srv_lock);
++	/* if we need to signal just this channel */
+ 	if (!all_channels) {
+-		pserver->tcpStatus = CifsNeedReconnect;
+-		spin_unlock(&pserver->srv_lock);
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsExiting)
++			server->tcpStatus = CifsNeedReconnect;
++		spin_unlock(&server->srv_lock);
+ 		return;
+ 	}
+-	spin_unlock(&pserver->srv_lock);
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+@@ -3849,8 +3850,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ 	spin_unlock(&ses->chan_lock);
+ 
+-	if (!is_binding)
++	if (!is_binding) {
+ 		ses->ses_status = SES_IN_SETUP;
++
++		/* force iface_list refresh */
++		ses->iface_last_update = 0;
++	}
+ 	spin_unlock(&ses->ses_lock);
+ 
+ 	/* update ses ip_addr only for primary chan */
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index cb85d7977b1e3..9e242b0f48fcd 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -567,6 +567,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ 			cifs_dbg(FYI, "Symlink\n");
+ 			fattr->cf_mode |= S_IFLNK;
+ 			fattr->cf_dtype = DT_LNK;
++		} else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
++			cifs_dbg(FYI, "FIFO\n");
++			fattr->cf_mode |= S_IFIFO;
++			fattr->cf_dtype = DT_FIFO;
+ 		} else {
+ 			fattr->cf_mode |= S_IFREG; /* file? */
+ 			fattr->cf_dtype = DT_REG;
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index f7160003e0ed9..73ededa8eba5c 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ out_drop_write:
+ 	return rc;
+ }
+ 
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++	int rc = 0;
++	struct smb_mnt_tcon_info tcon_inf;
++
++	tcon_inf.tid = tcon->tid;
++	tcon_inf.session_id = tcon->ses->Suid;
++
++	if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++		rc = -EFAULT;
++
++	return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ 				void __user *arg)
+ {
+@@ -414,6 +428,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ 			tcon = tlink_tcon(pSMBFile->tlink);
+ 			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ 			break;
++		case CIFS_IOC_GET_TCON_INFO:
++			cifs_sb = CIFS_SB(inode->i_sb);
++			tlink = cifs_sb_tlink(cifs_sb);
++			if (IS_ERR(tlink)) {
++				rc = PTR_ERR(tlink);
++				break;
++			}
++			tcon = tlink_tcon(tlink);
++			rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++			cifs_put_tlink(tlink);
++			break;
+ 		case CIFS_ENUMERATE_SNAPSHOTS:
+ 			if (pSMBFile == NULL)
+ 				break;
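The new CIFS_IOC_GET_TCON_INFO ioctl above can be exercised from user space against any file on a cifs mount. A hedged sketch of a caller; it assumes the CIFS_IOCTL_MAGIC value (0xCF in the in-tree header) and mirrors the struct layout from cifs_ioctl.h rather than including it:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

struct smb_mnt_tcon_info {
	uint32_t tid;
	uint64_t session_id;
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_GET_TCON_INFO \
	_IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)

int main(int argc, char **argv)
{
	struct smb_mnt_tcon_info info;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	if (ioctl(fd, CIFS_IOC_GET_TCON_INFO, &info) == 0)
		printf("tid=%u session_id=0x%llx\n", (unsigned)info.tid,
		       (unsigned long long)info.session_id);
	close(fd);
	return 0;
}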
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index c57ca2050b73f..1e9a49cb5696b 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -186,7 +186,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 	}
+ 
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+-		ses->chan_max = 1;
+ 		spin_unlock(&ses->chan_lock);
+ 		cifs_server_dbg(VFS, "no multichannel support\n");
+ 		return 0;
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 3935a60db5c31..446647df79dc3 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ {
+ 	struct close_cancelled_open *cancelled;
+ 
+-	cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ 	if (!cancelled)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index a5cba71c30aed..0b71a1cc22980 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5212,7 +5212,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+ 	 */
+ 
+-	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++	if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ 		return rc;
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+@@ -5260,6 +5260,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 		pdev->minor = cpu_to_le64(MINOR(dev));
+ 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ 							&bytes_written, iov, 1);
++	} else if (S_ISFIFO(mode)) {
++		memcpy(pdev->type, "LnxFIFO", 8);
++		pdev->major = 0;
++		pdev->minor = 0;
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
+ 	}
+ 	tcon->ses->server->ops->close(xid, tcon, &fid);
+ 	d_drop(dentry);
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 7676091b3e77a..21fc6d84e396d 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -452,6 +452,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 				  ptriplet->encryption.context,
+ 				  ses->smb3encryptionkey,
+ 				  SMB3_ENC_DEC_KEY_SIZE);
++		if (rc)
++			return rc;
+ 		rc = generate_key(ses, ptriplet->decryption.label,
+ 				  ptriplet->decryption.context,
+ 				  ses->smb3decryptionkey,
+@@ -460,9 +462,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 			return rc;
+ 	}
+ 
+-	if (rc)
+-		return rc;
+-
+ #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ 	cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ 	/*
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 2b9a2ed45a652..a48591d79b667 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+ 	return temp;
+ }
+ 
+-static void __release_mid(struct kref *refcount)
++void __release_mid(struct kref *refcount)
+ {
+ 	struct mid_q_entry *midEntry =
+ 			container_of(refcount, struct mid_q_entry, refcount);
+@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
+ 	mempool_free(midEntry, cifs_mid_poolp);
+ }
+ 
+-void release_mid(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->server;
+-
+-	spin_lock(&server->mid_lock);
+-	kref_put(&mid->refcount, __release_mid);
+-	spin_unlock(&server->mid_lock);
+-}
+-
+ void
+ delete_mid(struct mid_q_entry *mid)
+ {
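Turning release_mid() into a plain kref_put(), as the two hunks above do, works because kref's atomic counter lets the final put run the release callback without any external lock; the old version took server->mid_lock around the put for no benefit. A user-space analogue of the pattern using C11 atomics (types and names invented for illustration):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mid_entry {
	atomic_int refcount;
};

static void release_mid_cb(struct mid_entry *mid)
{
	printf("freeing mid\n");
	free(mid);
}

static void mid_put(struct mid_entry *mid)
{
	/* the thread that drops the last reference runs the release */
	if (atomic_fetch_sub(&mid->refcount, 1) == 1)
		release_mid_cb(mid);
}

int main(void)
{
	struct mid_entry *mid = malloc(sizeof(*mid));

	atomic_init(&mid->refcount, 2);
	mid_put(mid);	/* still referenced elsewhere */
	mid_put(mid);	/* last reference: release runs */
	return 0;
}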
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index 4ad5531686d81..c2bf829310bee 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ 			goto out;
+ 
+-		if (pTcon->ses->server->ops->set_EA)
++		if (pTcon->ses->server->ops->set_EA) {
+ 			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+ 				full_path, name, value, (__u16)size,
+ 				cifs_sb->local_nls, cifs_sb);
++			if (rc == 0)
++				inode_set_ctime_current(inode);
++		}
+ 		break;
+ 
+ 	case XATTR_CIFS_ACL:
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index c2b75d8988528..6cd9d117efaa2 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -372,11 +372,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ 	return 0;
+ }
+ 
++/**
++ * set_smb1_rsp_status() - set error type in smb response header
++ * @work:	smb work containing smb response header
++ * @err:	error code to set in response
++ */
++static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++	work->send_no_response = 1;
++}
++
+ static struct smb_version_ops smb1_server_ops = {
+ 	.get_cmd_val = get_smb1_cmd_val,
+ 	.init_rsp_hdr = init_smb1_rsp_hdr,
+ 	.allocate_rsp_buf = smb1_allocate_rsp_buf,
+ 	.check_user_session = smb1_check_user_session,
++	.set_rsp_status = set_smb1_rsp_status,
+ };
+ 
+ static int smb1_negotiate(struct ksmbd_work *work)
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index e5e438bf54996..c9de2b4684848 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -1107,6 +1107,7 @@ pass:
+ 		struct smb_acl *pdacl;
+ 		struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+ 		int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++		int pntsd_alloc_size;
+ 
+ 		if (parent_pntsd->osidoffset) {
+ 			powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+@@ -1119,9 +1120,10 @@ pass:
+ 			pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+ 		}
+ 
+-		pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+-				pgroup_sid_size + sizeof(struct smb_acl) +
+-				nt_size, GFP_KERNEL);
++		pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
++			pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
++
++		pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
+ 		if (!pntsd) {
+ 			rc = -ENOMEM;
+ 			goto free_aces_base;
+@@ -1136,6 +1138,27 @@ pass:
+ 		pntsd->gsidoffset = parent_pntsd->gsidoffset;
+ 		pntsd->dacloffset = parent_pntsd->dacloffset;
+ 
++		if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
++		if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
++		if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
+ 		if (pntsd->osidoffset) {
+ 			struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+ 					le32_to_cpu(pntsd->osidoffset));
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 3d5d652153a5b..1c9bfc0d67777 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -173,10 +173,6 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ 		return err;
+ 	}
+ 
+-	err = mnt_want_write(path.mnt);
+-	if (err)
+-		goto out_err;
+-
+ 	mode |= S_IFREG;
+ 	err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
+ 			 dentry, mode, true);
+@@ -186,9 +182,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ 	} else {
+ 		pr_err("File(%s): creation failed (err:%d)\n", name, err);
+ 	}
+-	mnt_drop_write(path.mnt);
+ 
+-out_err:
+ 	done_path_create(&path, dentry);
+ 	return err;
+ }
+@@ -219,10 +213,6 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ 		return err;
+ 	}
+ 
+-	err = mnt_want_write(path.mnt);
+-	if (err)
+-		goto out_err2;
+-
+ 	idmap = mnt_idmap(path.mnt);
+ 	mode |= S_IFDIR;
+ 	err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
+@@ -233,21 +223,19 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ 			       dentry->d_name.len);
+ 		if (IS_ERR(d)) {
+ 			err = PTR_ERR(d);
+-			goto out_err1;
++			goto out_err;
+ 		}
+ 		if (unlikely(d_is_negative(d))) {
+ 			dput(d);
+ 			err = -ENOENT;
+-			goto out_err1;
++			goto out_err;
+ 		}
+ 
+ 		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+ 		dput(d);
+ 	}
+ 
+-out_err1:
+-	mnt_drop_write(path.mnt);
+-out_err2:
++out_err:
+ 	done_path_create(&path, dentry);
+ 	if (err)
+ 		pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+@@ -665,16 +653,11 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+ 		goto out3;
+ 	}
+ 
+-	err = mnt_want_write(newpath.mnt);
+-	if (err)
+-		goto out3;
+-
+ 	err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
+ 		       d_inode(newpath.dentry),
+ 		       dentry, NULL);
+ 	if (err)
+ 		ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+-	mnt_drop_write(newpath.mnt);
+ 
+ out3:
+ 	done_path_create(&newpath, dentry);
+diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
+index 0e5dba2343ea1..e6609067ef261 100644
+--- a/fs/xfs/xfs_inode_item_recover.c
++++ b/fs/xfs/xfs_inode_item_recover.c
+@@ -369,24 +369,26 @@ xlog_recover_inode_commit_pass2(
+ 	 * superblock flag to determine whether we need to look at di_flushiter
+ 	 * to skip replay when the on disk inode is newer than the log one
+ 	 */
+-	if (!xfs_has_v3inodes(mp) &&
+-	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+-		/*
+-		 * Deal with the wrap case, DI_MAX_FLUSH is less
+-		 * than smaller numbers
+-		 */
+-		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+-		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+-			/* do nothing */
+-		} else {
+-			trace_xfs_log_recover_inode_skip(log, in_f);
+-			error = 0;
+-			goto out_release;
++	if (!xfs_has_v3inodes(mp)) {
++		if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
++			/*
++			 * Deal with the wrap case, DI_MAX_FLUSH is less
++			 * than smaller numbers
++			 */
++			if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
++			    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
++				/* do nothing */
++			} else {
++				trace_xfs_log_recover_inode_skip(log, in_f);
++				error = 0;
++				goto out_release;
++			}
+ 		}
++
++		/* Take the opportunity to reset the flush iteration count */
++		ldip->di_flushiter = 0;
+ 	}
+ 
+-	/* Take the opportunity to reset the flush iteration count */
+-	ldip->di_flushiter = 0;
+ 
+ 	if (unlikely(S_ISREG(ldip->di_mode))) {
+ 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 3c8bba9f1114a..be1dd4c1a9174 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+ void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+ 
+ struct list_head *ghes_get_devices(void);
++
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
+ #else
+ static inline struct list_head *ghes_get_devices(void) { return NULL; }
++
++static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
+ #endif
+ 
+ int ghes_estatus_pool_init(unsigned int num_ghes);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 98a7d6fd10360..a8b775e9d4d1a 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -890,10 +890,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
+ 	aux->ctx_field_size = size;
+ }
+ 
++static bool bpf_is_ldimm64(const struct bpf_insn *insn)
++{
++	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
++}
++
+ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+ {
+-	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+-	       insn->src_reg == BPF_PSEUDO_FUNC;
++	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+ }
+ 
+ struct bpf_prog_ops {
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index d5d4d19928e0a..4ba68cc635e2e 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -626,6 +626,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+ 	return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+ }
+ 
++static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
++{
++	/* {aggr,sample}_interval are unsigned long, hence could overflow */
++	return min(attrs->aggr_interval / attrs->sample_interval,
++			(unsigned long)UINT_MAX);
++}
++
+ 
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
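damon_max_nr_accesses() above clamps the interval ratio before it is narrowed to an unsigned int, since aggr_interval / sample_interval is computed in unsigned long and can exceed UINT_MAX on 64-bit. The same clamp in stand-alone form:

#include <limits.h>
#include <stdio.h>

static unsigned int max_nr_accesses(unsigned long aggr_us,
				    unsigned long sample_us)
{
	unsigned long ratio = aggr_us / sample_us;

	/* bound the 64-bit ratio before the implicit narrowing */
	return ratio > UINT_MAX ? UINT_MAX : (unsigned int)ratio;
}

int main(void)
{
	printf("%u\n", max_nr_accesses(100000, 5000));	/* 20 */
	printf("%u\n", max_nr_accesses(~0UL, 1));	/* clamped */
	return 0;
}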
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 62b61527bcc4f..1b523fd48586f 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ 
+ /**
+  * ethtool_sprintf - Write formatted string to ethtool string data
+- * @data: Pointer to start of string to update
++ * @data: Pointer to a pointer to the start of string to update
+  * @fmt: Format of string to write
+  *
+- * Write formatted string to data. Update data to point at start of
++ * Write formatted string to *data. Update *data to point at start of
+  * next string.
+  */
+ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index a82a4bb6ce68b..cf1adceb02697 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -104,6 +104,7 @@ enum f2fs_error {
+ 	ERROR_CORRUPTED_VERITY_XATTR,
+ 	ERROR_CORRUPTED_XATTR,
+ 	ERROR_INVALID_NODE_REFERENCE,
++	ERROR_INCONSISTENT_NAT,
+ 	ERROR_MAX,
+ };
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 87a21a18d114a..88cdfd90f0b2b 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1474,7 +1474,50 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ 	       kgid_has_mapping(fs_userns, kgid);
+ }
+ 
+-extern struct timespec64 current_time(struct inode *inode);
++struct timespec64 current_time(struct inode *inode);
++struct timespec64 inode_set_ctime_current(struct inode *inode);
++
++/**
++ * inode_get_ctime - fetch the current ctime from the inode
++ * @inode: inode from which to fetch ctime
++ *
++ * Grab the current ctime from the inode and return it.
++ */
++static inline struct timespec64 inode_get_ctime(const struct inode *inode)
++{
++	return inode->i_ctime;
++}
++
++/**
++ * inode_set_ctime_to_ts - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @ts: value to set in the ctime field
++ *
++ * Set the ctime in @inode to @ts
++ */
++static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
++						      struct timespec64 ts)
++{
++	inode->i_ctime = ts;
++	return ts;
++}
++
++/**
++ * inode_set_ctime - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @sec: tv_sec value to set
++ * @nsec: tv_nsec value to set
++ *
++ * Set the ctime in @inode to { @sec, @nsec }
++ */
++static inline struct timespec64 inode_set_ctime(struct inode *inode,
++						time64_t sec, long nsec)
++{
++	struct timespec64 ts = { .tv_sec  = sec,
++				 .tv_nsec = nsec };
++
++	return inode_set_ctime_to_ts(inode, ts);
++}
+ 
+ /*
+  * Snapshotting support.
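
The three inode_*_ctime() helpers added above are plain wrappers around i_ctime for now; the point, presumably, is to stop filesystems from touching the field directly so its representation can change later. A toy illustration of the accessor shape, using stand-in types rather than the kernel's:

#include <stdio.h>

struct timespec64 { long long tv_sec; long tv_nsec; };  /* stand-in */
struct inode { struct timespec64 i_ctime; };            /* stand-in */

static struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
                                               struct timespec64 ts)
{
        inode->i_ctime = ts;
        return ts;      /* returning ts lets callers chain the update */
}

static struct timespec64 inode_set_ctime(struct inode *inode,
                                         long long sec, long nsec)
{
        struct timespec64 ts = { .tv_sec = sec, .tv_nsec = nsec };

        return inode_set_ctime_to_ts(inode, ts);
}

int main(void)
{
        struct inode ino;

        inode_set_ctime(&ino, 1700000000, 0);
        printf("%lld\n", ino.i_ctime.tv_sec);
        return 0;
}
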
+diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
+index 107613f7d7920..f6cd0f909d9fb 100644
+--- a/include/linux/generic-radix-tree.h
++++ b/include/linux/generic-radix-tree.h
+@@ -38,6 +38,7 @@
+ 
+ #include <asm/page.h>
+ #include <linux/bug.h>
++#include <linux/limits.h>
+ #include <linux/log2.h>
+ #include <linux/math.h>
+ #include <linux/types.h>
+@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+ static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ 					   size_t obj_size)
+ {
++	if (iter->offset + obj_size < iter->offset) {
++		iter->offset	= SIZE_MAX;
++		iter->pos	= SIZE_MAX;
++		return;
++	}
++
+ 	iter->offset += obj_size;
+ 
+ 	if (!is_power_of_2(obj_size) &&
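
The genradix change above is the classic unsigned-wraparound guard: for unsigned types, a + b < a holds exactly when the addition wrapped, so the iterator saturates at SIZE_MAX instead of wrapping back to a small offset. Stand-alone demonstration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct iter { size_t offset; size_t pos; };

static void iter_advance(struct iter *it, size_t obj_size)
{
        if (it->offset + obj_size < it->offset) {   /* would wrap */
                it->offset = SIZE_MAX;              /* saturate */
                it->pos    = SIZE_MAX;
                return;
        }
        it->offset += obj_size;
}

int main(void)
{
        struct iter it = { SIZE_MAX - 4, 0 };

        iter_advance(&it, 8);
        printf("saturated: %d\n", it.offset == SIZE_MAX);  /* 1 */
        return 0;
}
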
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index d8a6fdce93738..90081afa10ce5 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+  * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
+  * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
+  * IRQD_CAN_RESERVE		- Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK	- Non-maskable MSI quirk for affinity change
+- *				  required
+  * IRQD_HANDLE_ENFORCE_IRQCTX	- Enforce that handle_irq_*() is only invoked
+  *				  from actual interrupt context.
+  * IRQD_AFFINITY_ON_ACTIVATE	- Affinity is set on activation. Don't call
+@@ -247,11 +245,10 @@ enum {
+ 	IRQD_SINGLE_TARGET		= BIT(24),
+ 	IRQD_DEFAULT_TRIGGER_SET	= BIT(25),
+ 	IRQD_CAN_RESERVE		= BIT(26),
+-	IRQD_MSI_NOMASK_QUIRK		= BIT(27),
+-	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(28),
+-	IRQD_AFFINITY_ON_ACTIVATE	= BIT(29),
+-	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(30),
+-	IRQD_RESEND_WHEN_IN_PROGRESS    = BIT(31),
++	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(27),
++	IRQD_AFFINITY_ON_ACTIVATE	= BIT(28),
++	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(29),
++	IRQD_RESEND_WHEN_IN_PROGRESS    = BIT(30),
+ };
+ 
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ 	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+ 
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+-	__irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+-	__irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+-	return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ 	__irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index af796986baee6..1d71a043460cf 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+ LSM_HOOK(int, 0, syslog, int type)
+ LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ 	 const struct timezone *tz)
+-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
++LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+ LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+ LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+ LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
++LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
+ 	 u32 *ctxlen)
+ 
+ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index daa2f40d9ce65..7b12eebc5586d 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,7 +295,9 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD	(1<<14)	/* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE	(1<<15)	/* Disable broken SD cache support */
++#define MMC_QUIRK_BROKEN_CACHE_FLUSH	(1<<16)	/* Don't flush cache until the write has occurred */
+ 
++	bool			written_flag;	/* Indicates eMMC has been written since power on */
+ 	bool			reenable_cmdq;	/* Re-enable Command Queue */
+ 
+ 	unsigned int		erase_size;	/* erase size in sectors */
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index a50ea79522f85..ddace8c34dcf9 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -547,12 +547,6 @@ enum {
+ 	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
+ 	/* Free MSI descriptors */
+ 	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
+-	/*
+-	 * Quirk to handle MSI implementations which do not provide
+-	 * masking. Currently known to affect x86, but has to be partially
+-	 * handled in the core MSI code.
+-	 */
+-	MSI_FLAG_NOMASK_QUIRK		= (1 << 7),
+ 
+ 	/* Mask for the generic functionality */
+ 	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 7702f078ef4ad..54bc1ca7b66fc 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -180,6 +180,8 @@
+ #define PCI_DEVICE_ID_BERKOM_A4T		0xffa4
+ #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO	0xffa8
+ 
++#define PCI_VENDOR_ID_ITTIM		0x0b48
++
+ #define PCI_VENDOR_ID_COMPAQ		0x0e11
+ #define PCI_DEVICE_ID_COMPAQ_TOKENRING	0x0508
+ #define PCI_DEVICE_ID_COMPAQ_TACHYON	0xa0fc
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index e7afd0dd8a3d1..ff29da8e35f6c 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -843,11 +843,11 @@ struct perf_event {
+ };
+ 
+ /*
+- *           ,-----------------------[1:n]----------------------.
+- *           V                                                  V
+- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+- *           ^                      ^     |                     |
+- *           `--------[1:n]---------'     `-[n:1]-> pmu <-[1:n]-'
++ *           ,-----------------------[1:n]------------------------.
++ *           V                                                    V
++ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
++ *                                        |                       |
++ *                                        `--[n:1]-> pmu <-[1:n]--'
+  *
+  *
+  * struct perf_event_pmu_context  lifetime is refcount based and RCU freed
+@@ -865,6 +865,9 @@ struct perf_event {
+  * ctx->mutex pinning the configuration. Since we hold a reference on
+  * group_leader (through the filedesc) it can't go away, therefore it's
+  * associated pmu_ctx must exist and cannot change due to ctx->mutex.
++ *
++ * perf_event holds a refcount on perf_event_context
++ * perf_event holds a refcount on perf_event_pmu_context
+  */
+ struct perf_event_pmu_context {
+ 	struct pmu			*pmu;
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 1424670df161d..9aa6358a1a16b 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
+ 	return level;
+ }
+ 
++/*
++ * These macro definitions avoid redundant invocations of preempt_count()
++ * because such invocations would result in redundant loads given that
++ * preempt_count() is commonly implemented with READ_ONCE().
++ */
++
+ #define nmi_count()	(preempt_count() & NMI_MASK)
+ #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+ #ifdef CONFIG_PREEMPT_RT
+ # define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
++# define irq_count()		((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+ #else
+ # define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
++# define irq_count()		(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+ #endif
+-#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())
+ 
+ /*
+  * Macros to retrieve the current execution context:
+@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
+ #define in_nmi()		(nmi_count())
+ #define in_hardirq()		(hardirq_count())
+ #define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+-#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
++#ifdef CONFIG_PREEMPT_RT
++# define in_task()		(!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
++#else
++# define in_task()		(!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#endif
+ 
+ /*
+  * The following macros are deprecated and should not be used in new code:
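
The preempt.h rewrite is an optimization, not a semantic change: irq_count() previously read preempt_count() up to three times (each read is volatile), and ORing individually masked reads equals masking a single read with the OR of the masks. A quick check of that identity, with illustrative mask values rather than the kernel's definitions:

#include <assert.h>
#include <stdio.h>

#define NMI_MASK     0x00f00000u   /* illustrative values only */
#define HARDIRQ_MASK 0x000f0000u
#define SOFTIRQ_MASK 0x0000ff00u

int main(void)
{
        for (unsigned int pc = 0; pc < 0x01000000u; pc += 4093) {
                unsigned int three_reads = (pc & NMI_MASK) |
                                           (pc & HARDIRQ_MASK) |
                                           (pc & SOFTIRQ_MASK);
                unsigned int one_read = pc & (NMI_MASK | HARDIRQ_MASK |
                                              SOFTIRQ_MASK);

                assert(three_reads == one_read);
        }
        printf("fold is semantics-preserving\n");
        return 0;
}
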
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 04ae1d9073a74..0755ba9938f74 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -41,8 +41,8 @@ struct pwm_args {
+ };
+ 
+ enum {
+-	PWMF_REQUESTED = 1 << 0,
+-	PWMF_EXPORTED = 1 << 1,
++	PWMF_REQUESTED = 0,
++	PWMF_EXPORTED = 1,
+ };
+ 
+ /*
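
Note the pwm.h change flips PWMF_* from bit masks (1 << n) to bit numbers (n). That is the calling convention of the atomic set_bit()/test_bit()/clear_bit() family, which suggests the PWM core now manipulates these flags with atomic bitops; the values only make sense in one of the two conventions at a time. Sketch of the difference:

#include <stdio.h>

enum { PWMF_REQUESTED = 0, PWMF_EXPORTED = 1 };  /* bit numbers, not masks */

int main(void)
{
        unsigned long flags = 0;

        flags |= 1UL << PWMF_REQUESTED;   /* what set_bit(nr, &flags) does */
        printf("requested=%lu exported=%lu\n",
               (flags >> PWMF_REQUESTED) & 1,    /* test_bit() equivalent */
               (flags >> PWMF_EXPORTED) & 1);
        return 0;
}
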
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 39b74d83c7c4a..cfcb7e2c3813f 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -383,6 +383,7 @@ struct ucred {
+ #define SOL_MPTCP	284
+ #define SOL_MCTP	285
+ #define SOL_SMC		286
++#define SOL_VSOCK	287
+ 
+ /* IPX options */
+ #define IPX_TYPE	1
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 9e3cb6923b0ef..5077776e995e0 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -5,7 +5,9 @@
+ #include <linux/compiler.h>	/* for inline */
+ #include <linux/types.h>	/* for size_t */
+ #include <linux/stddef.h>	/* for NULL */
++#include <linux/err.h>		/* for ERR_PTR() */
+ #include <linux/errno.h>	/* for E2BIG */
++#include <linux/overflow.h>	/* for check_mul_overflow() */
+ #include <linux/stdarg.h>
+ #include <uapi/linux/string.h>
+ 
+@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
+ extern void *vmemdup_user(const void __user *, size_t);
+ extern void *memdup_user_nul(const void __user *, size_t);
+ 
++/**
++ * memdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result is physically
++ * contiguous, to be freed by kfree().
++ */
++static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
++{
++	size_t nbytes;
++
++	if (check_mul_overflow(n, size, &nbytes))
++		return ERR_PTR(-EOVERFLOW);
++
++	return memdup_user(src, nbytes);
++}
++
++/**
++ * vmemdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result may be not
++ * physically contiguous. Use kvfree() to free.
++ */
++static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
++{
++	size_t nbytes;
++
++	if (check_mul_overflow(n, size, &nbytes))
++		return ERR_PTR(-EOVERFLOW);
++
++	return vmemdup_user(src, nbytes);
++}
++
+ /*
+  * Include machine specific inline routines
+  */
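
memdup_array_user()/vmemdup_array_user() exist so callers stop open-coding n * size, which silently wraps; the kexec_load() hunk later in this patch is converted to exactly this helper. A user-space analogue built on __builtin_mul_overflow(), the primitive that check_mul_overflow() wraps (malloc stands in for the kernel allocators):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
        size_t nbytes;
        void *p;

        if (__builtin_mul_overflow(n, size, &nbytes)) {
                errno = EOVERFLOW;      /* refuse a wrapped byte count */
                return NULL;
        }
        p = malloc(nbytes);
        if (p)
                memcpy(p, src, nbytes);
        return p;
}

int main(void)
{
        int v[4] = { 1, 2, 3, 4 };
        int *ok   = memdup_array(v, 4, sizeof(v[0]));
        void *bad = memdup_array(v, (size_t)-1, sizeof(v[0]));

        printf("ok[3]=%d bad=%p\n", ok ? ok[3] : -1, bad);  /* 4, (nil) */
        free(ok);
        return 0;
}
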
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 4f41d839face4..03722690f2c39 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -92,6 +92,7 @@ struct rpc_clnt {
+ 	};
+ 	const struct cred	*cl_cred;
+ 	unsigned int		cl_max_connect; /* max number of transports not to the same IP */
++	struct super_block *pipefs_sb;
+ };
+ 
+ /*
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 59d451f455bfb..094e5eaef072d 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -227,6 +227,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+ 
+ void do_sysctl_args(void);
++bool sysctl_is_alias(char *param);
+ int do_proc_douintvec(struct ctl_table *table, int write,
+ 		      void *buffer, size_t *lenp, loff_t *ppos,
+ 		      int (*conv)(unsigned long *lvalp,
+@@ -270,6 +271,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
+ static inline void do_sysctl_args(void)
+ {
+ }
++
++static inline bool sysctl_is_alias(char *param)
++{
++	return false;
++}
+ #endif /* CONFIG_SYSCTL */
+ 
+ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+diff --git a/include/linux/torture.h b/include/linux/torture.h
+index 7038104463e48..017f0f710815a 100644
+--- a/include/linux/torture.h
++++ b/include/linux/torture.h
+@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
+ }
+ 
+ /* Definitions for high-resolution-timer sleeps. */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++			 struct torture_random_state *trsp);
+ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+@@ -108,12 +109,15 @@ bool torture_must_stop(void);
+ bool torture_must_stop_irq(void);
+ void torture_kthread_stopping(char *title);
+ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+-			     char *f, struct task_struct **tp);
++			     char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp));
+ void _torture_stop_kthread(char *m, struct task_struct **tp);
+ 
+ #define torture_create_kthread(n, arg, tp) \
+ 	_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+-				"Failed to create " #n, &(tp))
++				"Failed to create " #n, &(tp), NULL)
++#define torture_create_kthread_cb(n, arg, tp, cbf) \
++	_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
++				"Failed to create " #n, &(tp), cbf)
+ #define torture_stop_kthread(n, tp) \
+ 	_torture_stop_kthread("Stopping " #n " task", &(tp))
+ 
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index faa579209a724..40436b7ddfd24 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -492,6 +492,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_COND_BIT,
+ 	EVENT_FILE_FL_PID_FILTER_BIT,
+ 	EVENT_FILE_FL_WAS_ENABLED_BIT,
++	EVENT_FILE_FL_FREED_BIT,
+ };
+ 
+ extern struct trace_event_file *trace_get_event_file(const char *instance,
+@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
+  *  PID_FILTER    - When set, the event is filtered based on pid
+  *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
++ *  FREED         - File descriptor is freed, all fields should be considered invalid
+  */
+ enum {
+ 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -643,6 +645,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ 	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
++	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
+ };
+ 
+ struct trace_event_file {
+@@ -671,6 +674,7 @@ struct trace_event_file {
+ 	 * caching and such. Which is mostly OK ;-)
+ 	 */
+ 	unsigned long		flags;
++	atomic_t		ref;	/* ref count for opened files */
+ 	atomic_t		sm_ref;	/* soft-mode reference counter */
+ 	atomic_t		tm_ref;	/* trigger-mode reference counter */
+ };
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 683efe29fa698..ca26c1f94f044 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -222,18 +222,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+  * to generate better code.
+  */
+ #ifdef CONFIG_LOCKDEP
+-#define __INIT_WORK(_work, _func, _onstack)				\
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
+ 	do {								\
+-		static struct lock_class_key __key;			\
+-									\
+ 		__init_work((_work), _onstack);				\
+ 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+-		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
++		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
+ 		INIT_LIST_HEAD(&(_work)->entry);			\
+ 		(_work)->func = (_func);				\
+ 	} while (0)
+ #else
+-#define __INIT_WORK(_work, _func, _onstack)				\
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
+ 	do {								\
+ 		__init_work((_work), _onstack);				\
+ 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+@@ -242,12 +240,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ 	} while (0)
+ #endif
+ 
++#define __INIT_WORK(_work, _func, _onstack)				\
++	do {								\
++		static __maybe_unused struct lock_class_key __key;	\
++									\
++		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
++	} while (0)
++
+ #define INIT_WORK(_work, _func)						\
+ 	__INIT_WORK((_work), (_func), 0)
+ 
+ #define INIT_WORK_ONSTACK(_work, _func)					\
+ 	__INIT_WORK((_work), (_func), 1)
+ 
++#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
++	__INIT_WORK_KEY((_work), (_func), 1, _key)
++
+ #define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
+ 	do {								\
+ 		INIT_WORK(&(_work)->work, (_func));			\
+@@ -683,8 +691,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+ 	return fn(arg);
+ }
+ #else
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++		     void *arg, struct lock_class_key *key);
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu(_cpu, _fn, _arg)			\
++({							\
++	static struct lock_class_key __key;		\
++							\
++	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
++})
++
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++			  void *arg, struct lock_class_key *key);
++
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu_safe(_cpu, _fn, _arg)		\
++({							\
++	static struct lock_class_key __key;		\
++							\
++	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
++})
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
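
The work_on_cpu()/work_on_cpu_safe() macros above hide a static struct lock_class_key in each expansion, so every textual call site gets its own lockdep class instead of all of them sharing one. A compilable sketch of that per-call-site trick (GNU C statement expression, as the kernel uses; names are ours):

#include <stdio.h>

struct lock_class_key { int dummy; };

static long do_work_key(long (*fn)(void *), void *arg,
                        struct lock_class_key *key)
{
        printf("key for this call site: %p\n", (void *)key);
        return fn(arg);
}

#define DO_WORK(fn, arg)                        \
({                                              \
        static struct lock_class_key __key;     \
                                                \
        do_work_key(fn, arg, &__key);           \
})

static long fn_a(void *arg) { (void)arg; return 1; }

int main(void)
{
        DO_WORK(fn_a, NULL);   /* two call sites ...                  */
        DO_WORK(fn_a, NULL);   /* ... print two distinct key objects */
        return 0;
}
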
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 7c816359d5a98..75972e211ba12 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
+ 	return *(__force __be32 *)sreg;
+ }
+ 
+-static inline void nft_reg_store64(u32 *dreg, u64 val)
++static inline void nft_reg_store64(u64 *dreg, u64 val)
+ {
+-	put_unaligned(val, (u64 *)dreg);
++	put_unaligned(val, dreg);
+ }
+ 
+ static inline u64 nft_reg_load64(const u32 *sreg)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index fc189910e63fc..b9f0ef4bb527a 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2006,21 +2006,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+ 	/* sk_tx_queue_mapping accept only upto a 16-bit value */
+ 	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ 		return;
+-	sk->sk_tx_queue_mapping = tx_queue;
++	/* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because socket lock might be not held.
++	 */
++	WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ }
+ 
+ #define NO_QUEUE_MAPPING	USHRT_MAX
+ 
+ static inline void sk_tx_queue_clear(struct sock *sk)
+ {
+-	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
++	/* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because socket lock might be not held.
++	 */
++	WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+ }
+ 
+ static inline int sk_tx_queue_get(const struct sock *sk)
+ {
+-	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+-		return sk->sk_tx_queue_mapping;
++	if (sk) {
++		/* Paired with WRITE_ONCE() in sk_tx_queue_clear()
++		 * and sk_tx_queue_set().
++		 */
++		int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ 
++		if (val != NO_QUEUE_MAPPING)
++			return val;
++	}
+ 	return -1;
+ }
+ 
+@@ -2169,7 +2181,7 @@ static inline void __dst_negative_advice(struct sock *sk)
+ 		if (ndst != dst) {
+ 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ 			sk_tx_queue_clear(sk);
+-			sk->sk_dst_pending_confirm = 0;
++			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 		}
+ 	}
+ }
+@@ -2186,7 +2198,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	struct dst_entry *old_dst;
+ 
+ 	sk_tx_queue_clear(sk);
+-	sk->sk_dst_pending_confirm = 0;
++	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ 					    lockdep_sock_is_held(sk));
+ 	rcu_assign_pointer(sk->sk_dst_cache, dst);
+@@ -2199,7 +2211,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	struct dst_entry *old_dst;
+ 
+ 	sk_tx_queue_clear(sk);
+-	sk->sk_dst_pending_confirm = 0;
++	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ 	dst_release(old_dst);
+ }
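
The sk_tx_queue_mapping annotations above follow the usual lockless-field discipline: writes can happen without the socket lock, so every access is marked, and crucially sk_tx_queue_get() now loads the field once into a local and tests that snapshot rather than re-reading a value that may change underfoot. The closest portable analogue of READ_ONCE()/WRITE_ONCE() is a relaxed atomic; a sketch:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define NO_QUEUE_MAPPING USHRT_MAX

static _Atomic unsigned short tx_queue_mapping = NO_QUEUE_MAPPING;

static void tx_queue_set(unsigned short q)
{
        /* WRITE_ONCE() stand-in: one untorn store, no ordering implied */
        atomic_store_explicit(&tx_queue_mapping, q, memory_order_relaxed);
}

static int tx_queue_get(void)
{
        /* READ_ONCE() stand-in: load exactly once ...                 */
        unsigned short val = atomic_load_explicit(&tx_queue_mapping,
                                                  memory_order_relaxed);

        /* ... then test the snapshot, not the shared variable again   */
        return val != NO_QUEUE_MAPPING ? val : -1;
}

int main(void)
{
        tx_queue_set(3);
        printf("%d\n", tx_queue_get());
        return 0;
}
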
+diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
+index b24ea2d9400ba..1dc2f827d0bcf 100644
+--- a/include/net/tc_act/tc_ct.h
++++ b/include/net/tc_act/tc_ct.h
+@@ -57,6 +57,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ 	return to_ct_params(a)->nf_ft;
+ }
+ 
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++	return to_ct_params(a)->helper;
++}
++
+ #else
+ static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
+ static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+@@ -64,6 +69,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ {
+ 	return NULL;
+ }
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++	return NULL;
++}
+ #endif /* CONFIG_NF_CONNTRACK */
+ 
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 528279056b3ab..1a5f90b0a5463 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -67,6 +67,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+  * @i2s_link_mask: I2S/TDM links enabled on the board
+  * @num_dai_drivers: number of elements in @dai_drivers
+  * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
++ * @subsystem_vendor: optional PCI SSID vendor value
++ * @subsystem_device: optional PCI SSID device value
++ * @subsystem_id_set: true if a value has been written to
++ *		      subsystem_vendor and subsystem_device.
+  */
+ struct snd_soc_acpi_mach_params {
+ 	u32 acpi_ipc_irq_index;
+@@ -79,6 +83,9 @@ struct snd_soc_acpi_mach_params {
+ 	u32 i2s_link_mask;
+ 	u32 num_dai_drivers;
+ 	struct snd_soc_dai_driver *dai_drivers;
++	unsigned short subsystem_vendor;
++	unsigned short subsystem_device;
++	bool subsystem_id_set;
+ };
+ 
+ /**
+diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
+index fc94dfb0021fd..e8ff2e089cd00 100644
+--- a/include/sound/soc-card.h
++++ b/include/sound/soc-card.h
+@@ -59,6 +59,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ 				  struct snd_soc_dai_link *dai_link);
+ 
++#ifdef CONFIG_PCI
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++					     unsigned short vendor,
++					     unsigned short device)
++{
++	card->pci_subsystem_vendor = vendor;
++	card->pci_subsystem_device = device;
++	card->pci_subsystem_set = true;
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++					    unsigned short *vendor,
++					    unsigned short *device)
++{
++	if (!card->pci_subsystem_set)
++		return -ENOENT;
++
++	*vendor = card->pci_subsystem_vendor;
++	*device = card->pci_subsystem_device;
++
++	return 0;
++}
++#else /* !CONFIG_PCI */
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++					     unsigned short vendor,
++					     unsigned short device)
++{
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++					    unsigned short *vendor,
++					    unsigned short *device)
++{
++	return -ENOENT;
++}
++#endif /* CONFIG_PCI */
++
+ /* device driver data */
+ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ 					    void *data)
+diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
+index e3906ecda740a..5827b4d882fcc 100644
+--- a/include/sound/soc-dai.h
++++ b/include/sound/soc-dai.h
+@@ -355,6 +355,7 @@ struct snd_soc_dai_ops {
+ 
+ 	/* bit field */
+ 	unsigned int no_capture_mute:1;
++	unsigned int mute_unmute_on_trigger:1;
+ };
+ 
+ struct snd_soc_cdai_ops {
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index cf34810882347..0c54b343d3e5d 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -931,6 +931,17 @@ struct snd_soc_card {
+ #ifdef CONFIG_DMI
+ 	char dmi_longname[80];
+ #endif /* CONFIG_DMI */
++
++#ifdef CONFIG_PCI
++	/*
++	 * PCI does not define 0 as invalid, so pci_subsystem_set indicates
++	 * whether a value has been written to these fields.
++	 */
++	unsigned short pci_subsystem_vendor;
++	unsigned short pci_subsystem_device;
++	bool pci_subsystem_set;
++#endif /* CONFIG_PCI */
++
+ 	char topology_shortname[32];
+ 
+ 	struct device *dev;
+diff --git a/include/sound/sof.h b/include/sound/sof.h
+index d3c41f87ac319..51294f2ba302c 100644
+--- a/include/sound/sof.h
++++ b/include/sound/sof.h
+@@ -64,6 +64,14 @@ struct snd_sof_pdata {
+ 	const char *name;
+ 	const char *platform;
+ 
++	/*
++	 * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
++	 * flag indicates that a value has been written to these members.
++	 */
++	unsigned short subsystem_vendor;
++	unsigned short subsystem_device;
++	bool subsystem_id_set;
++
+ 	struct device *dev;
+ 
+ 	/*
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index 3c36aeade991e..9a85c69782bdd 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -283,7 +283,7 @@ struct prctl_mm_map {
+ 
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE			65
+-# define PR_MDWE_REFUSE_EXEC_GAIN	1
++# define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
+ 
+ #define PR_GET_MDWE			66
+ 
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index c60ca33eac594..ed07181d4eff9 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -191,4 +191,21 @@ struct sockaddr_vm {
+ 
+ #define IOCTL_VM_SOCKETS_GET_LOCAL_CID		_IO(7, 0xb9)
+ 
++/* MSG_ZEROCOPY notifications are encoded in the standard error format,
++ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
++ * kernel source tree for more details.
++ */
++
++/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define SOL_VSOCK	287
++
++/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define VSOCK_RECVERR	1
++
+ #endif /* _UAPI_VM_SOCKETS_H */
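
With SOL_VSOCK and VSOCK_RECVERR in the UAPI, completion parsing for vsock MSG_ZEROCOPY should follow the generic recipe in Documentation/networking/msg_zerocopy.rst. A sketch of the receive side, assuming a connected vsock fd with zerocopy enabled (error handling trimmed; this is our extrapolation from that recipe, not code from the patch):

#include <linux/errqueue.h>
#include <linux/vm_sockets.h>   /* SOL_VSOCK, VSOCK_RECVERR after this patch */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void read_zerocopy_notification(int fd)
{
        char control[CMSG_SPACE(sizeof(struct sock_extended_err))];
        struct msghdr msg = { .msg_control = control,
                              .msg_controllen = sizeof(control) };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_VSOCK &&
                    cm->cmsg_type == VSOCK_RECVERR) {
                        struct sock_extended_err serr;

                        memcpy(&serr, CMSG_DATA(cm), sizeof(serr));
                        /* ee_info..ee_data is the completed send range */
                        printf("completed %u..%u\n", serr.ee_info,
                               serr.ee_data);
                }
        }
}
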
+diff --git a/include/video/sticore.h b/include/video/sticore.h
+index 945ad60463a18..012b5b46ad7d0 100644
+--- a/include/video/sticore.h
++++ b/include/video/sticore.h
+@@ -232,7 +232,7 @@ struct sti_rom_font {
+ 	 u8 height;
+ 	 u8 font_type;		/* language type */
+ 	 u8 bytes_per_char;
+-	u32 next_font;
++	s32 next_font;		/* note: signed int */
+ 	 u8 underline_height;
+ 	 u8 underline_pos;
+ 	 u8 res008[2];
+diff --git a/init/Makefile b/init/Makefile
+index ec557ada3c12e..cbac576c57d63 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
+ $(obj)/version-timestamp.o: include/generated/utsversion.h
+ CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+ KASAN_SANITIZE_version-timestamp.o := n
++KCSAN_SANITIZE_version-timestamp.o := n
+ GCOV_PROFILE_version-timestamp.o := n
+diff --git a/init/main.c b/init/main.c
+index ad920fac325c3..1e19a40f40c5f 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -530,6 +530,10 @@ static int __init unknown_bootoption(char *param, char *val,
+ {
+ 	size_t len = strlen(param);
+ 
++	/* Handle params aliased to sysctls */
++	if (sysctl_is_alias(param))
++		return 0;
++
+ 	repair_env_string(param, val);
+ 
+ 	/* Handle obsolete-style parameters */
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index b603a06f7103d..5fcfe03ed93ec 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -139,13 +139,8 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+ 		struct io_sq_data *sq = ctx->sq_data;
+ 
+-		if (mutex_trylock(&sq->lock)) {
+-			if (sq->thread) {
+-				sq_pid = task_pid_nr(sq->thread);
+-				sq_cpu = task_cpu(sq->thread);
+-			}
+-			mutex_unlock(&sq->lock);
+-		}
++		sq_pid = sq->task_pid;
++		sq_cpu = sq->sq_cpu;
+ 	}
+ 
+ 	seq_printf(m, "SqThread:\t%d\n", sq_pid);
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index bd6c2c7959a5b..65b5dbe3c850e 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -214,6 +214,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
+ 			did_sig = get_signal(&ksig);
+ 		cond_resched();
+ 		mutex_lock(&sqd->lock);
++		sqd->sq_cpu = raw_smp_processor_id();
+ 	}
+ 	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ }
+@@ -229,10 +230,15 @@ static int io_sq_thread(void *data)
+ 	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+ 	set_task_comm(current, buf);
+ 
+-	if (sqd->sq_cpu != -1)
++	/* reset to our pid after we've set task_comm, for fdinfo */
++	sqd->task_pid = current->pid;
++
++	if (sqd->sq_cpu != -1) {
+ 		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+-	else
++	} else {
+ 		set_cpus_allowed_ptr(current, cpu_online_mask);
++		sqd->sq_cpu = raw_smp_processor_id();
++	}
+ 
+ 	mutex_lock(&sqd->lock);
+ 	while (1) {
+@@ -261,6 +267,7 @@ static int io_sq_thread(void *data)
+ 				mutex_unlock(&sqd->lock);
+ 				cond_resched();
+ 				mutex_lock(&sqd->lock);
++				sqd->sq_cpu = raw_smp_processor_id();
+ 			}
+ 			continue;
+ 		}
+@@ -294,6 +301,7 @@ static int io_sq_thread(void *data)
+ 				mutex_unlock(&sqd->lock);
+ 				schedule();
+ 				mutex_lock(&sqd->lock);
++				sqd->sq_cpu = raw_smp_processor_id();
+ 			}
+ 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ 				atomic_andnot(IORING_SQ_NEED_WAKEUP,
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 65075f1e4ac8c..7a98cd176a127 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ 	unsigned long ino;
+ 	dev_t dev;
+ 
+-	exe_file = get_task_exe_file(tsk);
++	/* only do exe filtering if we are recording @current events/records */
++	if (tsk != current)
++		return 0;
++
++	if (!current->mm)
++		return 0;
++	exe_file = get_mm_exe_file(current->mm);
+ 	if (!exe_file)
+ 		return 0;
+ 	ino = file_inode(exe_file)->i_ino;
+ 	dev = file_inode(exe_file)->i_sb->s_dev;
+ 	fput(exe_file);
++
+ 	return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index e3e45b651cd40..33d1a76b7fc5d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -613,7 +613,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+ 
+ 	if (val < ksym->start)
+ 		return -1;
+-	if (val >= ksym->end)
++	/* Ensure that we detect return addresses as part of the program, when
++	 * the final instruction is a call for a program part of the stack
++	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
++	 */
++	if (val > ksym->end)
+ 		return  1;
+ 
+ 	return 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e7e2687c35884..9f27b40839831 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1513,7 +1513,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ 	if (state->in_async_callback_fn)
+ 		verbose(env, " async_cb");
+ 	verbose(env, "\n");
+-	mark_verifier_state_clean(env);
++	if (!print_all)
++		mark_verifier_state_clean(env);
+ }
+ 
+ static inline u32 vlog_alignment(u32 pos)
+@@ -3192,12 +3193,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+ 
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+  * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of a looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * history entry recording a jump from last instruction of parent state and
++ * first instruction of given state.
+  */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ 			     u32 *history)
+ {
+ 	u32 cnt = *history;
+ 
++	if (i == st->first_insn_idx) {
++		if (cnt == 0)
++			return -ENOENT;
++		if (cnt == 1 && st->jmp_history[0].idx == i)
++			return -ENOENT;
++	}
++
+ 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ 		i = st->jmp_history[cnt - 1].prev_idx;
+ 		(*history)--;
+@@ -3418,7 +3436,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ 	if (class == BPF_ALU || class == BPF_ALU64) {
+ 		if (!bt_is_reg_set(bt, dreg))
+ 			return 0;
+-		if (opcode == BPF_MOV) {
++		if (opcode == BPF_END || opcode == BPF_NEG) {
++			/* sreg is reserved and unused
++			 * dreg still need precision before this insn
++			 */
++			return 0;
++		} else if (opcode == BPF_MOV) {
+ 			if (BPF_SRC(insn->code) == BPF_X) {
+ 				/* dreg = sreg
+ 				 * dreg needs precision after this insn
+@@ -4072,10 +4095,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ 				 * Nothing to be tracked further in the parent state.
+ 				 */
+ 				return 0;
+-			if (i == first_idx)
+-				break;
+ 			subseq_idx = i;
+ 			i = get_prev_insn_idx(st, i, &history);
++			if (i == -ENOENT)
++				break;
+ 			if (i >= env->prog->len) {
+ 				/* This can happen if backtracking reached insn 0
+ 				 * and there are still reg_mask or stack_mask
+@@ -4350,7 +4373,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 		   insn->imm != 0 && env->bpf_capable) {
+ 		struct bpf_reg_state fake_reg = {};
+ 
+-		__mark_reg_known(&fake_reg, (u32)insn->imm);
++		__mark_reg_known(&fake_reg, insn->imm);
+ 		fake_reg.type = SCALAR_VALUE;
+ 		save_register_state(state, spi, &fake_reg, size);
+ 	} else if (reg && is_spillable_regtype(reg->type)) {
+@@ -13915,6 +13938,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ 					       *insn_idx))
+ 			return -EFAULT;
++		if (env->log.level & BPF_LOG_LEVEL)
++			print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ 		*insn_idx += insn->off;
+ 		return 0;
+ 	} else if (pred == 0) {
+@@ -13927,6 +13952,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 					       *insn_idx + insn->off + 1,
+ 					       *insn_idx))
+ 			return -EFAULT;
++		if (env->log.level & BPF_LOG_LEVEL)
++			print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ 		return 0;
+ 	}
+ 
+@@ -14558,15 +14585,16 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ 				struct bpf_verifier_env *env,
+ 				bool visit_callee)
+ {
+-	int ret;
++	int ret, insn_sz;
+ 
+-	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
++	insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
++	ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
+ 	if (ret)
+ 		return ret;
+ 
+-	mark_prune_point(env, t + 1);
++	mark_prune_point(env, t + insn_sz);
+ 	/* when we exit from subprog, we need to record non-linear history */
+-	mark_jmp_point(env, t + 1);
++	mark_jmp_point(env, t + insn_sz);
+ 
+ 	if (visit_callee) {
+ 		mark_prune_point(env, t);
+@@ -14588,15 +14616,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ static int visit_insn(int t, struct bpf_verifier_env *env)
+ {
+ 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
+-	int ret;
++	int ret, insn_sz;
+ 
+ 	if (bpf_pseudo_func(insn))
+ 		return visit_func_call_insn(t, insns, env, true);
+ 
+ 	/* All non-branch instructions have a single fall-through edge. */
+ 	if (BPF_CLASS(insn->code) != BPF_JMP &&
+-	    BPF_CLASS(insn->code) != BPF_JMP32)
+-		return push_insn(t, t + 1, FALLTHROUGH, env, false);
++	    BPF_CLASS(insn->code) != BPF_JMP32) {
++		insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
++		return push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
++	}
+ 
+ 	switch (BPF_OP(insn->code)) {
+ 	case BPF_EXIT:
+@@ -14710,11 +14740,21 @@ static int check_cfg(struct bpf_verifier_env *env)
+ 	}
+ 
+ 	for (i = 0; i < insn_cnt; i++) {
++		struct bpf_insn *insn = &env->prog->insnsi[i];
++
+ 		if (insn_state[i] != EXPLORED) {
+ 			verbose(env, "unreachable insn %d\n", i);
+ 			ret = -EINVAL;
+ 			goto err_free;
+ 		}
++		if (bpf_is_ldimm64(insn)) {
++			if (insn_state[i + 1] != 0) {
++				verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
++				ret = -EINVAL;
++				goto err_free;
++			}
++			i++; /* skip second half of ldimm64 */
++		}
+ 	}
+ 	ret = 0; /* cfg looks good */
+ 
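
The check_cfg()/visit_insn() changes above teach the control-flow walk that ldimm64 consumes two instruction slots: fall-through edges advance by the instruction's size, and a branch target landing on the hidden second half is rejected. A toy model of the same walk:

#include <stdbool.h>
#include <stdio.h>

struct insn { bool is_ldimm64; };

static bool target_ok(const struct insn *prog, int len, int target)
{
        /* advance by 2 over a two-slot insn, so its second half is
         * never a visitable position */
        for (int i = 0; i < len; i += prog[i].is_ldimm64 ? 2 : 1)
                if (i == target)
                        return true;
        return target == len;   /* falling off the end is the exit */
}

int main(void)
{
        struct insn prog[4] = { {false}, {true}, {false}, {false} };

        /* slot 2 is the second half of the ldimm64 at slot 1 */
        printf("%d %d\n", target_ok(prog, 4, 3),    /* 1 */
               target_ok(prog, 4, 2));              /* 0 */
        return 0;
}
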
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index f55a40db065f7..096d30ff5c4ec 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3836,14 +3836,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+ 	return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
+ }
+ 
+-static int cgroup_pressure_open(struct kernfs_open_file *of)
+-{
+-	if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+-		return -EPERM;
+-
+-	return 0;
+-}
+-
+ static void cgroup_pressure_release(struct kernfs_open_file *of)
+ {
+ 	struct cgroup_file_ctx *ctx = of->priv;
+@@ -5243,7 +5235,6 @@ static struct cftype cgroup_psi_files[] = {
+ 	{
+ 		.name = "io.pressure",
+ 		.file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
+-		.open = cgroup_pressure_open,
+ 		.seq_show = cgroup_io_pressure_show,
+ 		.write = cgroup_io_pressure_write,
+ 		.poll = cgroup_pressure_poll,
+@@ -5252,7 +5243,6 @@ static struct cftype cgroup_psi_files[] = {
+ 	{
+ 		.name = "memory.pressure",
+ 		.file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
+-		.open = cgroup_pressure_open,
+ 		.seq_show = cgroup_memory_pressure_show,
+ 		.write = cgroup_memory_pressure_write,
+ 		.poll = cgroup_pressure_poll,
+@@ -5261,7 +5251,6 @@ static struct cftype cgroup_psi_files[] = {
+ 	{
+ 		.name = "cpu.pressure",
+ 		.file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
+-		.open = cgroup_pressure_open,
+ 		.seq_show = cgroup_cpu_pressure_show,
+ 		.write = cgroup_cpu_pressure_write,
+ 		.poll = cgroup_pressure_poll,
+@@ -5271,7 +5260,6 @@ static struct cftype cgroup_psi_files[] = {
+ 	{
+ 		.name = "irq.pressure",
+ 		.file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
+-		.open = cgroup_pressure_open,
+ 		.seq_show = cgroup_irq_pressure_show,
+ 		.write = cgroup_irq_pressure_write,
+ 		.poll = cgroup_pressure_poll,
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 26119d2154102..189ba5fd9af4b 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1503,11 +1503,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ 	/*
+ 	 * Ensure that the control task does not run on the to be offlined
+ 	 * CPU to prevent a deadlock against cfs_b->period_timer.
++	 * Also keep at least one housekeeping cpu onlined to avoid generating
++	 * an empty sched_domain span.
+ 	 */
+-	cpu = cpumask_any_but(cpu_online_mask, cpu);
+-	if (cpu >= nr_cpu_ids)
+-		return -EBUSY;
+-	return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
++		if (cpu != work.cpu)
++			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++	}
++	return -EBUSY;
+ }
+ 
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index d5e9ccde3ab8e..3a904d8697c8f 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
+ 	if (panic_timeout)
+ 		return;
+ 
++	debug_locks_off();
++	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ 	if (dbg_kdb_mode)
+ 		kdb_printf("PANIC: %s\n", msg);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e66398c9ffe05..271cb953fb52d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4816,6 +4816,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ 	void *task_ctx_data = NULL;
+ 
+ 	if (!ctx->task) {
++		/*
++		 * perf_pmu_migrate_context() / __perf_pmu_install_event()
++		 * relies on the fact that find_get_pmu_context() cannot fail
++		 * for CPU contexts.
++		 */
+ 		struct perf_cpu_pmu_context *cpc;
+ 
+ 		cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+@@ -12888,6 +12893,9 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ 				     int cpu, struct perf_event *event)
+ {
+ 	struct perf_event_pmu_context *epc;
++	struct perf_event_context *old_ctx = event->ctx;
++
++	get_ctx(ctx); /* normally find_get_context() */
+ 
+ 	event->cpu = cpu;
+ 	epc = find_get_pmu_context(pmu, ctx, event);
+@@ -12896,6 +12904,11 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ 	if (event->state >= PERF_EVENT_STATE_OFF)
+ 		event->state = PERF_EVENT_STATE_INACTIVE;
+ 	perf_install_in_context(ctx, event, cpu);
++
++	/*
++	 * Now that event->ctx is updated and visible, put the old ctx.
++	 */
++	put_ctx(old_ctx);
+ }
+ 
+ static void __perf_pmu_install(struct perf_event_context *ctx,
+@@ -12934,6 +12947,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ 	struct perf_event_context *src_ctx, *dst_ctx;
+ 	LIST_HEAD(events);
+ 
++	/*
++	 * Since per-cpu context is persistent, no need to grab an extra
++	 * reference.
++	 */
+ 	src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
+ 	dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
+ 
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index a0433f37b0243..4a260ceed9c73 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -699,6 +699,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ 		watermark = 0;
+ 	}
+ 
++	/*
++	 * kcalloc_node() is unable to allocate buffer if the size is larger
++	 * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case.
++	 */
++	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
++		return -ENOMEM;
+ 	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ 				     node);
+ 	if (!rb->aux_pages)
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index 5971a66be0347..aae0402507ed7 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ 	BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ 	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ 	BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+-	BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+ 
+ 	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+ 
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index c653cd31548d0..5a452b94b6434 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
+ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ 			     unsigned int clr, unsigned int set)
+ {
+-	unsigned int i = gc->irq_base;
++	unsigned int i, virq;
+ 
+ 	raw_spin_lock(&gc_lock);
+ 	list_del(&gc->list);
+ 	raw_spin_unlock(&gc_lock);
+ 
+-	for (; msk; msk >>= 1, i++) {
++	for (i = 0; msk; msk >>= 1, i++) {
+ 		if (!(msk & 0x01))
+ 			continue;
+ 
++		/*
++		 * Interrupt domain based chips store the base hardware
++		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
++		 * contains the base Linux interrupt number.
++		 */
++		if (gc->domain) {
++			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
++			if (!virq)
++				continue;
++		} else {
++			virq = gc->irq_base + i;
++		}
++
+ 		/* Remove handler first. That will mask the irq line */
+-		irq_set_handler(i, NULL);
+-		irq_set_chip(i, &no_irq_chip);
+-		irq_set_chip_data(i, NULL);
+-		irq_modify_status(i, clr, set);
++		irq_set_handler(virq, NULL);
++		irq_set_chip(virq, &no_irq_chip);
++		irq_set_chip_data(virq, NULL);
++		irq_modify_status(virq, clr, set);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index b4c31a5c11473..79b4a58ba9c3f 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1204,7 +1204,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+ 
+ #define VIRQ_CAN_RESERVE	0x01
+ #define VIRQ_ACTIVATE		0x02
+-#define VIRQ_NOMASK_QUIRK	0x04
+ 
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -1213,8 +1212,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+ 
+ 	if (!(vflags & VIRQ_CAN_RESERVE)) {
+ 		irqd_clr_can_reserve(irqd);
+-		if (vflags & VIRQ_NOMASK_QUIRK)
+-			irqd_set_msi_nomask_quirk(irqd);
+ 
+ 		/*
+ 		 * If the interrupt is managed but no CPU is available to
+@@ -1275,15 +1272,8 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain
+ 	 * Interrupt can use a reserved vector and will not occupy
+ 	 * a real device vector until the interrupt is requested.
+ 	 */
+-	if (msi_check_reservation_mode(domain, info, dev)) {
++	if (msi_check_reservation_mode(domain, info, dev))
+ 		vflags |= VIRQ_CAN_RESERVE;
+-		/*
+-		 * MSI affinity setting requires a special quirk (X86) when
+-		 * reservation mode is active.
+-		 */
+-		if (info->flags & MSI_FLAG_NOMASK_QUIRK)
+-			vflags |= VIRQ_NOMASK_QUIRK;
+-	}
+ 
+ 	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
+ 		if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 92d301f987766..f6067c1bb0893 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -242,7 +242,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+ 		return -EINVAL;
+ 
+-	ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
++	ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
+ 	if (IS_ERR(ksegments))
+ 		return PTR_ERR(ksegments);
+ 
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index 949d3deae5062..270c7f80ce84c 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
+@@ -45,6 +45,7 @@ torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
+ torture_param(int, rt_boost, 2,
+ 		   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
+ torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
++torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
+ torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
+ torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
+ /* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
+@@ -809,7 +810,8 @@ static int lock_torture_writer(void *arg)
+ 	bool skip_main_lock;
+ 
+ 	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
+-	set_user_nice(current, MAX_NICE);
++	if (!rt_task(current))
++		set_user_nice(current, MAX_NICE);
+ 
+ 	do {
+ 		if ((torture_random(&rand) & 0xfffff) == 0)
+@@ -1015,8 +1017,7 @@ static void lock_torture_cleanup(void)
+ 
+ 	if (writer_tasks) {
+ 		for (i = 0; i < cxt.nrealwriters_stress; i++)
+-			torture_stop_kthread(lock_torture_writer,
+-					     writer_tasks[i]);
++			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
+ 		kfree(writer_tasks);
+ 		writer_tasks = NULL;
+ 	}
+@@ -1244,8 +1245,9 @@ static int __init lock_torture_init(void)
+ 			goto create_reader;
+ 
+ 		/* Create writer. */
+-		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
+-						  writer_tasks[i]);
++		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
++						     writer_tasks[i],
++						     writer_fifo ? sched_set_fifo : NULL);
+ 		if (torture_init_error(firsterr))
+ 			goto unwind;
+ 
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 93cca6e698600..7c5a8f05497f2 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -466,7 +466,6 @@ retry:
+ 	} while (!time_after(jiffies, stress->timeout));
+ 
+ 	kfree(order);
+-	kfree(stress);
+ }
+ 
+ struct reorder_lock {
+@@ -531,7 +530,6 @@ out:
+ 	list_for_each_entry_safe(ll, ln, &locks, link)
+ 		kfree(ll);
+ 	kfree(order);
+-	kfree(stress);
+ }
+ 
+ static void stress_one_work(struct work_struct *work)
+@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
+ 			break;
+ 		}
+ 	} while (!time_after(jiffies, stress->timeout));
+-
+-	kfree(stress);
+ }
+ 
+ #define STRESS_INORDER BIT(0)
+@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
+ static int stress(int nlocks, int nthreads, unsigned int flags)
+ {
+ 	struct ww_mutex *locks;
+-	int n;
++	struct stress *stress_array;
++	int n, count;
+ 
+ 	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ 	if (!locks)
+ 		return -ENOMEM;
+ 
++	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
++				     GFP_KERNEL);
++	if (!stress_array) {
++		kfree(locks);
++		return -ENOMEM;
++	}
++
+ 	for (n = 0; n < nlocks; n++)
+ 		ww_mutex_init(&locks[n], &ww_class);
+ 
++	count = 0;
+ 	for (n = 0; nthreads; n++) {
+ 		struct stress *stress;
+ 		void (*fn)(struct work_struct *work);
+@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ 		if (!fn)
+ 			continue;
+ 
+-		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+-		if (!stress)
+-			break;
++		stress = &stress_array[count++];
+ 
+ 		INIT_WORK(&stress->work, fn);
+ 		stress->locks = locks;
+@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ 
+ 	for (n = 0; n < nlocks; n++)
+ 		ww_mutex_destroy(&locks[n]);
++	kfree(stress_array);
+ 	kfree(locks);
+ 
+ 	return 0;
+diff --git a/kernel/padata.c b/kernel/padata.c
+index ff349e1084c1d..179fb1518070c 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ 		*cb_cpu = cpu;
+ 	}
+ 
+-	err =  -EBUSY;
++	err = -EBUSY;
+ 	if ((pinst->flags & PADATA_RESET))
+ 		goto out;
+ 
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 0415d5ecb9772..e9ba7fc87d4ad 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2474,8 +2474,9 @@ static void *get_highmem_page_buffer(struct page *page,
+ 		pbe->copy_page = tmp;
+ 	} else {
+ 		/* Copy of the page will be stored in normal memory */
+-		kaddr = safe_pages_list;
+-		safe_pages_list = safe_pages_list->next;
++		kaddr = __get_safe_page(ca->gfp_mask);
++		if (!kaddr)
++			return ERR_PTR(-ENOMEM);
+ 		pbe->copy_page = virt_to_page(kaddr);
+ 	}
+ 	pbe->next = highmem_pblist;
+@@ -2655,8 +2656,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	pbe->orig_address = page_address(page);
+-	pbe->address = safe_pages_list;
+-	safe_pages_list = safe_pages_list->next;
++	pbe->address = __get_safe_page(ca->gfp_mask);
++	if (!pbe->address)
++		return ERR_PTR(-ENOMEM);
+ 	pbe->next = restore_pblist;
+ 	restore_pblist = pbe;
+ 	return pbe->address;
+@@ -2687,8 +2689,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ 		return 0;
+ 
+-	handle->sync_read = 1;
+-
+ 	if (!handle->cur) {
+ 		if (!buffer)
+ 			/* This makes the buffer be freed by swsusp_free() */
+@@ -2724,7 +2724,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 			memory_bm_position_reset(&orig_bm);
+ 			restore_pblist = NULL;
+ 			handle->buffer = get_buffer(&orig_bm, &ca);
+-			handle->sync_read = 0;
+ 			if (IS_ERR(handle->buffer))
+ 				return PTR_ERR(handle->buffer);
+ 		}
+@@ -2734,9 +2733,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 		handle->buffer = get_buffer(&orig_bm, &ca);
+ 		if (IS_ERR(handle->buffer))
+ 			return PTR_ERR(handle->buffer);
+-		if (handle->buffer != buffer)
+-			handle->sync_read = 0;
+ 	}
++	handle->sync_read = (handle->buffer == buffer);
+ 	handle->cur++;
+ 	return PAGE_SIZE;
+ }
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 253ed509b6abb..25285893e44e7 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
+ 				snp->grplo = cpu;
+ 			snp->grphi = cpu;
+ 		}
+-		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
++		sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
+ 	}
+ 	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+ 	return true;
+@@ -782,8 +782,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ 	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
+ 	rcu_segcblist_advance(&sdp->srcu_cblist,
+ 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+-				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
++	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ 	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
+ 	WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
+ 	WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
+@@ -833,7 +832,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
+ 	int cpu;
+ 
+ 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+-		if (!(mask & (1 << (cpu - snp->grplo))))
++		if (!(mask & (1UL << (cpu - snp->grplo))))
+ 			continue;
+ 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
+ 	}
+@@ -1719,6 +1718,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ 	ssp = sdp->ssp;
+ 	rcu_cblist_init(&ready_cbs);
+ 	spin_lock_irq_rcu_node(sdp);
++	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ 	rcu_segcblist_advance(&sdp->srcu_cblist,
+ 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+ 	if (sdp->srcu_cblist_invoking ||
+@@ -1747,8 +1747,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ 	 */
+ 	spin_lock_irq_rcu_node(sdp);
+ 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
+-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+-				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+ 	sdp->srcu_cblist_invoking = false;
+ 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
+ 	spin_unlock_irq_rcu_node(sdp);
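The grpmask changes above widen the shift from int to unsigned long: when cpu - grplo reaches 31 or more, "1 << n" shifts a 32-bit int out of range, which is undefined behavior in C no matter what type the result is stored in. A standalone illustration, assuming a 64-bit unsigned long as on the targets this code cares about:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int n = 40;              /* e.g. cpu - grplo on a large system */
    	unsigned long mask = 1UL << n;    /* 64-bit shift, well defined */

    	/*
    	 * "1 << n" here would shift a 32-bit int by 40 bits, which is
    	 * undefined behavior even though the result is assigned to an
    	 * unsigned long; hence the 1UL in the srcu grpmask computation.
    	 */
    	printf("mask = %#lx\n", mask);
    	return 0;
    }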
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 1449cb69a0e0e..8f867fbf9b312 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -1552,10 +1553,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
+  */
+ static void rcu_gp_fqs(bool first_time)
+ {
++	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+ 	struct rcu_node *rnp = rcu_get_root();
+ 
+ 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++	WARN_ON_ONCE(nr_fqs > 3);
++	/* Only count down nr_fqs for stall purposes if jiffies moves. */
++	if (nr_fqs) {
++		if (nr_fqs == 1) {
++			WRITE_ONCE(rcu_state.jiffies_stall,
++				   jiffies + rcu_jiffies_till_stall_check());
++		}
++		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++	}
++
+ 	if (first_time) {
+ 		/* Collect dyntick-idle snapshots. */
+ 		force_qs_rnp(dyntick_save_progress_counter);
+@@ -3384,6 +3397,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+ 		success = true;
+ 	}
+ 
++	/*
++	 * The kvfree_rcu() caller considers the pointer freed at this point
++	 * and likely removes any references to it. Since the actual slab
++	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++	 * this object (no scanning or false positives reporting).
++	 */
++	kmemleak_ignore(ptr);
++
+ 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
+ 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+ 		schedule_delayed_monitor_work(krcp);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 192536916f9a6..e9821a8422dbe 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -386,6 +386,10 @@ struct rcu_state {
+ 						/*  in jiffies. */
+ 	unsigned long jiffies_stall;		/* Time at which to check */
+ 						/*  for CPU stalls. */
++	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
++						 * which to read jiffies and set
++						 * jiffies_stall. Stall
++						 * warnings disabled if !0. */
+ 	unsigned long jiffies_resched;		/* Time at which to resched */
+ 						/*  a reluctant CPU. */
+ 	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index b10b8349bb2a4..a2fa6b22e2488 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
+ /**
+  * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+  *
++ * To perform the reset request from the caller, disable stall detection until
++ * 3 fqs loops have passed. This is required to ensure that a fresh
++ * jiffies value is loaded.  It is safe to do this from the fqs loop,
++ * as enough timer interrupts and context switches will have passed.
++ *
+  * The caller must disable hard irqs.
+  */
+ void rcu_cpu_stall_reset(void)
+ {
+-	WRITE_ONCE(rcu_state.jiffies_stall,
+-		   jiffies + rcu_jiffies_till_stall_check());
++	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+ 
+ //////////////////////////////////////////////////////////////////////////////
+@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
+ 	WRITE_ONCE(rcu_state.gp_start, j);
+ 	j1 = rcu_jiffies_till_stall_check();
+ 	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+ 	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+ 	rcu_state.jiffies_resched = j + j1 / 2;
+ 	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -725,6 +731,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
+ 	    !rcu_gp_in_progress())
+ 		return;
+ 	rcu_stall_kick_kthreads();
++
++	/*
++	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++	 * loop has to update jiffies to ensure a non-stale value. This is
++	 * required to have a good jiffies value after coming out of long
++	 * breaks in jiffies updates. Not doing so can cause false positives.
++	 */
++	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++		return;
++
+ 	j = jiffies;
+ 
+ 	/*
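The stall-warning rework above avoids arming a jiffies-based deadline from a context where jiffies may be stale: rcu_cpu_stall_reset() now disables checking outright (jiffies_stall = ULONG_MAX) and asks the FQS loop to re-arm the deadline after three iterations, once jiffies is known to be fresh. A userspace sketch of that countdown pattern, using a monotonic clock in place of jiffies and hypothetical names throughout:

    #include <limits.h>
    #include <stdio.h>
    #include <time.h>

    static unsigned long deadline = ULONG_MAX;   /* checks disabled */
    static int nr_loops_until_rearm;

    static unsigned long now_ms(void)
    {
    	struct timespec ts;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
    }

    static void stall_reset(void)
    {
    	nr_loops_until_rearm = 3;     /* rcu_state.nr_fqs_jiffies_stall = 3 */
    	deadline = ULONG_MAX;         /* jiffies_stall = ULONG_MAX */
    }

    static void periodic_loop(void)
    {
    	/* Re-arm only on the transition 1 -> 0, with a fresh clock: */
    	if (nr_loops_until_rearm && --nr_loops_until_rearm == 0)
    		deadline = now_ms() + 21000;
    }

    int main(void)
    {
    	stall_reset();
    	for (int i = 0; i < 4; i++) {
    		periodic_loop();
    		printf("loop %d: deadline %lu\n", i, deadline);
    	}
    	return 0;
    }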
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 3bba88c7ffc6b..6ebef11c88760 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
+ void emergency_restart(void)
+ {
+ 	kmsg_dump(KMSG_DUMP_EMERG);
++	system_state = SYSTEM_RESTART;
+ 	machine_emergency_restart();
+ }
+ EXPORT_SYMBOL_GPL(emergency_restart);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 149fdb212e20f..460bf0e643842 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5377,8 +5377,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ 	/* switch_mm_cid() requires the memory barriers above. */
+ 	switch_mm_cid(rq, prev, next);
+ 
+-	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ 	prepare_lock_switch(rq, next, rf);
+ 
+ 	/* Here we just switch the register state and the stack. */
+@@ -6634,6 +6632,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 	/* Promote REQ to ACT */
+ 	rq->clock_update_flags <<= 1;
+ 	update_rq_clock(rq);
++	rq->clock_update_flags = RQCF_UPDATED;
+ 
+ 	switch_count = &prev->nivcsw;
+ 
+@@ -6713,8 +6712,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 		/* Also unlocks the rq: */
+ 		rq = context_switch(rq, prev, next, &rf);
+ 	} else {
+-		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ 		rq_unpin_lock(rq, &rf);
+ 		__balance_callbacks(rq);
+ 		raw_spin_rq_unlock_irq(rq);
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 385179dae360e..0a9a3262f7822 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -168,6 +168,8 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+ 
+ static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
+ module_param(csd_lock_timeout, ulong, 0444);
++static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
++module_param(panic_on_ipistall, int, 0444);
+ 
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+ 
+@@ -228,6 +230,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ 	}
+ 
+ 	ts2 = sched_clock();
++	/* How long since we last checked for a stuck CSD lock. */
+ 	ts_delta = ts2 - *ts1;
+ 	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ 		return false;
+@@ -241,9 +244,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ 	else
+ 		cpux = cpu;
+ 	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
++	/* How long since this CSD lock was stuck. */
++	ts_delta = ts2 - ts0;
+ 	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
+-		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
++		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ 		 cpu, csd->func, csd->info);
++	/*
++	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
++	 * to become unstuck. Use a signed comparison to avoid triggering
++	 * on underflows when the TSC is out of sync between sockets.
++	 */
++	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
+ 	if (cpu_cur_csd && csd != cpu_cur_csd) {
+ 		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
+ 			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
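The new panic_on_ipistall check above deliberately compares the elapsed time as a signed value: ts0 and ts2 come from sched_clock() on different CPUs, and if those clocks are slightly skewed the unsigned subtraction underflows into an enormous value that would trip the threshold immediately. Cast to signed, such underflows come out negative and fail the comparison. A small standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    static int stalled_too_long(uint64_t ts0, uint64_t ts2, int64_t timeout_ns)
    {
    	uint64_t ts_delta = ts2 - ts0;   /* may underflow if clocks skew */

    	return timeout_ns > 0 && (int64_t)ts_delta > timeout_ns;
    }

    int main(void)
    {
    	/* ts2 slightly "before" ts0 due to cross-socket clock skew: */
    	printf("skewed: %d\n", stalled_too_long(1000, 990, 5000));   /* 0 */
    	printf("stuck:  %d\n", stalled_too_long(0, 9000, 5000));     /* 1 */
    	return 0;
    }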
+diff --git a/kernel/torture.c b/kernel/torture.c
+index 1a0519b836ac9..cd299ccc4e5d5 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -37,6 +37,7 @@
+ #include <linux/ktime.h>
+ #include <asm/byteorder.h>
+ #include <linux/torture.h>
++#include <linux/sched/rt.h>
+ #include "rcu/rcu.h"
+ 
+ MODULE_LICENSE("GPL");
+@@ -83,14 +84,15 @@ EXPORT_SYMBOL_GPL(verbose_torout_sleep);
+  * nanosecond random fuzz.  This function and its friends desynchronize
+  * testing from the timer wheel.
+  */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++			 struct torture_random_state *trsp)
+ {
+ 	ktime_t hto = baset_ns;
+ 
+ 	if (trsp)
+ 		hto += (torture_random(trsp) >> 3) % fuzzt_ns;
+-	set_current_state(TASK_UNINTERRUPTIBLE);
+-	return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
++	set_current_state(TASK_IDLE);
++	return schedule_hrtimeout(&hto, mode);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
+ 
+@@ -102,7 +104,7 @@ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state
+ {
+ 	ktime_t baset_ns = baset_us * NSEC_PER_USEC;
+ 
+-	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
+ 
+@@ -119,7 +121,7 @@ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state
+ 		fuzzt_ns = (u32)~0U;
+ 	else
+ 		fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
+-	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
+ 
+@@ -132,7 +134,7 @@ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
+ {
+ 	ktime_t baset_ns = jiffies_to_nsecs(baset_j);
+ 
+-	return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
++	return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
+ 
+@@ -149,7 +151,7 @@ int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *
+ 		fuzzt_ns = (u32)~0U;
+ 	else
+ 		fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
+-	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
+ 
+@@ -711,7 +713,7 @@ static void torture_shutdown_cleanup(void)
+  * suddenly applied to or removed from the system.
+  */
+ static struct task_struct *stutter_task;
+-static int stutter_pause_test;
++static ktime_t stutter_till_abs_time;
+ static int stutter;
+ static int stutter_gap;
+ 
+@@ -721,30 +723,16 @@ static int stutter_gap;
+  */
+ bool stutter_wait(const char *title)
+ {
+-	unsigned int i = 0;
+ 	bool ret = false;
+-	int spt;
++	ktime_t till_ns;
+ 
+ 	cond_resched_tasks_rcu_qs();
+-	spt = READ_ONCE(stutter_pause_test);
+-	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+-		if (!ret) {
+-			sched_set_normal(current, MAX_NICE);
+-			ret = true;
+-		}
+-		if (spt == 1) {
+-			schedule_timeout_interruptible(1);
+-		} else if (spt == 2) {
+-			while (READ_ONCE(stutter_pause_test)) {
+-				if (!(i++ & 0xffff))
+-					torture_hrtimeout_us(10, 0, NULL);
+-				cond_resched();
+-			}
+-		} else {
+-			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+-		}
+-		torture_shutdown_absorb(title);
++	till_ns = READ_ONCE(stutter_till_abs_time);
++	if (till_ns && ktime_before(ktime_get(), till_ns)) {
++		torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
++		ret = true;
+ 	}
++	torture_shutdown_absorb(title);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(stutter_wait);
+@@ -755,23 +743,16 @@ EXPORT_SYMBOL_GPL(stutter_wait);
+  */
+ static int torture_stutter(void *arg)
+ {
+-	DEFINE_TORTURE_RANDOM(rand);
+-	int wtime;
++	ktime_t till_ns;
+ 
+ 	VERBOSE_TOROUT_STRING("torture_stutter task started");
+ 	do {
+ 		if (!torture_must_stop() && stutter > 1) {
+-			wtime = stutter;
+-			if (stutter > 2) {
+-				WRITE_ONCE(stutter_pause_test, 1);
+-				wtime = stutter - 3;
+-				torture_hrtimeout_jiffies(wtime, &rand);
+-				wtime = 2;
+-			}
+-			WRITE_ONCE(stutter_pause_test, 2);
+-			torture_hrtimeout_jiffies(wtime, NULL);
++			till_ns = ktime_add_ns(ktime_get(),
++					       jiffies_to_nsecs(stutter));
++			WRITE_ONCE(stutter_till_abs_time, till_ns);
++			torture_hrtimeout_jiffies(stutter - 1, NULL);
+ 		}
+-		WRITE_ONCE(stutter_pause_test, 0);
+ 		if (!torture_must_stop())
+ 			torture_hrtimeout_jiffies(stutter_gap, NULL);
+ 		torture_shutdown_absorb("torture_stutter");
+@@ -926,7 +907,7 @@ EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+  * it starts, you will need to open-code your own.
+  */
+ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+-			    char *f, struct task_struct **tp)
++			    char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp))
+ {
+ 	int ret = 0;
+ 
+@@ -938,6 +919,10 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+ 		*tp = NULL;
+ 		return ret;
+ 	}
++
++	if (cbf)
++		cbf(*tp);
++
+ 	wake_up_process(*tp);  // Process is sleeping, so ordering provided.
+ 	torture_shuffle_task_register(*tp);
+ 	return ret;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 81c4dade3763e..fa8bdedc7a067 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5000,6 +5000,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
+ 	if (ret)
+ 		return ret;
+ 
++	mutex_lock(&event_mutex);
++
++	/* Fail if the file is marked for removal */
++	if (file->flags & EVENT_FILE_FL_FREED) {
++		trace_array_put(file->tr);
++		ret = -ENODEV;
++	} else {
++		event_file_get(file);
++	}
++
++	mutex_unlock(&event_mutex);
++	if (ret)
++		return ret;
++
+ 	filp->private_data = inode->i_private;
+ 
+ 	return 0;
+@@ -5010,6 +5024,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ 	struct trace_event_file *file = inode->i_private;
+ 
+ 	trace_array_put(file->tr);
++	event_file_put(file);
+ 
+ 	return 0;
+ }
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index c98c3f42c3862..2e4717a741857 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1656,6 +1656,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
+ 				     char *glob,
+ 				     struct event_trigger_data *trigger_data);
+ 
++extern void event_file_get(struct trace_event_file *file);
++extern void event_file_put(struct trace_event_file *file);
++
+ /**
+  * struct event_trigger_ops - callbacks for trace event triggers
+  *
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9841589b4af7f..2a9058c5068b5 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -990,26 +990,38 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
+ 	}
+ }
+ 
+-static void remove_event_file_dir(struct trace_event_file *file)
++void event_file_get(struct trace_event_file *file)
+ {
+-	struct dentry *dir = file->dir;
+-	struct dentry *child;
++	atomic_inc(&file->ref);
++}
+ 
+-	if (dir) {
+-		spin_lock(&dir->d_lock);	/* probably unneeded */
+-		list_for_each_entry(child, &dir->d_subdirs, d_child) {
+-			if (d_really_is_positive(child))	/* probably unneeded */
+-				d_inode(child)->i_private = NULL;
+-		}
+-		spin_unlock(&dir->d_lock);
++void event_file_put(struct trace_event_file *file)
++{
++	if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
++		if (file->flags & EVENT_FILE_FL_FREED)
++			kmem_cache_free(file_cachep, file);
++		return;
++	}
+ 
+-		tracefs_remove(dir);
++	if (atomic_dec_and_test(&file->ref)) {
++		/* Count should only go to zero when it is freed */
++		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
++			return;
++		kmem_cache_free(file_cachep, file);
+ 	}
++}
++
++static void remove_event_file_dir(struct trace_event_file *file)
++{
++	struct dentry *dir = file->dir;
++
++	tracefs_remove(dir);
+ 
+ 	list_del(&file->list);
+ 	remove_subsystem(file->system);
+ 	free_event_filter(file->filter);
+-	kmem_cache_free(file_cachep, file);
++	file->flags |= EVENT_FILE_FL_FREED;
++	event_file_put(file);
+ }
+ 
+ /*
+@@ -1382,7 +1394,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ 		flags = file->flags;
+ 	mutex_unlock(&event_mutex);
+ 
+-	if (!file)
++	if (!file || flags & EVENT_FILE_FL_FREED)
+ 		return -ENODEV;
+ 
+ 	if (flags & EVENT_FILE_FL_ENABLED &&
+@@ -1420,7 +1432,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		ret = -ENODEV;
+ 		mutex_lock(&event_mutex);
+ 		file = event_file_data(filp);
+-		if (likely(file))
++		if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
+ 			ret = ftrace_event_enable_disable(file, val);
+ 		mutex_unlock(&event_mutex);
+ 		break;
+@@ -1694,7 +1706,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ 
+ 	mutex_lock(&event_mutex);
+ 	file = event_file_data(filp);
+-	if (file)
++	if (file && !(file->flags & EVENT_FILE_FL_FREED))
+ 		print_event_filter(file, s);
+ 	mutex_unlock(&event_mutex);
+ 
+@@ -2810,6 +2822,7 @@ trace_create_new_event(struct trace_event_call *call,
+ 	atomic_set(&file->tm_ref, 0);
+ 	INIT_LIST_HEAD(&file->triggers);
+ 	list_add(&file->list, &tr->events);
++	event_file_get(file);
+ 
+ 	return file;
+ }
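The trace_event_file changes above convert an immediate kmem_cache_free() into reference counting: creation takes one reference, each open file takes another, removal sets EVENT_FILE_FL_FREED and drops the creation reference, and the memory is released only when the final put sees the FREED flag. A compact userspace model of that lifetime, using C11 atomics and hypothetical names:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct event_file {
    	atomic_int ref;
    	int freed;                 /* EVENT_FILE_FL_FREED analogue */
    };

    static void file_get(struct event_file *f)
    {
    	atomic_fetch_add(&f->ref, 1);
    }

    static void file_put(struct event_file *f)
    {
    	if (atomic_fetch_sub(&f->ref, 1) == 1) {
    		/* Count reaches zero only after the owner marked it freed. */
    		if (f->freed)
    			free(f);
    	}
    }

    int main(void)
    {
    	struct event_file *f = calloc(1, sizeof(*f));

    	atomic_init(&f->ref, 1);   /* creation reference */
    	file_get(f);               /* open() takes a reference */
    	f->freed = 1;              /* remove_event_file_dir() */
    	file_put(f);               /* ...drops the creation ref */
    	file_put(f);               /* release() frees for real */
    	printf("released\n");
    	return 0;
    }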
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 1dad64267878c..5e2a422a58303 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -2088,6 +2088,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
+ 	struct event_filter *filter = NULL;
+ 	int err;
+ 
++	if (file->flags & EVENT_FILE_FL_FREED)
++		return -ENODEV;
++
+ 	if (!strcmp(strstrip(filter_string), "0")) {
+ 		filter_disable(file);
+ 		filter = event_filter(file);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 14cb275a0bab0..846e02c0fb59a 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+ 
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 		if ((unsigned long)str_val < TASK_SIZE)
+-			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
++			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ 		else
+ #endif
+ 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index d0b6b390ee423..778b4056700ff 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -331,7 +331,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
+ 	    filter.__reserved != 0)
+ 		return -EINVAL;
+ 
+-	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
++	tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
+ 	if (IS_ERR(tf))
+ 		return PTR_ERR(tf);
+ 
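memdup_array_user() exists because nr_filters here is user-controlled: a plain nr * size multiplication can wrap and allocate a short buffer that the subsequent copy overruns. A userspace sketch of the overflow-checked duplication, helper names hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *memdup_array(const void *src, size_t nr, size_t size)
    {
    	size_t bytes;

    	if (size && nr > SIZE_MAX / size)   /* nr * size would wrap */
    		return NULL;
    	bytes = nr * size;

    	void *dst = malloc(bytes ? bytes : 1);
    	if (dst)
    		memcpy(dst, src, bytes);
    	return dst;
    }

    int main(void)
    {
    	int filters[4] = { 1, 2, 3, 4 };
    	int *copy = memdup_array(filters, 4, sizeof(*filters));

    	printf("%s, copy[3]=%d\n", copy ? "ok" : "overflow",
    	       copy ? copy[3] : -1);
    	free(copy);
    	return 0;
    }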
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index d145305d95fe8..5cd6d4e269157 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -283,6 +283,13 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+ static unsigned long soft_lockup_nmi_warn;
+ 
++static int __init softlockup_panic_setup(char *str)
++{
++	softlockup_panic = simple_strtoul(str, NULL, 0);
++	return 1;
++}
++__setup("softlockup_panic=", softlockup_panic_setup);
++
+ static int __init nowatchdog_setup(char *str)
+ {
+ 	watchdog_user_enabled = 0;
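The added __setup() handler restores parsing of the softlockup_panic= boot parameter. Conceptually, an early-boot handler like this receives only the text after the '=' and parses it into a global flag before normal module parameters are processed. A rough userspace analogue, names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int softlockup_panic;

    static int softlockup_panic_setup(const char *str)
    {
    	softlockup_panic = strtoul(str, NULL, 0); /* like simple_strtoul() */
    	return 1;                                 /* parameter consumed */
    }

    int main(void)
    {
    	const char *cmdline = "softlockup_panic=1";
    	const char *prefix = "softlockup_panic=";

    	if (!strncmp(cmdline, prefix, strlen(prefix)))
    		softlockup_panic_setup(cmdline + strlen(prefix));

    	printf("softlockup_panic=%d\n", softlockup_panic);
    	return 0;
    }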
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index e4a37d7a6752d..a7fcb25417726 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5571,50 +5571,54 @@ static void work_for_cpu_fn(struct work_struct *work)
+ }
+ 
+ /**
+- * work_on_cpu - run a function in thread context on a particular cpu
++ * work_on_cpu_key - run a function in thread context on a particular cpu
+  * @cpu: the cpu to run on
+  * @fn: the function to run
+  * @arg: the function arg
++ * @key: The lock class key for lock debugging purposes
+  *
+  * It is up to the caller to ensure that the cpu doesn't go offline.
+  * The caller must not hold any locks which would prevent @fn from completing.
+  *
+  * Return: The value @fn returns.
+  */
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++		     void *arg, struct lock_class_key *key)
+ {
+ 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+ 
+-	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++	INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
+ 	schedule_work_on(cpu, &wfc.work);
+ 	flush_work(&wfc.work);
+ 	destroy_work_on_stack(&wfc.work);
+ 	return wfc.ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu);
++EXPORT_SYMBOL_GPL(work_on_cpu_key);
+ 
+ /**
+- * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
+  * @cpu: the cpu to run on
+  * @fn:  the function to run
+  * @arg: the function argument
++ * @key: The lock class key for lock debugging purposes
+  *
+  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
+  * any locks which would prevent @fn from completing.
+  *
+  * Return: The value @fn returns.
+  */
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++			  void *arg, struct lock_class_key *key)
+ {
+ 	long ret = -ENODEV;
+ 
+ 	cpus_read_lock();
+ 	if (cpu_online(cpu))
+-		ret = work_on_cpu(cpu, fn, arg);
++		ret = work_on_cpu_key(cpu, fn, arg, key);
+ 	cpus_read_unlock();
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
++EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516..7dfa88282b006 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -166,6 +166,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ 	struct genradix_root *r;
+ 	struct genradix_node *n;
+ 	unsigned level, i;
++
++	if (iter->offset == SIZE_MAX)
++		return NULL;
++
+ restart:
+ 	r = READ_ONCE(radix->root);
+ 	if (!r)
+@@ -184,10 +188,17 @@ restart:
+ 			(GENRADIX_ARY - 1);
+ 
+ 		while (!n->children[i]) {
++			size_t objs_per_ptr = genradix_depth_size(level);
++
++			if (iter->offset + objs_per_ptr < iter->offset) {
++				iter->offset	= SIZE_MAX;
++				iter->pos	= SIZE_MAX;
++				return NULL;
++			}
++
+ 			i++;
+-			iter->offset = round_down(iter->offset +
+-					   genradix_depth_size(level),
+-					   genradix_depth_size(level));
++			iter->offset = round_down(iter->offset + objs_per_ptr,
++						  objs_per_ptr);
+ 			iter->pos = (iter->offset >> PAGE_SHIFT) *
+ 				objs_per_page;
+ 			if (i == GENRADIX_ARY)
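The iterator fix above uses the classic unsigned-overflow test: because unsigned addition wraps modulo 2^N, "a + b < a" is true exactly when the sum overflowed. On overflow the iterator parks its offset at SIZE_MAX, which the new early return at the top of __genradix_iter_peek() treats as a sticky end marker. Standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int advance(size_t *offset, size_t step)
    {
    	if (*offset + step < *offset) {   /* wrapped past SIZE_MAX */
    		*offset = SIZE_MAX;       /* sticky end-of-iteration */
    		return 0;
    	}
    	*offset += step;
    	return 1;
    }

    int main(void)
    {
    	size_t off = SIZE_MAX - 8;

    	printf("step ok:  %d (off=%zu)\n", advance(&off, 4), off);
    	printf("step end: %d (off=%zu)\n", advance(&off, 16), off);
    	return 0;
    }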
+diff --git a/mm/cma.c b/mm/cma.c
+index a4cfe995e11e7..f0c9d73ddb658 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -501,7 +501,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ 	 */
+ 	if (page) {
+ 		for (i = 0; i < count; i++)
+-			page_kasan_tag_reset(page + i);
++			page_kasan_tag_reset(nth_page(page, i));
+ 	}
+ 
+ 	if (ret && !no_warn) {
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index eb9580942a5c3..747af25ba9109 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -476,20 +476,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
+ static unsigned int damon_accesses_bp_to_nr_accesses(
+ 		unsigned int accesses_bp, struct damon_attrs *attrs)
+ {
+-	unsigned int max_nr_accesses =
+-		attrs->aggr_interval / attrs->sample_interval;
+-
+-	return accesses_bp * max_nr_accesses / 10000;
++	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
+ }
+ 
+ /* convert nr_accesses to access ratio in bp (per 10,000) */
+ static unsigned int damon_nr_accesses_to_accesses_bp(
+ 		unsigned int nr_accesses, struct damon_attrs *attrs)
+ {
+-	unsigned int max_nr_accesses =
+-		attrs->aggr_interval / attrs->sample_interval;
+-
+-	return nr_accesses * 10000 / max_nr_accesses;
++	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
+ }
+ 
+ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 7b8fce2f67a8d..3071e08e8b8f8 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -193,9 +193,7 @@ static int damon_lru_sort_apply_parameters(void)
+ 	if (err)
+ 		return err;
+ 
+-	/* aggr_interval / sample_interval is the maximum nr_accesses */
+-	hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+-		damon_lru_sort_mon_attrs.sample_interval *
++	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ 		hot_thres_access_freq / 1000;
+ 	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ 	if (!scheme)
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index e940802a15a41..5f27ba598350f 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
+ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ 			struct damos *s)
+ {
+-	unsigned int max_nr_accesses;
+ 	int freq_subscore;
+ 	unsigned int age_in_sec;
+ 	int age_in_log, age_subscore;
+@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ 	unsigned int age_weight = s->quota.weight_age;
+ 	int hotness;
+ 
+-	max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
+-	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
++	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
++		damon_max_nr_accesses(&c->attrs);
+ 
+ 	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
+ 	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index 50cf89dcd898b..ebc77c1baab96 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -125,6 +125,9 @@ damon_sysfs_scheme_regions_alloc(void)
+ 	struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
+ 			GFP_KERNEL);
+ 
++	if (!regions)
++		return NULL;
++
+ 	regions->kobj = (struct kobject){};
+ 	INIT_LIST_HEAD(&regions->regions_list);
+ 	regions->nr_regions = 0;
+@@ -1649,6 +1652,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
+ 
+ 	sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
+ 	region = damon_sysfs_scheme_region_alloc(r);
++	if (!region)
++		return 0;
+ 	list_add_tail(&region->list, &sysfs_regions->regions_list);
+ 	sysfs_regions->nr_regions++;
+ 	if (kobject_init_and_add(&region->kobj,
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index df165820c6054..e25dd9ba81876 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1144,58 +1144,75 @@ destroy_targets_out:
+ 	return err;
+ }
+ 
+-/*
+- * Search a target in a context that corresponds to the sysfs target input.
+- *
+- * Return: pointer to the target if found, NULL if not found, or negative
+- * error code if the search failed.
+- */
+-static struct damon_target *damon_sysfs_existing_target(
+-		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
++static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
+ {
+-	struct pid *pid;
+-	struct damon_target *t;
++	struct pid *pid_new;
+ 
+-	if (!damon_target_has_pid(ctx)) {
+-		/* Up to only one target for paddr could exist */
+-		damon_for_each_target(t, ctx)
+-			return t;
+-		return NULL;
++	pid_new = find_get_pid(pid);
++	if (!pid_new)
++		return -EINVAL;
++
++	if (pid_new == target->pid) {
++		put_pid(pid_new);
++		return 0;
+ 	}
+ 
+-	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
+-	pid = find_get_pid(sys_target->pid);
+-	if (!pid)
+-		return ERR_PTR(-EINVAL);
+-	damon_for_each_target(t, ctx) {
+-		if (t->pid == pid) {
+-			put_pid(pid);
+-			return t;
+-		}
++	put_pid(target->pid);
++	target->pid = pid_new;
++	return 0;
++}
++
++static int damon_sysfs_update_target(struct damon_target *target,
++		struct damon_ctx *ctx,
++		struct damon_sysfs_target *sys_target)
++{
++	int err = 0;
++
++	if (damon_target_has_pid(ctx)) {
++		err = damon_sysfs_update_target_pid(target, sys_target->pid);
++		if (err)
++			return err;
+ 	}
+-	put_pid(pid);
+-	return NULL;
++
++	/*
++	 * Do monitoring target region boundary update only if one or more
++	 * regions are set by the user.  This keeps the current monitoring
++	 * results and target ranges intact, especially for ops that update
++	 * monitoring target regions dynamically, such as 'vaddr'.
++	 */
++	if (sys_target->regions->nr)
++		err = damon_sysfs_set_regions(target, sys_target->regions);
++	return err;
+ }
+ 
+ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ 		struct damon_sysfs_targets *sysfs_targets)
+ {
+-	int i, err;
++	struct damon_target *t, *next;
++	int i = 0, err;
+ 
+ 	/* Multiple physical address space monitoring targets makes no sense */
+ 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
+ 		return -EINVAL;
+ 
+-	for (i = 0; i < sysfs_targets->nr; i++) {
++	damon_for_each_target_safe(t, next, ctx) {
++		if (i < sysfs_targets->nr) {
++			err = damon_sysfs_update_target(t, ctx,
++					sysfs_targets->targets_arr[i]);
++			if (err)
++				return err;
++		} else {
++			if (damon_target_has_pid(ctx))
++				put_pid(t->pid);
++			damon_destroy_target(t);
++		}
++		i++;
++	}
++
++	for (; i < sysfs_targets->nr; i++) {
+ 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
+-		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
+-
+-		if (IS_ERR(t))
+-			return PTR_ERR(t);
+-		if (!t)
+-			err = damon_sysfs_add_target(st, ctx);
+-		else
+-			err = damon_sysfs_set_regions(t, st->regions);
++
++		err = damon_sysfs_add_target(st, ctx);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 164d22365bdee..dfffd1df12a1d 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2759,13 +2759,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 			int nr = folio_nr_pages(folio);
+ 
+ 			xas_split(&xas, folio, folio_order(folio));
+-			if (folio_test_swapbacked(folio)) {
+-				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
+-							-nr);
+-			} else {
+-				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
+-							-nr);
+-				filemap_nr_thps_dec(mapping);
++			if (folio_test_pmd_mappable(folio)) {
++				if (folio_test_swapbacked(folio)) {
++					__lruvec_stat_mod_folio(folio,
++							NR_SHMEM_THPS, -nr);
++				} else {
++					__lruvec_stat_mod_folio(folio,
++							NR_FILE_THPS, -nr);
++					filemap_nr_thps_dec(mapping);
++				}
+ 			}
+ 		}
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 097b81c37597e..9951fb7412cc7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6521,13 +6521,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ 	struct page *page = NULL;
+ 	spinlock_t *ptl;
+ 	pte_t *pte, entry;
+-
+-	/*
+-	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
+-	 * follow_hugetlb_page().
+-	 */
+-	if (WARN_ON_ONCE(flags & FOLL_PIN))
+-		return NULL;
++	int ret;
+ 
+ 	hugetlb_vma_lock_read(vma);
+ 	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
+@@ -6537,8 +6531,23 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ 	ptl = huge_pte_lock(h, mm, pte);
+ 	entry = huge_ptep_get(pte);
+ 	if (pte_present(entry)) {
+-		page = pte_page(entry) +
+-				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
++		page = pte_page(entry);
++
++		if (!huge_pte_write(entry)) {
++			if (flags & FOLL_WRITE) {
++				page = NULL;
++				goto out;
++			}
++
++			if (gup_must_unshare(vma, flags, page)) {
++				/* Tell the caller to do unsharing */
++				page = ERR_PTR(-EMLINK);
++				goto out;
++			}
++		}
++
++		page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
++
+ 		/*
+ 		 * Note that page may be a sub-page, and with vmemmap
+ 		 * optimizations the page struct may be read only.
+@@ -6548,8 +6557,10 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ 		 * try_grab_page() should always be able to get the page here,
+ 		 * because we hold the ptl lock and have verified pte_present().
+ 		 */
+-		if (try_grab_page(page, flags)) {
+-			page = NULL;
++		ret = try_grab_page(page, flags);
++
++		if (WARN_ON_ONCE(ret)) {
++			page = ERR_PTR(ret);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 339dd2ccc9333..9f0c38c409cea 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2868,7 +2868,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+  * Moreover, it should not come from DMA buffer and is not readily
+  * reclaimable. So those GFP bits should be masked off.
+  */
+-#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
++				 __GFP_ACCOUNT | __GFP_NOFAIL)
+ 
+ /*
+  * mod_objcg_mlstate() may be called with irq enabled, so
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 3f231cf1b4106..dea98fc562c16 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1586,7 +1586,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
+ 		 */
+ 		if (HPageMigratable(head))
+ 			goto found;
+-		skip = compound_nr(head) - (page - head);
++		skip = compound_nr(head) - (pfn - page_to_pfn(head));
+ 		pfn += skip - 1;
+ 	}
+ 	return -ENOENT;
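The scan_movable_pages() fix swaps struct-page pointer arithmetic for pfn arithmetic: struct page arrays are only guaranteed contiguous within a memory section, so "page - head" is invalid when a compound page spans sections, while page frame numbers are globally linear. A toy illustration with two disjoint metadata arrays standing in for sections (names hypothetical):

    #include <stdio.h>

    struct page_meta { unsigned long pfn; };

    int main(void)
    {
    	/* Two separately allocated "sections" of page metadata: */
    	struct page_meta sec0[4] = { {0}, {1}, {2}, {3} };
    	struct page_meta sec1[4] = { {4}, {5}, {6}, {7} };

    	struct page_meta *head = &sec0[0];   /* compound head, pfn 0 */
    	struct page_meta *page = &sec1[2];   /* tail page, pfn 6 */
    	unsigned long compound_nr = 8;

    	/* Wrong in general: "page - head" crosses allocations (UB here).
    	 * Right: subtract frame numbers, which are globally linear:     */
    	unsigned long skip = compound_nr - (page->pfn - head->pfn);

    	printf("skip %lu pages past the compound page\n", skip);  /* 2 */
    	return 0;
    }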
+diff --git a/net/9p/client.c b/net/9p/client.c
+index b0e7cb7e1a54a..e265a0ca6bddd 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1981,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ 		goto error;
+ 	}
+ 	p9_debug(P9_DEBUG_9P,
+-		 ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
++		 ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
+ 		 file_fid->fid, attr_fid->fid, attr_name);
+ 
+ 	req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 00b684616e8d9..9374790f17ce4 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -832,14 +832,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ 		goto out_free_ts;
+ 	if (!(ts->rd->f_mode & FMODE_READ))
+ 		goto out_put_rd;
+-	/* prevent workers from hanging on IO when fd is a pipe */
+-	ts->rd->f_flags |= O_NONBLOCK;
++	/* Prevent workers from hanging on IO when fd is a pipe.
++	 * It's technically possible for userspace or concurrent mounts to
++	 * modify this flag concurrently, which will likely result in a
++	 * broken filesystem. However, just having bad flags here should
++	 * not crash the kernel or cause any other sort of bug, so mark this
++	 * particular data race as intentional so that tooling (like KCSAN)
++	 * can allow it and detect further problems.
++	 */
++	data_race(ts->rd->f_flags |= O_NONBLOCK);
+ 	ts->wr = fget(wfd);
+ 	if (!ts->wr)
+ 		goto out_put_rd;
+ 	if (!(ts->wr->f_mode & FMODE_WRITE))
+ 		goto out_put_wr;
+-	ts->wr->f_flags |= O_NONBLOCK;
++	data_race(ts->wr->f_flags |= O_NONBLOCK);
+ 
+ 	client->trans = ts;
+ 	client->status = Connected;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 4e03642488230..c090627ff9751 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -172,13 +172,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ 	}
+ 
+-	hci_conn_del_sysfs(conn);
+-
+ 	debugfs_remove_recursive(conn->debugfs);
+ 
+-	hci_dev_put(hdev);
++	hci_conn_del_sysfs(conn);
+ 
+-	hci_conn_put(conn);
++	hci_dev_put(hdev);
+ }
+ 
+ static void hci_acl_create_connection(struct hci_conn *conn)
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 15b33579007cb..367e32fe30eb8 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	BT_DBG("conn %p", conn);
++	bt_dev_dbg(hdev, "conn %p", conn);
+ 
+ 	conn->dev.type = &bt_link;
+ 	conn->dev.class = &bt_class;
+@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	BT_DBG("conn %p", conn);
++	bt_dev_dbg(hdev, "conn %p", conn);
+ 
+ 	if (device_is_registered(&conn->dev))
+ 		return;
+ 
+ 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+ 
+-	if (device_add(&conn->dev) < 0) {
++	if (device_add(&conn->dev) < 0)
+ 		bt_dev_err(hdev, "failed to register connection device");
+-		return;
+-	}
+-
+-	hci_dev_hold(hdev);
+ }
+ 
+ void hci_conn_del_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	if (!device_is_registered(&conn->dev))
++	bt_dev_dbg(hdev, "conn %p", conn);
++
++	if (!device_is_registered(&conn->dev)) {
++		/* If device_add() has *not* succeeded, use *only* put_device()
++		 * to drop the reference count.
++		 */
++		put_device(&conn->dev);
+ 		return;
++	}
+ 
+ 	while (1) {
+ 		struct device *dev;
+@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ 		put_device(dev);
+ 	}
+ 
+-	device_del(&conn->dev);
+-
+-	hci_dev_put(hdev);
++	device_unregister(&conn->dev);
+ }
+ 
+ static void bt_host_release(struct device *dev)
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 71056ee847736..0fcf357ea7ad3 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ 	ktime_t tstamp = skb->tstamp;
+ 	struct ip_frag_state state;
+ 	struct iphdr *iph;
+-	int err;
++	int err = 0;
+ 
+ 	/* for offloaded checksums cleanup checksum before fragmentation */
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/core/sock.c b/net/core/sock.c
+index eef27812013a4..6df04c705200a 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+ 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+ 			       dst, cookie) == NULL) {
+ 		sk_tx_queue_clear(sk);
+-		sk->sk_dst_pending_confirm = 0;
++		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
+ 		dst_release(dst);
+ 		return NULL;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 60cffabfd4788..c8c2704a320f1 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -731,12 +731,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ 		if (err)
+ 			goto unlock;
+ 	}
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ 		sk->sk_family == AF_INET6)
+ 		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
+ 	else
+ 		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
+-	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ 	spin_unlock(&ilb2->lock);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index afa819eede6a3..c2403fea8ec9a 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1316,7 +1316,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
+ 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ 
+-	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
++	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
+ 
+ 	/* Build TCP header and checksum it. */
+ 	th = (struct tcphdr *)skb->data;
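The sk_dst_pending_confirm changes annotate a lockless reader/writer pair: the WRITE_ONCE() in __sk_dst_check() matches the READ_ONCE() here, telling the compiler not to tear, fuse, or re-load the access. C11 relaxed atomics give roughly the same guarantee, as in this sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int dst_pending_confirm;

    static void writer(void)
    {
    	atomic_store_explicit(&dst_pending_confirm, 0,
    			      memory_order_relaxed);     /* WRITE_ONCE() */
    }

    static unsigned int reader(void)
    {
    	return atomic_load_explicit(&dst_pending_confirm,
    				    memory_order_relaxed); /* READ_ONCE() */
    }

    int main(void)
    {
    	atomic_store_explicit(&dst_pending_confirm, 1, memory_order_relaxed);
    	writer();
    	printf("pending=%u\n", reader());
    	return 0;
    }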
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0e3a1753a51c6..715da615f0359 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3121,6 +3121,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
+ 	else
+ 		*dbm = sdata->vif.bss_conf.txpower;
+ 
++	/* INT_MIN indicates no power level was set yet */
++	if (*dbm == INT_MIN)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 5692daf57a4d8..c5988c7f9e81c 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1537,8 +1537,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ 	struct mptcp_pm_addr_entry *entry;
+ 
+ 	list_for_each_entry(entry, rm_list, list) {
+-		remove_anno_list_by_saddr(msk, &entry->addr);
+-		if (alist.nr < MPTCP_RM_IDS_MAX)
++		if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
++		     lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
++		    alist.nr < MPTCP_RM_IDS_MAX)
+ 			alist.ids[alist.nr++] = entry->addr.id;
+ 	}
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 636580c4736c9..974a096293d08 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1233,6 +1233,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ 	mptcp_do_fallback(ssk);
+ }
+ 
++#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
++
+ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 			      struct mptcp_data_frag *dfrag,
+ 			      struct mptcp_sendmsg_info *info)
+@@ -1259,6 +1261,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		return -EAGAIN;
+ 
+ 	/* compute send limit */
++	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
++		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
+ 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ 	copy = info->size_goal;
+ 
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index a3f1fe810cc96..7b878ea29e3a1 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -737,8 +737,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ 	val = inet_sk(sk)->tos;
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		bool slow;
+ 
++		slow = lock_sock_fast(ssk);
+ 		__ip_sock_set_tos(ssk, val);
++		unlock_sock_fast(ssk, slow);
+ 	}
+ 	release_sock(sk);
+ 
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index f8854bff286cb..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ 	if ((had_link == has_link) || chained)
+ 		return 0;
+ 
+-	if (had_link)
+-		netif_carrier_off(ndp->ndev.dev);
+-	else
+-		netif_carrier_on(ndp->ndev.dev);
+-
+ 	if (!ndp->multi_package && !nc->package->multi_channel) {
+ 		if (had_link) {
+ 			ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8776266ba1532..db582c8d25f00 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6461,6 +6461,12 @@ static int nft_setelem_deactivate(const struct net *net,
+ 	return ret;
+ }
+ 
++static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
++{
++	list_del_rcu(&catchall->list);
++	kfree_rcu(catchall, rcu);
++}
++
+ static void nft_setelem_catchall_remove(const struct net *net,
+ 					const struct nft_set *set,
+ 					const struct nft_set_elem *elem)
+@@ -6469,8 +6475,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
+ 
+ 	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ 		if (catchall->elem == elem->priv) {
+-			list_del_rcu(&catchall->list);
+-			kfree_rcu(catchall, rcu);
++			nft_setelem_catchall_destroy(catchall);
+ 			break;
+ 		}
+ 	}
+@@ -7202,10 +7207,11 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+ 
+ 		if (err < 0) {
+ 			NL_SET_BAD_ATTR(extack, attr);
+-			break;
++			return err;
+ 		}
+ 	}
+-	return err;
++
++	return 0;
+ }
+ 
+ /*
+@@ -9631,9 +9637,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ 	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+ 
+-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+-						  unsigned int gc_seq,
+-						  bool sync)
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq)
+ {
+ 	struct nft_set_elem_catchall *catchall;
+ 	const struct nft_set *set = gc->set;
+@@ -9649,11 +9654,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+ 
+ 		nft_set_elem_dead(ext);
+ dead_elem:
+-		if (sync)
+-			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+-		else
+-			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+-
++		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ 		if (!gc)
+ 			return NULL;
+ 
+@@ -9663,15 +9664,34 @@ dead_elem:
+ 	return gc;
+ }
+ 
+-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+-						 unsigned int gc_seq)
+-{
+-	return nft_trans_gc_catchall(gc, gc_seq, false);
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+-	return nft_trans_gc_catchall(gc, 0, true);
++	struct nft_set_elem_catchall *catchall, *next;
++	const struct nft_set *set = gc->set;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++
++	WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
++
++	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++
++		if (!nft_set_elem_expired(ext))
++			continue;
++
++		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
++		if (!gc)
++			return NULL;
++
++		memset(&elem, 0, sizeof(elem));
++		elem.priv = catchall->elem;
++
++		nft_setelem_data_deactivate(gc->net, gc->set, &elem);
++		nft_setelem_catchall_destroy(catchall);
++		nft_trans_gc_elem_add(gc, elem.priv);
++	}
++
++	return gc;
+ }
+ 
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index e596d1a842f70..f6e791a681015 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 
+ 	switch (priv->size) {
+ 	case 8: {
++		u64 *dst64 = (void *)dst;
+ 		u64 src64;
+ 
+ 		switch (priv->op) {
+ 		case NFT_BYTEORDER_NTOH:
+ 			for (i = 0; i < priv->len / 8; i++) {
+ 				src64 = nft_reg_load64(&src[i]);
+-				nft_reg_store64(&dst[i],
++				nft_reg_store64(&dst64[i],
+ 						be64_to_cpu((__force __be64)src64));
+ 			}
+ 			break;
+@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 			for (i = 0; i < priv->len / 8; i++) {
+ 				src64 = (__force __u64)
+ 					cpu_to_be64(nft_reg_load64(&src[i]));
+-				nft_reg_store64(&dst[i], src64);
++				nft_reg_store64(&dst64[i], src64);
+ 			}
+ 			break;
+ 		}
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 8fdc7318c03c7..715484665a907 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
+ {
+ 	switch (key) {
+ 	case NFT_META_TIME_NS:
+-		nft_reg_store64(dest, ktime_get_real_ns());
++		nft_reg_store64((u64 *)dest, ktime_get_real_ns());
+ 		break;
+ 	case NFT_META_TIME_DAY:
+ 		nft_reg_store8(dest, nft_meta_weekday());
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index d131750663c3c..ea05d0b2df68a 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1534,6 +1534,9 @@ static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ 	if (bind) {
+ 		struct flow_action_entry *entry = entry_data;
+ 
++		if (tcf_ct_helper(act))
++			return -EOPNOTSUPP;
++
+ 		entry->id = FLOW_ACTION_CT;
+ 		entry->ct.action = tcf_ct_action(act);
+ 		entry->ct.zone = tcf_ct_zone(act);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9fb0ccabc1a26..a148aa8003b88 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
+ 
+ 	pipefs_sb = rpc_get_sb_net(net);
+ 	if (pipefs_sb) {
+-		__rpc_clnt_remove_pipedir(clnt);
++		if (pipefs_sb == clnt->pipefs_sb)
++			__rpc_clnt_remove_pipedir(clnt);
+ 		rpc_put_sb_net(net);
+ 	}
+ }
+@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
+ {
+ 	struct dentry *dentry;
+ 
++	clnt->pipefs_sb = pipefs_sb;
++
+ 	if (clnt->cl_program->pipe_dir_name != NULL) {
+ 		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ 		if (IS_ERR(dentry))
+@@ -2169,6 +2172,7 @@ call_connect_status(struct rpc_task *task)
+ 	task->tk_status = 0;
+ 	switch (status) {
+ 	case -ECONNREFUSED:
++	case -ECONNRESET:
+ 		/* A positive refusal suggests a rebind is needed. */
+ 		if (RPC_IS_SOFTCONN(task))
+ 			break;
+@@ -2177,7 +2181,6 @@ call_connect_status(struct rpc_task *task)
+ 			goto out_retry;
+ 		}
+ 		fallthrough;
+-	case -ECONNRESET:
+ 	case -ECONNABORTED:
+ 	case -ENETDOWN:
+ 	case -ENETUNREACH:
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5988a5c5ff3f0..102c3818bc54d 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
+ 
+ 	child = rpcb_call_async(rpcb_clnt, map, proc);
+ 	rpc_release_client(rpcb_clnt);
++	if (IS_ERR(child)) {
++		/* rpcb_map_release() has freed the arguments */
++		return;
++	}
+ 
+ 	xprt->stat.bind_count++;
+ 	rpc_put_task(child);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 85c8bcaebb80f..3b05f90a3e50d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -852,7 +852,8 @@ out_readfail:
+ 	if (ret == -EINVAL)
+ 		svc_rdma_send_error(rdma_xprt, ctxt, ret);
+ 	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+-	return ret;
++	svc_xprt_deferred_close(xprt);
++	return -ENOTCONN;
+ 
+ out_backchannel:
+ 	svc_rdma_handle_bc_reply(rqstp, ctxt);
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 9b47c84092319..42d9586365ae3 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -102,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ 		return -EMSGSIZE;
+ 
+ 	skb_put(skb, TLV_SPACE(len));
++	memset(tlv, 0, TLV_SPACE(len));
+ 	tlv->tlv_type = htons(type);
+ 	tlv->tlv_len = htons(TLV_LENGTH(len));
+ 	if (len && data)
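The added memset() in tipc_add_tlv() zeroes the whole TLV space before the header and payload are written, so the alignment padding after the payload carries zeros rather than stale kernel memory that would otherwise leak to userspace. A userspace sketch of the same initialize-then-fill pattern, with a simplified TLV layout:

    #include <stdio.h>
    #include <string.h>

    #define TLV_ALIGN(x)   (((x) + 3u) & ~3u)          /* 4-byte alignment */
    #define TLV_HDR_LEN    4u                          /* type + len */
    #define TLV_SPACE(len) (TLV_HDR_LEN + TLV_ALIGN(len))

    int main(void)
    {
    	unsigned char skb[32];
    	unsigned short payload_len = 5;                /* not 4-aligned */
    	unsigned char *tlv = skb;

    	memset(skb, 0xAA, sizeof(skb));                /* "stale" bytes */
    	memset(tlv, 0, TLV_SPACE(payload_len));        /* the fix */
    	memcpy(tlv + TLV_HDR_LEN, "hello", payload_len);

    	/* The padding bytes after "hello" are now 0, not 0xAA: */
    	printf("pad byte = %#x\n", tlv[TLV_SPACE(payload_len) - 1]);
    	return 0;
    }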
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3e8a04a136688..3e6eeacb13aec 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2553,15 +2553,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	if (!(state->flags & MSG_PEEK))
+ 		WRITE_ONCE(u->oob_skb, NULL);
+-
++	else
++		skb_get(oob_skb);
+ 	unix_state_unlock(sk);
+ 
+ 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
+ 
+-	if (!(state->flags & MSG_PEEK)) {
++	if (!(state->flags & MSG_PEEK))
+ 		UNIXCB(oob_skb).consumed += 1;
+-		kfree_skb(oob_skb);
+-	}
++
++	consume_skb(oob_skb);
+ 
+ 	mutex_unlock(&u->iolock);
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 020cf17ab7e47..ccd8cefeea7ba 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -89,6 +89,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/cred.h>
++#include <linux/errqueue.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -110,6 +111,7 @@
+ #include <linux/workqueue.h>
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
++#include <uapi/linux/vm_sockets.h>
+ 
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+@@ -2134,6 +2136,10 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	int err;
+ 
+ 	sk = sock->sk;
++
++	if (unlikely(flags & MSG_ERRQUEUE))
++		return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
++
+ 	vsk = vsock_sk(sk);
+ 	err = 0;
+ 
+diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
+index 7a788bb837fc1..7a09ac74fac07 100644
+--- a/samples/bpf/syscall_tp_user.c
++++ b/samples/bpf/syscall_tp_user.c
+@@ -17,9 +17,9 @@
+ 
+ static void usage(const char *cmd)
+ {
+-	printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
+-	printf("       -i num_progs      # number of progs of the test\n");
+-	printf("       -h                # help\n");
++	printf("USAGE: %s [-i nr_tests] [-h]\n", cmd);
++	printf("       -i nr_tests      # rounds of test to run\n");
++	printf("       -h               # help\n");
+ }
+ 
+ static void verify_map(int map_id)
+@@ -45,14 +45,14 @@ static void verify_map(int map_id)
+ 	}
+ }
+ 
+-static int test(char *filename, int num_progs)
++static int test(char *filename, int nr_tests)
+ {
+-	int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+-	struct bpf_link *links[num_progs * 4];
+-	struct bpf_object *objs[num_progs];
++	int map0_fds[nr_tests], map1_fds[nr_tests], fd, i, j = 0;
++	struct bpf_link **links = NULL;
++	struct bpf_object *objs[nr_tests];
+ 	struct bpf_program *prog;
+ 
+-	for (i = 0; i < num_progs; i++) {
++	for (i = 0; i < nr_tests; i++) {
+ 		objs[i] = bpf_object__open_file(filename, NULL);
+ 		if (libbpf_get_error(objs[i])) {
+ 			fprintf(stderr, "opening BPF object file failed\n");
+@@ -60,6 +60,19 @@ static int test(char *filename, int num_progs)
+ 			goto cleanup;
+ 		}
+ 
++		/* One-time initialization */
++		if (!links) {
++			int nr_progs = 0;
++
++			bpf_object__for_each_program(prog, objs[i])
++				nr_progs += 1;
++
++			links = calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
++
++			if (!links)
++				goto cleanup;
++		}
++
+ 		/* load BPF program */
+ 		if (bpf_object__load(objs[i])) {
+ 			fprintf(stderr, "loading BPF object file failed\n");
+@@ -101,14 +114,18 @@ static int test(char *filename, int num_progs)
+ 	close(fd);
+ 
+ 	/* verify the map */
+-	for (i = 0; i < num_progs; i++) {
++	for (i = 0; i < nr_tests; i++) {
+ 		verify_map(map0_fds[i]);
+ 		verify_map(map1_fds[i]);
+ 	}
+ 
+ cleanup:
+-	for (j--; j >= 0; j--)
+-		bpf_link__destroy(links[j]);
++	if (links) {
++		for (j--; j >= 0; j--)
++			bpf_link__destroy(links[j]);
++
++		free(links);
++	}
+ 
+ 	for (i--; i >= 0; i--)
+ 		bpf_object__close(objs[i]);
+@@ -117,13 +134,13 @@ cleanup:
+ 
+ int main(int argc, char **argv)
+ {
+-	int opt, num_progs = 1;
++	int opt, nr_tests = 1;
+ 	char filename[256];
+ 
+ 	while ((opt = getopt(argc, argv, "i:h")) != -1) {
+ 		switch (opt) {
+ 		case 'i':
+-			num_progs = atoi(optarg);
++			nr_tests = atoi(optarg);
+ 			break;
+ 		case 'h':
+ 		default:
+@@ -134,5 +151,5 @@ int main(int argc, char **argv)
+ 
+ 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ 
+-	return test(filename, num_progs);
++	return test(filename, nr_tests);
+ }
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 3cd6ca15f390d..c9f3e03124d7f 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC      $@
+ 
+ ifdef CONFIG_MODULES
+ KASAN_SANITIZE_.vmlinux.export.o := n
++KCSAN_SANITIZE_.vmlinux.export.o := n
+ GCOV_PROFILE_.vmlinux.export.o := n
+ targets += .vmlinux.export.o
+ vmlinux: .vmlinux.export.o
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 951b74ba1b242..910bd21d08f48 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
+ 
+ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+ {
+-	unsigned long i, x;
++	unsigned long i, x, index;
+ 	struct partition_group size_group[length];
+ 	unsigned long num_groups = 0;
+ 	unsigned long randnum;
+ 
+ 	partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
++
++	/* FIXME: this group shuffle is currently a no-op. */
+ 	for (i = num_groups - 1; i > 0; i--) {
+ 		struct partition_group tmp;
+ 		randnum = ranval(prng_state) % (i + 1);
+@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
+ 	}
+ 
+ 	for (x = 0; x < num_groups; x++) {
+-		for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
++		for (index = size_group[x].length - 1; index > 0; index--) {
+ 			tree tmp;
++
++			i = size_group[x].start + index;
+ 			if (DECL_BIT_FIELD_TYPE(newtree[i]))
+ 				continue;
+-			randnum = ranval(prng_state) % (i + 1);
++			randnum = ranval(prng_state) % (index + 1);
++			randnum += size_group[x].start;
+ 			// we could handle this case differently if desired
+ 			if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+ 				continue;
+@@ -273,8 +278,6 @@ static bool is_flexible_array(const_tree field)
+ {
+ 	const_tree fieldtype;
+ 	const_tree typesize;
+-	const_tree elemtype;
+-	const_tree elemsize;
+ 
+ 	fieldtype = TREE_TYPE(field);
+ 	typesize = TYPE_SIZE(fieldtype);
+@@ -282,20 +285,12 @@ static bool is_flexible_array(const_tree field)
+ 	if (TREE_CODE(fieldtype) != ARRAY_TYPE)
+ 		return false;
+ 
+-	elemtype = TREE_TYPE(fieldtype);
+-	elemsize = TYPE_SIZE(elemtype);
+-
+ 	/* size of type is represented in bits */
+ 
+ 	if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
+ 	    TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
+ 		return true;
+ 
+-	if (typesize != NULL_TREE &&
+-	    (TREE_CONSTANT(typesize) && (!tree_to_uhwi(typesize) ||
+-	     tree_to_uhwi(typesize) == tree_to_uhwi(elemsize))))
+-		return true;
+-
+ 	return false;
+ }
+ 
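
The performance_shuffle() fix above is a Fisher-Yates shuffle confined
to one size group: the swap candidate is drawn from the group-relative
range [0, index] and only then rebased by the group's start, so an
element can no longer escape its group. A stand-alone sketch of the
same walk over a plain int array, with rand() standing in for the
plugin's PRNG:

    #include <stdlib.h>

    static void shuffle_group(int *arr, unsigned long start, unsigned long len)
    {
            unsigned long index, i, randnum;
            int tmp;

            if (!len)
                    return;
            for (index = len - 1; index > 0; index--) {
                    i = start + index;
                    /* pick inside the group, then rebase to an absolute slot */
                    randnum = start + rand() % (index + 1);
                    tmp = arr[i];
                    arr[i] = arr[randnum];
                    arr[randnum] = tmp;
            }
    }
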
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index a462df827de2d..27ea19fb1f54c 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+ 	return iint;
+ }
+ 
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long as it measures
++ * files both on overlayfs and on underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++					 struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++	static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++	int depth = inode->i_sb->s_stack_depth;
++
++	if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++		depth = 0;
++
++	lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++			     struct inode *inode)
+ {
+-	kfree(iint->ima_hash);
+ 	iint->ima_hash = NULL;
+ 	iint->version = 0;
+ 	iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
+ 	iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ 	iint->evm_status = INTEGRITY_UNKNOWN;
+ 	iint->measured_pcrs = 0;
++	mutex_init(&iint->mutex);
++	iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++	kfree(iint->ima_hash);
++	mutex_destroy(&iint->mutex);
+ 	kmem_cache_free(iint_cache, iint);
+ }
+ 
+@@ -104,6 +135,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ 	if (!iint)
+ 		return NULL;
+ 
++	iint_init_always(iint, inode);
++
+ 	write_lock(&integrity_iint_lock);
+ 
+ 	p = &integrity_iint_tree.rb_node;
+@@ -153,25 +186,18 @@ void integrity_inode_free(struct inode *inode)
+ 	iint_free(iint);
+ }
+ 
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+ 	struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+ 
+ 	memset(iint, 0, sizeof(*iint));
+-	iint->ima_file_status = INTEGRITY_UNKNOWN;
+-	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+-	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+-	iint->ima_read_status = INTEGRITY_UNKNOWN;
+-	iint->ima_creds_status = INTEGRITY_UNKNOWN;
+-	iint->evm_status = INTEGRITY_UNKNOWN;
+-	mutex_init(&iint->mutex);
+ }
+ 
+ static int __init integrity_iintcache_init(void)
+ {
+ 	iint_cache =
+ 	    kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+-			      0, SLAB_PANIC, init_once);
++			      0, SLAB_PANIC, iint_init_once);
+ 	return 0;
+ }
+ DEFINE_LSM(integrity) = {
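
For readers unfamiliar with the lockdep trick above: giving each
filesystem stacking depth its own lock_class_key makes the overlayfs
iint mutex and the underlying filesystem's iint mutex distinct lock
classes, so holding both in one call chain no longer looks like
recursion. A condensed kernel-side sketch (kernel code, not runnable
stand-alone; names follow the hunk):

    #ifdef CONFIG_LOCKDEP
    /* one class per stacking level: 0 = plain fs, 1..N = stacked fs */
    static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
    #endif

    static void annotate(struct mutex *m, struct inode *inode)
    {
    #ifdef CONFIG_LOCKDEP
            int depth = inode->i_sb->s_stack_depth;

            if (depth < 0 || depth >= IMA_MAX_NESTING)
                    depth = 0;
            lockdep_set_class(m, &iint_mutex_key[depth]);
    #endif
    }
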
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 452e80b541e54..597ea0c4d72f7 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -243,6 +243,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ {
+ 	const char *audit_cause = "failed";
+ 	struct inode *inode = file_inode(file);
++	struct inode *real_inode = d_real_inode(file_dentry(file));
+ 	const char *filename = file->f_path.dentry->d_name.name;
+ 	struct ima_max_digest_data hash;
+ 	struct kstat stat;
+@@ -302,6 +303,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 	iint->ima_hash = tmpbuf;
+ 	memcpy(iint->ima_hash, &hash, length);
+ 	iint->version = i_version;
++	if (real_inode != inode) {
++		iint->real_ino = real_inode->i_ino;
++		iint->real_dev = real_inode->i_sb->s_dev;
++	}
+ 
+ 	/* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ 	if (!result)
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 365db0e43d7c2..cc1217ac2c6fa 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -25,6 +25,7 @@
+ #include <linux/xattr.h>
+ #include <linux/ima.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+ 
+ #include "ima.h"
+ 
+@@ -207,7 +208,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 			       u32 secid, char *buf, loff_t size, int mask,
+ 			       enum ima_hooks func)
+ {
+-	struct inode *inode = file_inode(file);
++	struct inode *backing_inode, *inode = file_inode(file);
+ 	struct integrity_iint_cache *iint = NULL;
+ 	struct ima_template_desc *template_desc = NULL;
+ 	char *pathbuf = NULL;
+@@ -284,6 +285,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 		iint->measured_pcrs = 0;
+ 	}
+ 
++	/* Detect and re-evaluate changes made to the backing file. */
++	backing_inode = d_real_inode(file_dentry(file));
++	if (backing_inode != inode &&
++	    (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++		if (!IS_I_VERSION(backing_inode) ||
++		    backing_inode->i_sb->s_dev != iint->real_dev ||
++		    backing_inode->i_ino != iint->real_ino ||
++		    !inode_eq_iversion(backing_inode, iint->version)) {
++			iint->flags &= ~IMA_DONE_MASK;
++			iint->measured_pcrs = 0;
++		}
++	}
++
+ 	/* Determine if already appraised/measured based on bitmask
+ 	 * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ 	 *  IMA_AUDIT, IMA_AUDITED)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 7167a6e99bdc0..52c3c806bf69f 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -164,6 +164,8 @@ struct integrity_iint_cache {
+ 	unsigned long flags;
+ 	unsigned long measured_pcrs;
+ 	unsigned long atomic_flags;
++	unsigned long real_ino;
++	dev_t real_dev;
+ 	enum integrity_status ima_file_status:4;
+ 	enum integrity_status ima_mmap_status:4;
+ 	enum integrity_status ima_bprm_status:4;
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index 85fb5c22529a7..fee1ab2c734d3 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -358,17 +358,17 @@ static int __init init_trusted(void)
+ 		if (!get_random)
+ 			get_random = kernel_get_random;
+ 
+-		static_call_update(trusted_key_seal,
+-				   trusted_key_sources[i].ops->seal);
+-		static_call_update(trusted_key_unseal,
+-				   trusted_key_sources[i].ops->unseal);
+-		static_call_update(trusted_key_get_random,
+-				   get_random);
+-		trusted_key_exit = trusted_key_sources[i].ops->exit;
+-		migratable = trusted_key_sources[i].ops->migratable;
+-
+ 		ret = trusted_key_sources[i].ops->init();
+-		if (!ret)
++		if (!ret) {
++			static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
++			static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
++			static_call_update(trusted_key_get_random, get_random);
++
++			trusted_key_exit = trusted_key_sources[i].ops->exit;
++			migratable = trusted_key_sources[i].ops->migratable;
++		}
++
++		if (!ret || ret != -ENODEV)
+ 			break;
+ 	}
+ 
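
The reordering above matters because static calls must not be pointed
at a backend whose ->init() can still fail. A minimal sketch of the
corrected probe loop with illustrative types; publish() stands in for
the static_call_update() block:

    #include <errno.h>

    struct key_ops { int (*init)(void); };

    static int probe(const struct key_ops *src, int n,
                     void (*publish)(const struct key_ops *))
    {
            int i, ret = -ENODEV;

            for (i = 0; i < n; i++) {
                    ret = src[i].init();
                    if (!ret)
                            publish(&src[i]);  /* backend is known to work */
                    /* stop on success, or on any error other than -ENODEV */
                    if (!ret || ret != -ENODEV)
                            break;
            }
            return ret;
    }
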
+diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
+index ac3e270ade69b..aa3d477de6db5 100644
+--- a/security/keys/trusted-keys/trusted_tee.c
++++ b/security/keys/trusted-keys/trusted_tee.c
+@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 	int ret;
+ 	struct tee_ioctl_invoke_arg inv_arg;
+ 	struct tee_param param[4];
+-	struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++	struct tee_shm *reg_shm = NULL;
+ 
+ 	memset(&inv_arg, 0, sizeof(inv_arg));
+ 	memset(&param, 0, sizeof(param));
+ 
+-	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+-						 p->key_len);
+-	if (IS_ERR(reg_shm_in)) {
+-		dev_err(pvt_data.dev, "key shm register failed\n");
+-		return PTR_ERR(reg_shm_in);
+-	}
+-
+-	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+-						  sizeof(p->blob));
+-	if (IS_ERR(reg_shm_out)) {
+-		dev_err(pvt_data.dev, "blob shm register failed\n");
+-		ret = PTR_ERR(reg_shm_out);
+-		goto out;
++	reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++					      sizeof(p->key) + sizeof(p->blob));
++	if (IS_ERR(reg_shm)) {
++		dev_err(pvt_data.dev, "shm register failed\n");
++		return PTR_ERR(reg_shm);
+ 	}
+ 
+ 	inv_arg.func = TA_CMD_SEAL;
+@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 	inv_arg.num_params = 4;
+ 
+ 	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+-	param[0].u.memref.shm = reg_shm_in;
++	param[0].u.memref.shm = reg_shm;
+ 	param[0].u.memref.size = p->key_len;
+ 	param[0].u.memref.shm_offs = 0;
+ 	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+-	param[1].u.memref.shm = reg_shm_out;
++	param[1].u.memref.shm = reg_shm;
+ 	param[1].u.memref.size = sizeof(p->blob);
+-	param[1].u.memref.shm_offs = 0;
++	param[1].u.memref.shm_offs = sizeof(p->key);
+ 
+ 	ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ 	if ((ret < 0) || (inv_arg.ret != 0)) {
+@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 		p->blob_len = param[1].u.memref.size;
+ 	}
+ 
+-out:
+-	if (reg_shm_out)
+-		tee_shm_free(reg_shm_out);
+-	if (reg_shm_in)
+-		tee_shm_free(reg_shm_in);
++	tee_shm_free(reg_shm);
+ 
+ 	return ret;
+ }
+@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 	int ret;
+ 	struct tee_ioctl_invoke_arg inv_arg;
+ 	struct tee_param param[4];
+-	struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++	struct tee_shm *reg_shm = NULL;
+ 
+ 	memset(&inv_arg, 0, sizeof(inv_arg));
+ 	memset(&param, 0, sizeof(param));
+ 
+-	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+-						 p->blob_len);
+-	if (IS_ERR(reg_shm_in)) {
+-		dev_err(pvt_data.dev, "blob shm register failed\n");
+-		return PTR_ERR(reg_shm_in);
+-	}
+-
+-	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+-						  sizeof(p->key));
+-	if (IS_ERR(reg_shm_out)) {
+-		dev_err(pvt_data.dev, "key shm register failed\n");
+-		ret = PTR_ERR(reg_shm_out);
+-		goto out;
++	reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++					      sizeof(p->key) + sizeof(p->blob));
++	if (IS_ERR(reg_shm)) {
++		dev_err(pvt_data.dev, "shm register failed\n");
++		return PTR_ERR(reg_shm);
+ 	}
+ 
+ 	inv_arg.func = TA_CMD_UNSEAL;
+@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 	inv_arg.num_params = 4;
+ 
+ 	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+-	param[0].u.memref.shm = reg_shm_in;
++	param[0].u.memref.shm = reg_shm;
+ 	param[0].u.memref.size = p->blob_len;
+-	param[0].u.memref.shm_offs = 0;
++	param[0].u.memref.shm_offs = sizeof(p->key);
+ 	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+-	param[1].u.memref.shm = reg_shm_out;
++	param[1].u.memref.shm = reg_shm;
+ 	param[1].u.memref.size = sizeof(p->key);
+ 	param[1].u.memref.shm_offs = 0;
+ 
+@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 		p->key_len = param[1].u.memref.size;
+ 	}
+ 
+-out:
+-	if (reg_shm_out)
+-		tee_shm_free(reg_shm_out);
+-	if (reg_shm_in)
+-		tee_shm_free(reg_shm_in);
++	tee_shm_free(reg_shm);
+ 
+ 	return ret;
+ }
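
The two registrations collapse into one because key and blob are
adjacent members of the trusted-key payload: registering
sizeof(key) + sizeof(blob) bytes starting at the key covers both
buffers, and the blob is then addressed by the offset sizeof(key). A
compilable sketch of the layout arithmetic, with made-up sizes:

    #include <stddef.h>
    #include <stdio.h>

    struct payload {
            unsigned char key[128];   /* memref: shm_offs = 0 */
            unsigned char blob[512];  /* memref: shm_offs = sizeof(key) */
    };

    int main(void)
    {
            struct payload p;

            printf("register %zu bytes, blob at offset %zu\n",
                   sizeof(p.key) + sizeof(p.blob),
                   offsetof(struct payload, blob));
            return 0;
    }
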
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 0b2f04dcb5897..e2f302e55bbb2 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -56,7 +56,7 @@ struct snd_info_private_data {
+ };
+ 
+ static int snd_info_version_init(void);
+-static void snd_info_disconnect(struct snd_info_entry *entry);
++static void snd_info_clear_entries(struct snd_info_entry *entry);
+ 
+ /*
+ 
+@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
+ {
+ 	if (!card)
+ 		return;
+-	mutex_lock(&info_mutex);
++
+ 	proc_remove(card->proc_root_link);
+-	card->proc_root_link = NULL;
+ 	if (card->proc_root)
+-		snd_info_disconnect(card->proc_root);
++		proc_remove(card->proc_root->p);
++
++	mutex_lock(&info_mutex);
++	if (card->proc_root)
++		snd_info_clear_entries(card->proc_root);
++	card->proc_root_link = NULL;
++	card->proc_root = NULL;
+ 	mutex_unlock(&info_mutex);
+ }
+ 
+@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
+ }
+ EXPORT_SYMBOL(snd_info_create_card_entry);
+ 
+-static void snd_info_disconnect(struct snd_info_entry *entry)
++static void snd_info_clear_entries(struct snd_info_entry *entry)
+ {
+ 	struct snd_info_entry *p;
+ 
+ 	if (!entry->p)
+ 		return;
+ 	list_for_each_entry(p, &entry->children, list)
+-		snd_info_disconnect(p);
+-	proc_remove(entry->p);
++		snd_info_clear_entries(p);
+ 	entry->p = NULL;
+ }
+ 
+@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
+ 	if (!entry)
+ 		return;
+ 	if (entry->p) {
++		proc_remove(entry->p);
+ 		mutex_lock(&info_mutex);
+-		snd_info_disconnect(entry);
++		snd_info_clear_entries(entry);
+ 		mutex_unlock(&info_mutex);
+ 	}
+ 
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 2633a4bb1d85d..214a0680524b0 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -354,8 +354,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ 	struct hdac_stream *res = NULL;
+ 
+ 	/* make a non-zero unique key for the substream */
+-	int key = (substream->pcm->device << 16) | (substream->number << 2) |
+-		(substream->stream + 1);
++	int key = (substream->number << 2) | (substream->stream + 1);
++
++	if (substream->pcm)
++		key |= (substream->pcm->device << 16);
+ 
+ 	spin_lock_irq(&bus->reg_lock);
+ 	list_for_each_entry(azx_dev, &bus->stream_list, list) {
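
The reworked key above stays non-zero even when no pcm is attached,
because the low bits always carry (number << 2) | (stream + 1) and
stream is 0 or 1. The same construction in isolation, with has_pcm
replacing the pointer test:

    static int make_key(int device, int number, int stream, int has_pcm)
    {
            /* stream + 1 is 1 or 2, so the key is never zero */
            int key = (number << 2) | (stream + 1);

            if (has_pcm)
                    key |= device << 16;
            return key;
    }
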
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7f1d79f450a2a..7375998538040 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9670,6 +9670,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9705,6 +9706,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9738,12 +9740,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -9798,6 +9804,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+@@ -10639,22 +10646,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60130},
+ 		{0x17, 0x90170110},
+ 		{0x21, 0x03211020}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+-		{0x14, 0x90170110},
+-		{0x21, 0x04211020}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+-		{0x14, 0x90170110},
+-		{0x21, 0x04211030}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS,
+-		{0x17, 0x21014020},
+-		{0x18, 0x21a19030}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS,
+-		{0x17, 0x21014040},
+-		{0x18, 0x21a19050}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS),
+ 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC298_STANDARD_PINS,
+ 		{0x17, 0x90170110}),
+@@ -10698,6 +10689,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ 		{0x19, 0x40000000},
+ 		{0x1b, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++		{0x19, 0x40000000},
++		{0x1b, 0x40000000}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 8ba7dc89daaaa..5f106be184a87 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1685,6 +1685,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
+ 		boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
+ 		reg = CDC_WSA_RX1_RX_PATH_CTL;
+ 		reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
++	} else {
++		dev_warn(component->dev, "Incorrect widget name in the driver\n");
++		return -EINVAL;
+ 	}
+ 
+ 	switch (event) {
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index e40d583a1ce64..4ea550b0601be 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1203,9 +1203,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ 			break;
+ 		}
+ 
+-		snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+-					      WSA883X_DRE_GAIN_EN_MASK,
+-					      WSA883X_DRE_GAIN_FROM_CSR);
+ 		if (wsa883x->port_enable[WSA883X_PORT_COMP])
+ 			snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ 						      WSA883X_DRE_OFFSET_MASK,
+@@ -1218,9 +1215,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ 		snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ 					      WSA883X_PDM_EN_MASK,
+ 					      WSA883X_PDM_ENABLE);
+-		snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+-					      WSA883X_GLOBAL_PA_EN_MASK,
+-					      WSA883X_GLOBAL_PA_ENABLE);
+ 
+ 		break;
+ 	case SND_SOC_DAPM_PRE_PMD:
+@@ -1346,6 +1340,7 @@ static const struct snd_soc_dai_ops wsa883x_dai_ops = {
+ 	.hw_free = wsa883x_hw_free,
+ 	.mute_stream = wsa883x_digital_mute,
+ 	.set_stream = wsa883x_set_sdw_stream,
++	.mute_unmute_on_trigger = true,
+ };
+ 
+ static struct snd_soc_dai_driver wsa883x_dais[] = {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index cdcbf04b8832f..5e2ec60e2954b 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -75,6 +75,39 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
+ 	return arg;
+ }
+ 
++/*
++ * The Lenovo Yoga Tab 3 Pro YT3-X90, with Android factory OS, has a buggy DSDT
++ * with the codec not being listed at all.
++ */
++static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
++	{
++		/* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++	},
++	{ }
++};
++
++static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
++	.id = "10WM5102",
++	.drv_name = "bytcr_wm5102",
++	.fw_filename = "intel/fw_sst_22a8.bin",
++	.board = "bytcr_wm5102",
++	.sof_tplg_filename = "sof-cht-wm5102.tplg",
++};
++
++static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
++{
++	if (dmi_check_system(lenovo_yoga_tab3_x90))
++		return &cht_lenovo_yoga_tab3_x90_mach;
++
++	/* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
++	return NULL;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ 	.num_codecs = 2,
+ 	.codecs = { "10EC5640", "10EC3276" },
+@@ -175,6 +208,16 @@ struct snd_soc_acpi_mach  snd_soc_acpi_intel_cherrytrail_machines[] = {
+ 		.drv_name = "sof_pcm512x",
+ 		.sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ 	},
++	/*
++	 * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90 where the DSDT
++	 * misses the codec. Match on the SST id instead, lenovo_yt3_x90_quirk()
++	 * will return a YT3 specific mach or NULL when called on other hw,
++	 * skipping this entry.
++	 */
++	{
++		.id = "808622A8",
++		.machine_quirk = lenovo_yt3_x90_quirk,
++	},
+ 
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ 	/*
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index ac69c23e0da1c..7048ff52ab86a 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -246,6 +246,11 @@ static const struct snd_soc_dapm_widget mt8188_mt6359_widgets[] = {
+ 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ 	SND_SOC_DAPM_SINK("HDMI"),
+ 	SND_SOC_DAPM_SINK("DP"),
++
++	/* dynamic pinctrl */
++	SND_SOC_DAPM_PINCTRL("ETDM_SPK_PIN", "aud_etdm_spk_on", "aud_etdm_spk_off"),
++	SND_SOC_DAPM_PINCTRL("ETDM_HP_PIN", "aud_etdm_hp_on", "aud_etdm_hp_off"),
++	SND_SOC_DAPM_PINCTRL("MTKAIF_PIN", "aud_mtkaif_on", "aud_mtkaif_off"),
+ };
+ 
+ static const struct snd_kcontrol_new mt8188_mt6359_controls[] = {
+@@ -267,6 +272,7 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ 		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ 	struct snd_soc_component *cmpnt_codec =
+ 		asoc_rtd_to_codec(rtd, 0)->component;
++	struct snd_soc_dapm_widget *pin_w = NULL, *w;
+ 	struct mtk_base_afe *afe;
+ 	struct mt8188_afe_private *afe_priv;
+ 	struct mtkaif_param *param;
+@@ -306,6 +312,18 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ 		return 0;
+ 	}
+ 
++	for_each_card_widgets(rtd->card, w) {
++		if (!strcmp(w->name, "MTKAIF_PIN")) {
++			pin_w = w;
++			break;
++		}
++	}
++
++	if (pin_w)
++		dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_PRE_PMU);
++	else
++		dev_dbg(afe->dev, "%s(), no pinmux widget, please check if default on\n", __func__);
++
+ 	pm_runtime_get_sync(afe->dev);
+ 	mt6359_mtkaif_calibration_enable(cmpnt_codec);
+ 
+@@ -403,6 +421,9 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ 	for (i = 0; i < MT8188_MTKAIF_MISO_NUM; i++)
+ 		param->mtkaif_phase_cycle[i] = mtkaif_phase_cycle[i];
+ 
++	if (pin_w)
++		dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_POST_PMD);
++
+ 	dev_dbg(afe->dev, "%s(), end, calibration ok %d\n",
+ 		__func__, param->mtkaif_calibration_ok);
+ 
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index 02dd64dea1792..28d8c6c3d3b26 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -641,6 +641,10 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ 			ret = soc_dai_trigger(dai, substream, cmd);
+ 			if (ret < 0)
+ 				break;
++
++			if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++				snd_soc_dai_digital_mute(dai, 0, substream->stream);
++
+ 			soc_dai_mark_push(dai, substream, trigger);
+ 		}
+ 		break;
+@@ -651,6 +655,9 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ 			if (rollback && !soc_dai_mark_match(dai, substream, trigger))
+ 				continue;
+ 
++			if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++				snd_soc_dai_digital_mute(dai, 1, substream->stream);
++
+ 			r = soc_dai_trigger(dai, substream, cmd);
+ 			if (r < 0)
+ 				ret = r; /* use last ret */
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 6cf4cd667d036..4cb710a4cea42 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -896,8 +896,10 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ 	snd_soc_dapm_stream_event(rtd, substream->stream,
+ 			SND_SOC_DAPM_STREAM_START);
+ 
+-	for_each_rtd_dais(rtd, i, dai)
+-		snd_soc_dai_digital_mute(dai, 0, substream->stream);
++	for_each_rtd_dais(rtd, i, dai) {
++		if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++			snd_soc_dai_digital_mute(dai, 0, substream->stream);
++	}
+ 
+ out:
+ 	return soc_pcm_ret(rtd, ret);
+@@ -939,8 +941,10 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+ 		if (snd_soc_dai_active(dai) == 1)
+ 			soc_pcm_set_dai_params(dai, NULL);
+ 
+-		if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
+-			snd_soc_dai_digital_mute(dai, 1, substream->stream);
++		if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
++			if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++				snd_soc_dai_digital_mute(dai, 1, substream->stream);
++		}
+ 	}
+ 
+ 	/* run the stream event */
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index ab6eddd91bb77..1b09496733fb8 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -614,6 +614,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ 	case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ 		sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ 		break;
++	case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
++		snd_sof_dsp_panic(sdev, 0, true);
++		break;
+ 	default:
+ 		dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ 			ipc4_msg->primary, ipc4_msg->extension);
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e5405f854a910..563fe6f7789f7 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -1032,6 +1032,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
+ 		mach = snd_sof_machine_select(sdev);
+ 		if (mach) {
+ 			sof_pdata->machine = mach;
++
++			if (sof_pdata->subsystem_id_set) {
++				mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
++				mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
++				mach->mach_params.subsystem_id_set = true;
++			}
++
+ 			snd_sof_set_mach_params(mach, sdev);
+ 			return 0;
+ 		}
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index f42c85df88a80..69a2352f2e1a0 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -221,6 +221,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 		return ret;
+ 
+ 	sof_pdata->name = pci_name(pci);
++
++	/* PCI defines a vendor ID of 0xFFFF as invalid. */
++	if (pci->subsystem_vendor != 0xFFFF) {
++		sof_pdata->subsystem_vendor = pci->subsystem_vendor;
++		sof_pdata->subsystem_device = pci->subsystem_device;
++		sof_pdata->subsystem_id_set = true;
++	}
++
+ 	sof_pdata->desc = desc;
+ 	sof_pdata->dev = dev;
+ 
+diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
+index 21fa7b9787997..94c514e57eef9 100644
+--- a/sound/soc/ti/omap-mcbsp.c
++++ b/sound/soc/ti/omap-mcbsp.c
+@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+ 		return -EINVAL;
+ 	}
+ 
+-	pm_runtime_put_sync(mcbsp->dev);
++	if (mcbsp->active)
++		pm_runtime_put_sync(mcbsp->dev);
+ 
+ 	r = clk_set_parent(mcbsp->fclk, fck_src);
+ 	if (r)
+ 		dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ 			src);
+ 
+-	pm_runtime_get_sync(mcbsp->dev);
++	if (mcbsp->active)
++		pm_runtime_get_sync(mcbsp->dev);
+ 
+ 	clk_put(fck_src);
+ 
+diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
+index 3c36aeade991e..9a85c69782bdd 100644
+--- a/tools/include/uapi/linux/prctl.h
++++ b/tools/include/uapi/linux/prctl.h
+@@ -283,7 +283,7 @@ struct prctl_mm_map {
+ 
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE			65
+-# define PR_MDWE_REFUSE_EXEC_GAIN	1
++# define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
+ 
+ #define PR_GET_MDWE			66
+ 
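
With the constant redefined as bit 0 of a flag word, user space sets
and reads it like any other prctl flag. A small runnable sketch; the
fallback defines only cover headers that predate the uapi change:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE
    #define PR_SET_MDWE              65
    #define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
    #define PR_GET_MDWE              66
    #endif

    int main(void)
    {
            if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L))
                    perror("PR_SET_MDWE");
            /* returns the currently set flag bits, or -1 on error */
            printf("MDWE flags: %d\n", prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L));
            return 0;
    }
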
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index dbf0bc71a63be..f38893e0b0369 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ 	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
+ 		if (!ptq->state->to_ip)
+ 			ptq->flags = PERF_IP_FLAG_BRANCH |
++				     PERF_IP_FLAG_ASYNC |
+ 				     PERF_IP_FLAG_TRACE_END;
+ 		else if (ptq->state->from_nr && !ptq->state->to_nr)
+ 			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
++				     PERF_IP_FLAG_ASYNC |
+ 				     PERF_IP_FLAG_VMEXIT;
+ 		else
+ 			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 8a36ba5df9f90..ce9860e388bd4 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -2180,7 +2180,7 @@ retry:
+ 	if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ 			return -7;
+-	} else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
++	} else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
+ 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ 			return -7;
+ 	}
+@@ -5790,6 +5790,7 @@ void process_cpuid()
+ 	rapl_probe(family, model);
+ 	perf_limit_reasons_probe(family, model);
+ 	automatic_cstate_conversion_probe(family, model);
++	prewake_cstate_probe(family, model);
+ 
+ 	check_tcc_offset(model_orig);
+ 
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index fb6ab9cef84f7..b885462999022 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+ 			cxld->interleave_ways = 2;
+ 		else
+ 			cxld->interleave_ways = 1;
+-		cxld->interleave_granularity = 256;
++		cxld->interleave_granularity = 4096;
+ 		cxld->hpa_range = (struct range) {
+ 			.start = base,
+ 			.end = base + size - 1,
+diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+index f9297900cea6d..78f19c255f20b 100644
+--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
++++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+@@ -9,8 +9,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 2),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid BPF_LD_IMM insn",
+-	.errstr_unpriv = "R1 pointer comparison",
++	.errstr = "jump into the middle of ldimm64 insn 1",
++	.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ 	.result = REJECT,
+ },
+ {
+@@ -23,8 +23,8 @@
+ 	BPF_LD_IMM64(BPF_REG_0, 1),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid BPF_LD_IMM insn",
+-	.errstr_unpriv = "R1 pointer comparison",
++	.errstr = "jump into the middle of ldimm64 insn 1",
++	.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ 	.result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
+index e60cf4da8fb07..1c61e3c022cb8 100644
+--- a/tools/testing/selftests/clone3/clone3.c
++++ b/tools/testing/selftests/clone3/clone3.c
+@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
+ 			CLONE3_ARGS_NO_TEST);
+ 
+ 	/* Do a clone3() in a new time namespace */
+-	test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++	if (access("/proc/self/ns/time", F_OK) == 0) {
++		test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++	} else {
++		ksft_print_msg("Time namespaces are not supported\n");
++		ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
++	}
+ 
+ 	/* Do a clone3() with exit signal (SIGCHLD) in flags */
+ 	test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
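
The skip above rests on a cheap feature probe: a kernel built without
time-namespace support simply has no /proc/self/ns/time entry. The
check in isolation:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* the file exists only when the kernel supports time namespaces */
            if (access("/proc/self/ns/time", F_OK) == 0)
                    puts("time namespaces supported");
            else
                    puts("time namespaces not supported, skipping CLONE_NEWTIME");
            return 0;
    }
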
+diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
+index 9674a19396a32..7bc7af4eb2c17 100644
+--- a/tools/testing/selftests/efivarfs/create-read.c
++++ b/tools/testing/selftests/efivarfs/create-read.c
+@@ -32,8 +32,10 @@ int main(int argc, char **argv)
+ 	rc = read(fd, buf, sizeof(buf));
+ 	if (rc != 0) {
+ 		fprintf(stderr, "Reading a new var should return EOF\n");
++		close(fd);
+ 		return EXIT_FAILURE;
+ 	}
+ 
++	close(fd);
+ 	return EXIT_SUCCESS;
+ }
+diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
+index 5d52f64dfb430..7afe05e8c4d79 100644
+--- a/tools/testing/selftests/lkdtm/config
++++ b/tools/testing/selftests/lkdtm/config
+@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
+ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+ CONFIG_UBSAN=y
+ CONFIG_UBSAN_BOUNDS=y
+-CONFIG_UBSAN_TRAP=y
+ CONFIG_STACKPROTECTOR_STRONG=y
+ CONFIG_SLUB_DEBUG=y
+ CONFIG_SLUB_DEBUG_ON=y
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 607b8d7e3ea34..2f3a1b96da6e3 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -7,7 +7,7 @@ EXCEPTION
+ #EXHAUST_STACK Corrupts memory on failure
+ #CORRUPT_STACK Crashes entire system on success
+ #CORRUPT_STACK_STRONG Crashes entire system on success
+-ARRAY_BOUNDS
++ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
+ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 5a02fef4b070c..78003187524d4 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3207,7 +3207,7 @@ fastclose_tests()
+ 	if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ 		test_linkfail=1024 addr_nr_ns2=fastclose_server \
+ 			run_tests $ns1 $ns2 10.0.1.1
+-		chk_join_nr 0 0 0
++		chk_join_nr 0 0 0 0 0 0 1
+ 		chk_fclose_nr 1 1 invert
+ 		chk_rst_nr 1 1
+ 	fi
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 5073dbc961258..2deac2031de9e 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+ CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := resctrl_tests
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index af71b21412710..37f50252dead1 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -90,9 +90,6 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!validate_resctrl_feature_request(CMT_STR))
+-		return -1;
+-
+ 	ret = get_cbm_mask("L3", cbm_mask);
+ 	if (ret)
+ 		return ret;
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index cde3781a9ab05..bdb36509beb74 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -12,7 +12,7 @@
+ 
+ #define RESULT_FILE_NAME	"result_mba"
+ #define NUM_OF_RUNS		5
+-#define MAX_DIFF_PERCENT	5
++#define MAX_DIFF_PERCENT	8
+ #define ALLOCATION_MAX		100
+ #define ALLOCATION_MIN		10
+ #define ALLOCATION_STEP		10
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 538d35a6485ac..299eaed3edf1d 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -11,7 +11,7 @@
+ #include "resctrl.h"
+ 
+ #define RESULT_FILE_NAME	"result_mbm"
+-#define MAX_DIFF_PERCENT	5
++#define MAX_DIFF_PERCENT	8
+ #define NUM_OF_RUNS		5
+ 
+ static int
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index f455f0b7e314b..d8029cb474c9f 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -1,5 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#define _GNU_SOURCE
+ #ifndef RESCTRL_H
+ #define RESCTRL_H
+ #include <stdio.h>
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index ab1eab1e7ff63..f27e5c2a5171f 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -482,7 +482,7 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+  */
+ int signal_handler_register(void)
+ {
+-	struct sigaction sigact;
++	struct sigaction sigact = {};
+ 	int ret = 0;
+ 
+ 	sigact.sa_sigaction = ctrlc_handler;
+@@ -504,7 +504,7 @@ int signal_handler_register(void)
+  */
+ void signal_handler_unregister(void)
+ {
+-	struct sigaction sigact;
++	struct sigaction sigact = {};
+ 
+ 	sigact.sa_handler = SIG_DFL;
+ 	sigemptyset(&sigact.sa_mask);
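
The designated zero-initializer is the whole fix: in the unregister
path sa_flags was never assigned, so it held stack garbage, and stray
bits such as SA_SIGINFO or SA_RESTART could slip into the registration.
A stand-alone sketch of the safe pattern:

    #include <signal.h>

    static void on_int(int sig) { (void)sig; }

    int install_handler(void)
    {
            struct sigaction sigact = {};  /* sa_flags and friends zeroed */

            sigact.sa_handler = on_int;
            sigemptyset(&sigact.sa_mask);
            return sigaction(SIGINT, &sigact, NULL);
    }
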


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-11-20 11:27 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-11-20 11:27 UTC (permalink / raw
  To: gentoo-commits

commit:     ce0a6ee0d7cbec99484ec9d052884ee234652ba9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Nov 20 11:27:25 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Nov 20 11:27:25 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ce0a6ee0

Linux patch 6.5.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1011_linux-6.5.12.patch | 27038 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 27042 insertions(+)

diff --git a/0000_README b/0000_README
index 28310de0..fd46c9a5 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-6.5.11.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.11
 
+Patch:  1011_linux-6.5.12.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-6.5.12.patch b/1011_linux-6.5.12.patch
new file mode 100644
index 00000000..f4fe4339
--- /dev/null
+++ b/1011_linux-6.5.12.patch
@@ -0,0 +1,27038 @@
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index b6cfb51cb0b46..e715bfc09879a 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -46,12 +46,22 @@ The possible values in this file are:
+ 
+    The processor is not vulnerable
+ 
+- * 'Vulnerable: no microcode':
++* 'Vulnerable':
++
++   The processor is vulnerable and no mitigations have been applied.
++
++ * 'Vulnerable: No microcode':
+ 
+    The processor is vulnerable, no microcode extending IBPB
+    functionality to address the vulnerability has been applied.
+ 
+- * 'Mitigation: microcode':
++ * 'Vulnerable: Safe RET, no microcode':
++
++   The "Safe RET" mitigation (see below) has been applied to protect the
++   kernel, but the IBPB-extending microcode has not been applied.  User
++   space tasks may still be vulnerable.
++
++ * 'Vulnerable: Microcode, no safe RET':
+ 
+    Extended IBPB functionality microcode patch has been applied. It does
+    not address User->Kernel and Guest->Host transitions protection but it
+@@ -72,11 +82,11 @@ The possible values in this file are:
+ 
+    (spec_rstack_overflow=microcode)
+ 
+- * 'Mitigation: safe RET':
++ * 'Mitigation: Safe RET':
+ 
+-   Software-only mitigation. It complements the extended IBPB microcode
+-   patch functionality by addressing User->Kernel and Guest->Host
+-   transitions protection.
++   Combined microcode/software mitigation. It complements the
++   extended IBPB microcode patch functionality by addressing
++   User->Kernel and Guest->Host transitions protection.
+ 
+    Selected by default or by spec_rstack_overflow=safe-ret
+ 
+@@ -129,7 +139,7 @@ an indirect branch prediction barrier after having applied the required
+ microcode patch for one's system. This mitigation also comes at
+ a performance cost.
+ 
+-Mitigation: safe RET
++Mitigation: Safe RET
+ --------------------
+ 
+ The mitigation works by ensuring all RET instructions speculate to
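
The strings documented above surface through a single sysfs file, so
the current SRSO state can also be checked programmatically. An
illustrative reader, with the path as given in the document:

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow";
            char buf[128];
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;  /* file absent on unaffected kernels */
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout);  /* e.g. "Mitigation: Safe RET" */
            fclose(f);
            return 0;
    }
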
+diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
+index 294693a8906cf..10540aa7afa1a 100644
+--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
++++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
+@@ -22,8 +22,9 @@ compatible:
+ 	"mediatek,mt6323" for PMIC MT6323
+ 	"mediatek,mt6331" for PMIC MT6331 and MT6332
+ 	"mediatek,mt6357" for PMIC MT6357
+-	"mediatek,mt6358" for PMIC MT6358 and MT6366
++	"mediatek,mt6358" for PMIC MT6358
+ 	"mediatek,mt6359" for PMIC MT6359
++	"mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
+ 	"mediatek,mt6397" for PMIC MT6397
+ 
+ Optional subnodes:
+@@ -40,6 +41,7 @@ Optional subnodes:
+ 		- compatible: "mediatek,mt6323-regulator"
+ 	see ../regulator/mt6323-regulator.txt
+ 		- compatible: "mediatek,mt6358-regulator"
++		- compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator"
+ 	see ../regulator/mt6358-regulator.txt
+ 		- compatible: "mediatek,mt6397-regulator"
+ 	see ../regulator/mt6397-regulator.txt
+diff --git a/Makefile b/Makefile
+index 555cc34f47301..a6e152146028a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index aff2746c8af28..63c5d6a2022bc 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS
+ config HOTPLUG_SMT
+ 	bool
+ 
++config SMT_NUM_THREADS_DYNAMIC
++	bool
++
+ # Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
+ config HOTPLUG_CORE_SYNC
+ 	bool
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+index 42bcbf10957c4..9f9084269ef58 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+@@ -181,5 +181,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+index e04d2e5ea51aa..72e960c888ac8 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+@@ -85,5 +85,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+index a399800139d9c..750e17482371c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+@@ -88,5 +88,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+index fad3473810a2e..2bdbc7d18b0eb 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+@@ -122,5 +122,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+index 5b2b7b8b3b123..b226bef3369cf 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+@@ -145,6 +145,14 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+index d0a26b643b82f..192b8db5a89c3 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+@@ -145,5 +145,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+index 9f21d6d6d35b7..0198b5f9e4a75 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+@@ -81,5 +81,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+index 2561072917021..73ff1694a4a0b 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+@@ -148,5 +148,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+index 707c561703ed8..55fc9f44cbc7f 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+@@ -227,6 +227,14 @@
+ 			label = "wan";
+ 		};
+ 
++		port@5 {
++			status = "disabled";
++		};
++
++		port@7 {
++			status = "disabled";
++		};
++
+ 		port@8 {
+ 			label = "cpu";
+ 		};
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+index 51ce510b3e3a0..48d6e04b36447 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+@@ -143,6 +143,14 @@
+ 			label = "wan";
+ 		};
+ 
++		port@5 {
++			status = "disabled";
++		};
++
++		port@7 {
++			status = "disabled";
++		};
++
+ 		port@8 {
+ 			label = "cpu";
+ 		};
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+index 60744f82c2b71..fe511d0b08f3c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+@@ -191,6 +191,14 @@
+ 			label = "wan";
+ 		};
+ 
++		port@5 {
++			status = "disabled";
++		};
++
++		port@7 {
++			status = "disabled";
++		};
++
+ 		port@8 {
+ 			label = "cpu";
+ 			phy-mode = "rgmii";
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+index e8991d4e248ce..e374062eb5b76 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+@@ -107,5 +107,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+index 6875625869d9c..0d1ac4151e4f9 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+@@ -83,5 +83,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+index 7cfa4607ef311..cf95af9db1e66 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+@@ -107,5 +107,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+index d55e10095eae7..992c19e1cfa17 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+@@ -75,5 +75,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+index ccf031c0e276d..4d0ba315a2049 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+@@ -147,5 +147,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+index 789dd2a3d2263..c2c2899d33d22 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+@@ -122,5 +122,13 @@
+ 		port@5 {
+ 			label = "cpu";
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+index 03ad614e6b721..0bf5106f7012c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+@@ -124,6 +124,14 @@
+ 				full-duplex;
+ 			};
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+index 26c12bfb0bdd4..25eeacf6a2484 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+@@ -185,6 +185,14 @@
+ 				full-duplex;
+ 			};
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/broadcom/bcm953012er.dts b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+index 4fe3b36533767..d939ec9f4a9e7 100644
+--- a/arch/arm/boot/dts/broadcom/bcm953012er.dts
++++ b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+@@ -84,6 +84,14 @@
+ 			label = "cpu";
+ 			ethernet = <&gmac0>;
+ 		};
++
++		port@7 {
++			status = "disabled";
++		};
++
++		port@8 {
++			status = "disabled";
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+index 884d99297d4cf..f516e0426bb9e 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+@@ -45,11 +45,11 @@
+ 
+ 		event-hall-sensor {
+ 			label = "Hall Effect Sensor";
+-			gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+-			interrupts = <&tlmm 110 IRQ_TYPE_EDGE_FALLING>;
++			gpios = <&tlmm 110 GPIO_ACTIVE_LOW>;
+ 			linux,input-type = <EV_SW>;
+ 			linux,code = <SW_LID>;
+ 			debounce-interval = <15>;
++			linux,can-disable;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+index b40c52ddf9b44..bfcb4fcf65460 100644
+--- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+@@ -46,14 +46,12 @@
+ 		};
+ 	};
+ 
+-	regulators {
+-		vsdcc_fixed: vsdcc-regulator {
+-			compatible = "regulator-fixed";
+-			regulator-name = "SDCC Power";
+-			regulator-min-microvolt = <2700000>;
+-			regulator-max-microvolt = <2700000>;
+-			regulator-always-on;
+-		};
++	vsdcc_fixed: vsdcc-regulator {
++		compatible = "regulator-fixed";
++		regulator-name = "SDCC Power";
++		regulator-min-microvolt = <2700000>;
++		regulator-max-microvolt = <2700000>;
++		regulator-always-on;
+ 	};
+ 
+ 	soc: soc {
+diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+index c66de9dd12dfc..6a83923aa4612 100644
+--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
++++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+@@ -239,7 +239,7 @@
+ 	};
+ 
+ 	keyboard_pins: keyboard {
+-		pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
++		pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
+ 		bias-pull-up;
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/am3517-evm.dts b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+index af9df15274bed..866f68c5b504d 100644
+--- a/arch/arm/boot/dts/ti/omap/am3517-evm.dts
++++ b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+@@ -271,13 +271,6 @@
+ 		>;
+ 	};
+ 
+-	leds_pins: leds-pins {
+-		pinctrl-single,pins = <
+-			OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu0.gpio_11 */
+-			OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu1.gpio_31 */
+-		>;
+-	};
+-
+ 	mmc1_pins: mmc1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc1_clk.sdmmc1_clk */
+@@ -355,3 +348,12 @@
+ 		>;
+ 	};
+ };
++
++&omap3_pmx_wkup {
++	leds_pins: leds-pins {
++		pinctrl-single,pins = <
++			OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu0.gpio_11 */
++			OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu1.gpio_31 */
++		>;
++	};
++};
+diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
+index f3cd04ff022df..9590dc0ba1688 100644
+--- a/arch/arm/include/asm/arm_pmuv3.h
++++ b/arch/arm/include/asm/arm_pmuv3.h
+@@ -23,6 +23,8 @@
+ #define PMUSERENR		__ACCESS_CP15(c9,  0, c14, 0)
+ #define PMINTENSET		__ACCESS_CP15(c9,  0, c14, 1)
+ #define PMINTENCLR		__ACCESS_CP15(c9,  0, c14, 2)
++#define PMCEID2			__ACCESS_CP15(c9,  0, c14, 4)
++#define PMCEID3			__ACCESS_CP15(c9,  0, c14, 5)
+ #define PMMIR			__ACCESS_CP15(c9,  0, c14, 6)
+ #define PMCCFILTR		__ACCESS_CP15(c14, 0, c15, 7)
+ 
+@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
+ 	return read_sysreg(PMCCNTR);
+ }
+ 
+-static inline void write_pmxevcntr(u32 val)
+-{
+-	write_sysreg(val, PMXEVCNTR);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+-	return read_sysreg(PMXEVCNTR);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+-	write_sysreg(val, PMXEVTYPER);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ 	write_sysreg(val, PMCNTENSET);
+@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
+ 	write_sysreg(val, PMUSERENR);
+ }
+ 
+-static inline u32 read_pmceid0(void)
+-{
+-	return read_sysreg(PMCEID0);
+-}
+-
+-static inline u32 read_pmceid1(void)
+-{
+-	return read_sysreg(PMCEID1);
+-}
+-
+ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+ static inline void kvm_clr_pmu_events(u32 clr) {}
+ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+@@ -229,6 +206,7 @@ static inline bool kvm_set_pmuserenr(u64 val)
+ 
+ /* PMU Version in DFR Register */
+ #define ARMV8_PMU_DFR_VER_NI        0
++#define ARMV8_PMU_DFR_VER_V3P1      0x4
+ #define ARMV8_PMU_DFR_VER_V3P4      0x5
+ #define ARMV8_PMU_DFR_VER_V3P5      0x6
+ #define ARMV8_PMU_DFR_VER_IMP_DEF   0xF
+@@ -249,4 +227,24 @@ static inline bool is_pmuv3p5(int pmuver)
+ 	return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+ }
+ 
++static inline u64 read_pmceid0(void)
++{
++	u64 val = read_sysreg(PMCEID0);
++
++	if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++		val |= (u64)read_sysreg(PMCEID2) << 32;
++
++	return val;
++}
++
++static inline u64 read_pmceid1(void)
++{
++	u64 val = read_sysreg(PMCEID1);
++
++	if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++		val |= (u64)read_sysreg(PMCEID3) << 32;
++
++	return val;
++}
++
+ #endif
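
The PMUv3p1 hunk above widens read_pmceid0()/read_pmceid1() to 64 bits by folding the PMCEID2/PMCEID3 halves into the top 32 bits. A stand-alone sketch of that bit-composition idiom (plain variables here, not the CP15 accessors; the values are made up):

#include <stdint.h>
#include <stdio.h>

/* Compose a 64-bit event-ID bitmap from two 32-bit halves, then
 * test whether a given common event number is implemented. */
static uint64_t compose_ceid(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint32_t pmceid0 = 0x000000ffu;	/* events 0..7 implemented */
	uint32_t pmceid2 = 0x00000001u;	/* extended event 32 implemented */
	uint64_t ceid = compose_ceid(pmceid0, pmceid2);

	printf("event 32 implemented: %d\n", (int)((ceid >> 32) & 1));
	return 0;
}
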
+diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
+index c6aded1b069cf..e2a1916013e75 100644
+--- a/arch/arm/include/asm/dma.h
++++ b/arch/arm/include/asm/dma.h
+@@ -12,6 +12,9 @@
+ 	extern phys_addr_t arm_dma_zone_size; \
+ 	arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
+ 		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
++
++extern phys_addr_t arm_dma_limit;
++#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
+ #endif
+ 
+ #ifdef CONFIG_ISA_DMA_API
+diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
+index d71ab61430b26..de75ae4d5ab41 100644
+--- a/arch/arm/lib/memset.S
++++ b/arch/arm/lib/memset.S
+@@ -17,6 +17,7 @@ ENTRY(__memset)
+ ENTRY(mmioset)
+ WEAK(memset)
+ UNWIND( .fnstart         )
++	and	r1, r1, #255		@ cast to unsigned char
+ 	ands	r3, r0, #3		@ 1 unaligned?
+ 	mov	ip, r0			@ preserve r0 as return value
+ 	bne	6f			@ 1
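
The single added "and r1, r1, #255" above enforces memset()'s C semantics in the byte-store path: the fill value arrives as an int, but the standard requires it to be converted to unsigned char before use. A minimal C illustration of the behaviour the fix guarantees (a sketch, not kernel code):

#include <assert.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4];

	/* The fill value is converted to unsigned char, so 0x101
	 * must behave exactly like 0x01. */
	memset(buf, 0x101, sizeof(buf));
	assert(buf[0] == 0x01 && buf[3] == 0x01);
	return 0;
}
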
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 7d59765aef220..6cdcd39fb0961 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
+ 	BUG_ON(err);
+ 	per_cpu(xen_vcpu, cpu) = vcpup;
+ 
+-	if (!xen_kernel_unmapped_at_usr())
+-		xen_setup_runstate_info(cpu);
+-
+ after_register_vcpu_info:
+ 	enable_percpu_irq(xen_events_irq, 0);
+ 	return 0;
+@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!xen_kernel_unmapped_at_usr())
+-		xen_time_setup_guest();
+-
+ 	if (xen_initial_domain())
+ 		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+ 
+@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
+ }
+ early_initcall(xen_guest_init);
+ 
+-static int __init xen_pm_init(void)
++static int xen_starting_runstate_cpu(unsigned int cpu)
++{
++	xen_setup_runstate_info(cpu);
++	return 0;
++}
++
++static int __init xen_late_init(void)
+ {
+ 	if (!xen_domain())
+ 		return -ENODEV;
+@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
+ 		do_settimeofday64(&ts);
+ 	}
+ 
+-	return 0;
++	if (xen_kernel_unmapped_at_usr())
++		return 0;
++
++	xen_time_setup_guest();
++
++	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
++				 "arm/xen_runstate:starting",
++				 xen_starting_runstate_cpu, NULL);
+ }
+-late_initcall(xen_pm_init);
++late_initcall(xen_late_init);
+ 
+ 
+ /* empty stubs */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 1a647d4072ba0..453254bd5f195 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -399,6 +399,7 @@
+ 						      "pll8k", "pll11k", "clkext3";
+ 					dmas = <&sdma2 24 25 0x80000000>;
+ 					dma-names = "rx";
++					#sound-dai-cells = <0>;
+ 					status = "disabled";
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index aa38dd6dc9ba5..1bb1d0c1bae4d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -371,6 +371,7 @@
+ 						      "pll8k", "pll11k", "clkext3";
+ 					dmas = <&sdma2 24 25 0x80000000>;
+ 					dma-names = "rx";
++					#sound-dai-cells = <0>;
+ 					status = "disabled";
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+index 1004ab0abb131..b9573fc36e6f7 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+@@ -285,7 +285,6 @@
+ 	usb_hub_2_x: hub@1 {
+ 		compatible = "usbbda,5411";
+ 		reg = <1>;
+-		reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ 		vdd-supply = <&reg_usb_hub>;
+ 		peer-hub = <&usb_hub_3_x>;
+ 	};
+@@ -294,7 +293,6 @@
+ 	usb_hub_3_x: hub@2 {
+ 		compatible = "usbbda,411";
+ 		reg = <2>;
+-		reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ 		vdd-supply = <&reg_usb_hub>;
+ 		peer-hub = <&usb_hub_2_x>;
+ 	};
+@@ -444,7 +442,6 @@
+ 	pinctrl_usb1: usb1grp {
+ 		fsl,pins = <
+ 			MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR				0x10
+-			MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25				0x19
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+index 7764b4146e0ab..2bbdacb1313f9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+@@ -8,5 +8,5 @@
+ };
+ 
+ &jpegenc {
+-	compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
++	compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
+ };
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+index 32cfb3e2efc3a..47d45ff3d6f57 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+@@ -120,7 +120,7 @@
+ 				       "mpp59", "mpp60", "mpp61";
+ 			marvell,function = "sdio";
+ 		};
+-		cp0_spi0_pins: cp0-spi-pins-0 {
++		cp0_spi1_pins: cp0-spi-pins-1 {
+ 			marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ 			marvell,function = "spi1";
+ 		};
+@@ -170,7 +170,7 @@
+ 
+ &cp0_spi1 {
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&cp0_spi0_pins>;
++	pinctrl-0 = <&cp0_spi1_pins>;
+ 	reg = <0x700680 0x50>,		/* control */
+ 	      <0x2000000 0x1000000>;	/* CS0 */
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+index c7de1ea0d470a..6eb6a175de38d 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+@@ -307,7 +307,7 @@
+ &cp0_spi1 {
+ 	status = "disabled";
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&cp0_spi0_pins>;
++	pinctrl-0 = <&cp0_spi1_pins>;
+ 	reg = <0x700680 0x50>;
+ 
+ 	flash@0 {
+@@ -371,7 +371,7 @@
+ 				       "mpp59", "mpp60", "mpp61";
+ 			marvell,function = "sdio";
+ 		};
+-		cp0_spi0_pins: cp0-spi-pins-0 {
++		cp0_spi1_pins: cp0-spi-pins-1 {
+ 			marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ 			marvell,function = "spi1";
+ 		};
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+index a8aa6e7d8fbc5..2e0fb61a1167f 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+@@ -28,7 +28,7 @@
+ 			flash@0 {
+ 				compatible = "jedec,spi-nor";
+ 				reg = <0>;
+-				spi-max-frequency = <136000000>;
++				spi-max-frequency = <102000000>;
+ 				spi-tx-bus-width = <4>;
+ 				spi-rx-bus-width = <4>;
+ 			};
+@@ -42,7 +42,7 @@
+ 		mmc@3400000 {
+ 			status = "okay";
+ 			bus-width = <4>;
+-			cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_HIGH>;
++			cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
+ 			disable-wp;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 0f12a8debd8ae..1a1dd35aff26a 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -43,12 +43,12 @@
+ 				     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++				     <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "okay";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 5ee098c12801c..b3bf4257213ac 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -198,6 +198,9 @@
+ 		pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
+ 
+ 		avdd-supply = <&pm8916_l6>;
++		a2vdd-supply = <&pm8916_l6>;
++		dvdd-supply = <&pm8916_l6>;
++		pvdd-supply = <&pm8916_l6>;
+ 		v1p2-supply = <&pm8916_l6>;
+ 		v3p3-supply = <&pm8916_l17>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 7582c7d748fe9..6b8705e965537 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1805,7 +1805,7 @@
+ 			#size-cells = <1>;
+ 			#iommu-cells = <1>;
+ 			compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+-			ranges = <0 0x01e20000 0x40000>;
++			ranges = <0 0x01e20000 0x20000>;
+ 			reg = <0x01ef0000 0x3000>;
+ 			clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ 				 <&gcc GCC_APSS_TCU_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 559a5d1ba615b..6318e100dd547 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1449,7 +1449,7 @@
+ 		apps_iommu: iommu@1ef0000 {
+ 			compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+ 			reg = <0x01ef0000 0x3000>;
+-			ranges = <0 0x01e20000 0x40000>;
++			ranges = <0 0x01e20000 0x20000>;
+ 			clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ 				 <&gcc GCC_APSS_TCU_CLK>;
+ 			clock-names = "iface", "bus";
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index 753b9a2105edd..c97b22fb1dc21 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -303,7 +303,7 @@
+ 	smp2p-modem {
+ 		compatible = "qcom,smp2p";
+ 		interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+-		qcom,ipc = <&apcs 8 13>;
++		qcom,ipc = <&apcs 8 14>;
+ 
+ 		qcom,local-pid = <0>;
+ 		qcom,remote-pid = <1>;
+@@ -326,7 +326,7 @@
+ 	smp2p-wcnss {
+ 		compatible = "qcom,smp2p";
+ 		interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
+-		qcom,ipc = <&apcs 8 17>;
++		qcom,ipc = <&apcs 8 18>;
+ 
+ 		qcom,local-pid = <0>;
+ 		qcom,remote-pid = <4>;
+@@ -428,9 +428,9 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		qcom,ipc-1 = <&apcs 8 12>;
++		qcom,ipc-1 = <&apcs 8 13>;
+ 		qcom,ipc-2 = <&apcs 8 9>;
+-		qcom,ipc-3 = <&apcs 8 18>;
++		qcom,ipc-3 = <&apcs 8 19>;
+ 
+ 		apps_smsm: apps@0 {
+ 			reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index fcca1ba94da69..5fe5de9ceef99 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -109,11 +109,6 @@
+ 			qcom,client-id = <1>;
+ 		};
+ 
+-		audio_mem: audio@cb400000 {
+-			reg = <0 0xcb000000 0 0x400000>;
+-			no-mem;
+-		};
+-
+ 		qseecom_mem: qseecom@cb400000 {
+ 			reg = <0 0xcb400000 0 0x1c00000>;
+ 			no-mem;
+diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+index ef36160932890..7eca89dc46aae 100644
+--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
++++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+@@ -13,7 +13,7 @@
+ 	compatible = "qcom,qrb2210-rb1", "qcom,qrb2210", "qcom,qcm2290";
+ 
+ 	aliases {
+-		serial0 = &uart0;
++		serial0 = &uart4;
+ 		sdhc1 = &sdhc_1;
+ 		sdhc2 = &sdhc_2;
+ 	};
+@@ -94,7 +94,7 @@
+ };
+ 
+ /* UART connected to the Micro-USB port via a FTDI chip */
+-&uart0 {
++&uart4 {
+ 	compatible = "qcom,geni-debug-uart";
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 925428a5f6aea..91bb58c6b1a61 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -649,18 +649,6 @@
+ 		};
+ 	};
+ 
+-	eud_typec: connector {
+-		compatible = "usb-c-connector";
+-
+-		ports {
+-			port@0 {
+-				con_eud: endpoint {
+-					remote-endpoint = <&eud_con>;
+-				};
+-			};
+-		};
+-	};
+-
+ 	memory@80000000 {
+ 		device_type = "memory";
+ 		/* We expect the bootloader to fill in the size */
+@@ -869,7 +857,8 @@
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>,
+ 				 <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ 				 <0>, <&pcie1_lane>,
+-				 <0>, <0>, <0>, <0>;
++				 <0>, <0>, <0>,
++				 <&usb_1_ssphy>;
+ 			clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ 				      "pcie_0_pipe_clk", "pcie_1_pipe_clk",
+ 				      "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+@@ -3624,6 +3613,8 @@
+ 			      <0 0x88e2000 0 0x1000>;
+ 			interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+ 
++			status = "disabled";
++
+ 			ports {
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+@@ -3634,13 +3625,6 @@
+ 						remote-endpoint = <&usb2_role_switch>;
+ 					};
+ 				};
+-
+-				port@1 {
+-					reg = <1>;
+-					eud_con: endpoint {
+-						remote-endpoint = <&con_eud>;
+-					};
+-				};
+ 			};
+ 		};
+ 
+@@ -5363,6 +5347,14 @@
+ 			reg = <0 0x18591000 0 0x1000>,
+ 			      <0 0x18592000 0 0x1000>,
+ 			      <0 0x18593000 0 0x1000>;
++
++			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "dcvsh-irq-0",
++					  "dcvsh-irq-1",
++					  "dcvsh-irq-2";
++
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ 			clock-names = "xo", "alternate";
+ 			#freq-domain-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index 1ce413263b7f9..74c47a8499a26 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -143,16 +143,20 @@
+ 	};
+ };
+ 
++&cpufreq_hw {
++	/delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
++};
++
+ &psci {
+-	/delete-node/ cpu0;
+-	/delete-node/ cpu1;
+-	/delete-node/ cpu2;
+-	/delete-node/ cpu3;
+-	/delete-node/ cpu4;
+-	/delete-node/ cpu5;
+-	/delete-node/ cpu6;
+-	/delete-node/ cpu7;
+-	/delete-node/ cpu-cluster0;
++	/delete-node/ power-domain-cpu0;
++	/delete-node/ power-domain-cpu1;
++	/delete-node/ power-domain-cpu2;
++	/delete-node/ power-domain-cpu3;
++	/delete-node/ power-domain-cpu4;
++	/delete-node/ power-domain-cpu5;
++	/delete-node/ power-domain-cpu6;
++	/delete-node/ power-domain-cpu7;
++	/delete-node/ power-domain-cluster;
+ };
+ 
+ &cpus {
+@@ -275,6 +279,14 @@
+ 			   &CLUSTER_SLEEP_0>;
+ };
+ 
++&lmh_cluster0 {
++	status = "disabled";
++};
++
++&lmh_cluster1 {
++	status = "disabled";
++};
++
+ /*
+  * Reserved memory changes
+  *
+@@ -338,6 +350,8 @@
+ 
+ 
+ &apps_rsc {
++	/delete-property/ power-domains;
++
+ 	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index b2d4336e764bf..2b91fe9bea6e4 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -715,6 +715,8 @@
+ 	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ 	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ 	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++	qcom,snoc-host-cap-8bit-quirk;
+ };
+ 
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index a596baa6ce3eb..367a083786e07 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -1204,7 +1204,7 @@
+ 
+ 		apps_smmu: iommu@c600000 {
+ 			compatible = "qcom,sm6125-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+-			reg = <0xc600000 0x80000>;
++			reg = <0x0c600000 0x80000>;
+ 			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 06c53000bb74d..19c6003dca153 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1893,8 +1893,12 @@
+ 			ranges;
+ 			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ 				 <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ 				 <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+-			clock-names = "aux", "cfg_ahb", "refgen";
++			clock-names = "aux",
++				      "cfg_ahb",
++				      "ref",
++				      "refgen";
+ 
+ 			resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -1991,8 +1995,12 @@
+ 			ranges;
+ 			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ 				 <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ 				 <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+-			clock-names = "aux", "cfg_ahb", "refgen";
++			clock-names = "aux",
++				      "cfg_ahb",
++				      "ref",
++				      "refgen";
+ 
+ 			resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ 			reset-names = "phy";
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index c236967725c1b..df0a4cc9c4358 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -2939,7 +2939,7 @@
+ 			};
+ 
+ 			qup_uart18_default: qup-uart18-default-state {
+-				pins = "gpio58", "gpio59";
++				pins = "gpio68", "gpio69";
+ 				function = "qup18";
+ 				drive-strength = <2>;
+ 				bias-disable;
+diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+index 589bf998bc528..92541e9842a24 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
++++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+@@ -58,7 +58,7 @@
+ 
+ 		ramoops: ramoops@9ca00000 {
+ 			compatible = "ramoops";
+-			reg = <0x00 0x9c700000 0x00 0x00100000>;
++			reg = <0x00 0x9ca00000 0x00 0x00100000>;
+ 			record-size = <0x8000>;
+ 			console-size = <0x8000>;
+ 			ftrace-size = <0x00>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index ecc0e13331c41..726afa29efe4c 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -232,7 +232,7 @@
+ 	status = "okay";
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_i2c1_pins_default>;
+-	clock-frequency = <400000>;
++	clock-frequency = <100000>;
+ 
+ 	exp1: gpio@22 {
+ 		compatible = "ti,tca6424";
+diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
+index 18dc2fb3d7b7b..c27404fa4418a 100644
+--- a/arch/arm64/include/asm/arm_pmuv3.h
++++ b/arch/arm64/include/asm/arm_pmuv3.h
+@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
+ 			ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ }
+ 
+-static inline void write_pmcr(u32 val)
++static inline void write_pmcr(u64 val)
+ {
+ 	write_sysreg(val, pmcr_el0);
+ }
+ 
+-static inline u32 read_pmcr(void)
++static inline u64 read_pmcr(void)
+ {
+ 	return read_sysreg(pmcr_el0);
+ }
+@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
+ 	return read_sysreg(pmccntr_el0);
+ }
+ 
+-static inline void write_pmxevcntr(u32 val)
+-{
+-	write_sysreg(val, pmxevcntr_el0);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+-	return read_sysreg(pmxevcntr_el0);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+-	write_sysreg(val, pmxevtyper_el0);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ 	write_sysreg(val, pmcntenset_el0);
+@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
+ 	write_sysreg(val, pmintenclr_el1);
+ }
+ 
+-static inline void write_pmccfiltr(u32 val)
++static inline void write_pmccfiltr(u64 val)
+ {
+ 	write_sysreg(val, pmccfiltr_el0);
+ }
+@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
+ 	write_sysreg(val, pmuserenr_el0);
+ }
+ 
+-static inline u32 read_pmceid0(void)
++static inline u64 read_pmceid0(void)
+ {
+ 	return read_sysreg(pmceid0_el0);
+ }
+ 
+-static inline u32 read_pmceid1(void)
++static inline u64 read_pmceid1(void)
+ {
+ 	return read_sysreg(pmceid1_el0);
+ }
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 74d00feb62f03..7c7493cb571f9 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -86,7 +86,8 @@
+ #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+ #define ARM_CPU_PART_CORTEX_A78C	0xD4B
+ 
+-#define APM_CPU_PART_POTENZA		0x000
++#define APM_CPU_PART_XGENE		0x000
++#define APM_CPU_VAR_POTENZA		0x00
+ 
+ #define CAVIUM_CPU_PART_THUNDERX	0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 20280a5233f67..eaa9ed4cfde59 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -874,7 +874,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
+ 		break;
+ 	case ARM_CPU_IMP_APM:
+ 		switch (part_number) {
+-		case APM_CPU_PART_POTENZA:
++		case APM_CPU_PART_XGENE:
+ 			return KVM_ARM_TARGET_XGENE_POTENZA;
+ 		}
+ 		break;
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+index 6fe46e7545566..0b4e5f8ce3e8a 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+@@ -69,9 +69,6 @@
+ 
+ #define _PTE_NONE_MASK	0
+ 
+-/* Until my rework is finished, 40x still needs atomic PTE updates */
+-#define PTE_ATOMIC_UPDATES	1
+-
+ #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define _PAGE_BASE	(_PAGE_BASE_NC)
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index b68898ac07e19..392404688cec3 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p)
+ 	return ret;
+ }
+ 
++static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk)
++{
++	unsigned long stack_page;
++
++	// A non-empty pt_regs should never have a zero MSR or TRAP value.
++	if (regs->msr || regs->trap)
++		return false;
++
++	// Check it sits at the very base of the stack
++	stack_page = (unsigned long)task_stack_page(tsk);
++	if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE)
++		return false;
++
++	return true;
++}
++
+ static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+ 
+ void __no_sanitize_address show_stack(struct task_struct *tsk,
+@@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
+ 			lr = regs->link;
+ 			printk("%s--- interrupt: %lx at %pS\n",
+ 			       loglvl, regs->trap, (void *)regs->nip);
+-			__show_regs(regs);
+-			printk("%s--- interrupt: %lx\n",
+-			       loglvl, regs->trap);
++
++			// Detect the case of an empty pt_regs at the very base
++			// of the stack and suppress showing it in full.
++			if (!empty_user_regs(regs, tsk)) {
++				__show_regs(regs);
++				printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
++			}
+ 
+ 			firstframe = 1;
+ 		}
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 109b93874df92..5455e819fb76b 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1164,6 +1164,7 @@ static void emulate_single_step(struct pt_regs *regs)
+ 		__single_step_exception(regs);
+ }
+ 
++#ifdef CONFIG_PPC_FPU_REGS
+ static inline int __parse_fpscr(unsigned long fpscr)
+ {
+ 	int ret = FPE_FLTUNK;
+@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
+ 
+ 	return ret;
+ }
++#endif
+ 
+ static void parse_fpe(struct pt_regs *regs)
+ {
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 9d229ef7f86ef..ada817c49b722 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
+  * core and trace-imc
+  */
+ static struct imc_pmu_ref imc_global_refc = {
+-	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
++	.lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
+ 	.id = 0,
+ 	.refc = 0,
+ };
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 77ea9335fd049..f381b177ea06a 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -4,6 +4,8 @@
+  * Copyright (C) 2019 Haren Myneni, IBM Corp
+  */
+ 
++#define pr_fmt(fmt)	"vas-api: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+ 	task_ref->mm = get_task_mm(current);
+ 	if (!task_ref->mm) {
+ 		put_pid(task_ref->pid);
+-		pr_err("VAS: pid(%d): mm_struct is not found\n",
++		pr_err("pid(%d): mm_struct is not found\n",
+ 				current->pid);
+ 		return -EPERM;
+ 	}
+@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
+ 	rc = kill_pid_info(SIGSEGV, &info, pid);
+ 	rcu_read_unlock();
+ 
+-	pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+-			pid_vnr(pid), rc);
++	pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
+ }
+ 
+ void vas_dump_crb(struct coprocessor_request_block *crb)
+@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ 
+ 	rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ 	if (rc) {
+-		pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
++		pr_err("copy_from_user() returns %d\n", rc);
+ 		return -EFAULT;
+ 	}
+ 
+@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ 	txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ 						cp_inst->coproc->cop_type);
+ 	if (IS_ERR(txwin)) {
+-		pr_err("%s() VAS window open failed, %ld\n", __func__,
++		pr_err_ratelimited("VAS window open failed rc=%ld\n",
+ 				PTR_ERR(txwin));
+ 		return PTR_ERR(txwin);
+ 	}
+@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ 	 * window is not opened. Shouldn't expect this error.
+ 	 */
+ 	if (!cp_inst || !cp_inst->txwin) {
+-		pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+-				__func__);
++		pr_err("Unexpected fault on paste address with TX window closed\n");
+ 		return VM_FAULT_SIGBUS;
+ 	}
+ 
+@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ 	 * issue NX request.
+ 	 */
+ 	if (txwin->task_ref.vma != vmf->vma) {
+-		pr_err("%s(): No previous mapping with paste address\n",
+-			__func__);
++		pr_err("No previous mapping with paste address\n");
+ 		return VM_FAULT_SIGBUS;
+ 	}
+ 
+@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	txwin = cp_inst->txwin;
+ 
+ 	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+-		pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
++		pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
+ 				(vma->vm_end - vma->vm_start), PAGE_SIZE);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Ensure instance has an open send window */
+ 	if (!txwin) {
+-		pr_err("%s(): No send window open?\n", __func__);
++		pr_err("No send window open?\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+-		pr_err("%s(): VAS API is not registered\n", __func__);
++		pr_err("VAS API is not registered\n");
+ 		return -EACCES;
+ 	}
+ 
+@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	 */
+ 	mutex_lock(&txwin->task_ref.mmap_mutex);
+ 	if (txwin->status != VAS_WIN_ACTIVE) {
+-		pr_err("%s(): Window is not active\n", __func__);
++		pr_err("Window is not active\n");
+ 		rc = -EACCES;
+ 		goto out;
+ 	}
+ 
+ 	paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ 	if (!paste_addr) {
+-		pr_err("%s(): Window paste address failed\n", __func__);
++		pr_err("Window paste address failed\n");
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ 			vma->vm_end - vma->vm_start, prot);
+ 
+-	pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+-			paste_addr, vma->vm_start, rc);
++	pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
++			vma->vm_start, rc);
+ 
+ 	txwin->task_ref.vma = vma;
+ 	vma->vm_ops = &vas_vm_ops;
+@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ 		goto err;
+ 	}
+ 
+-	pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+-			MINOR(devno));
++	pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
+ 
+ 	return 0;
+ 
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index cb2f1211f7ebf..d1fc396bb1de4 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -525,8 +525,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+ 
+ 	if (cmd) {
+ 		rc = init_cpu_associativity();
+-		if (rc)
++		if (rc) {
++			destroy_cpu_associativity();
+ 			goto out;
++		}
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 3fbc2a6aa319d..23d1637242682 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -340,7 +340,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ 
+ 	if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ 			atomic_read(&cop_feat_caps->nr_total_credits)) {
+-		pr_err("Credits are not available to allocate window\n");
++		pr_err_ratelimited("Credits are not available to allocate window\n");
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -423,7 +423,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ 
+ 	put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ 	rc = -EBUSY;
+-	pr_err("No credit is available to allocate window\n");
++	pr_err_ratelimited("No credit is available to allocate window\n");
+ 
+ out_free:
+ 	/*
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 9f0af4d795d88..f1c0fa6ece21d 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ 	if (out_qpage)
+ 		*out_qpage = be64_to_cpu(qpage);
+ 	if (out_qsize)
+-		*out_qsize = be32_to_cpu(qsize);
++		*out_qsize = be64_to_cpu(qsize);
+ 	if (out_qeoi_page)
+ 		*out_qeoi_page = be64_to_cpu(qeoi_page);
+ 	if (out_escalate_irq)
+diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
+index 22b13947bd131..8e7fc0edf21d3 100644
+--- a/arch/riscv/boot/Makefile
++++ b/arch/riscv/boot/Makefile
+@@ -17,6 +17,7 @@
+ KCOV_INSTRUMENT := n
+ 
+ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
++OBJCOPYFLAGS_loader.bin :=-O binary
+ OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+ 
+ targets := Image Image.* loader loader.o loader.lds loader.bin
+diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+index 8275630af977d..b8684312593e5 100644
+--- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
++++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+@@ -30,7 +30,6 @@
+ 			cpu0_intc: interrupt-controller {
+ 				compatible = "riscv,cpu-intc";
+ 				interrupt-controller;
+-				#address-cells = <0>;
+ 				#interrupt-cells = <1>;
+ 			};
+ 		};
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index 35b854cf078ed..fa8bf78fa9c5a 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -88,13 +88,14 @@ int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *har
+  */
+ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ {
+-	int rc;
+-
+ 	for (; node; node = node->parent) {
+ 		if (of_device_is_compatible(node, "riscv")) {
+-			rc = riscv_of_processor_hartid(node, hartid);
+-			if (!rc)
+-				return 0;
++			*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
++			if (*hartid == ~0UL) {
++				pr_warn("Found CPU without hart ID\n");
++				return -ENODEV;
++			}
++			return 0;
+ 		}
+ 	}
+ 
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index c449e7c1b20ff..8bcd6c1431a95 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -22,6 +22,17 @@ config STACK_DEBUG
+ 	  every function call and will therefore incur a major
+ 	  performance hit. Most users should say N.
+ 
++config EARLY_PRINTK
++	bool "Early printk"
++	depends on SH_STANDARD_BIOS
++	help
++	  Say Y here to redirect kernel printk messages to the serial port
++	  used by the SH-IPL bootloader, starting very early in the boot
++	  process and ending when the kernel's serial console is initialised.
++	  This option is only useful while porting the kernel to a new machine,
++	  when the kernel may crash or hang before the serial console is
++	  initialised.  If unsure, say N.
++
+ config 4KSTACKS
+ 	bool "Use 4Kb for kernel stacks instead of 8Kb"
+ 	depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
+diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S
+index b193c0a1d8db3..2eca5f43734fe 100644
+--- a/arch/x86/coco/tdx/tdcall.S
++++ b/arch/x86/coco/tdx/tdcall.S
+@@ -195,6 +195,7 @@ SYM_FUNC_END(__tdx_module_call)
+ 	xor %r10d, %r10d
+ 	xor %r11d, %r11d
+ 	xor %rdi,  %rdi
++	xor %rsi,  %rsi
+ 	xor %rdx,  %rdx
+ 
+ 	/* Restore callee-saved GPRs as mandated by the x86_64 ABI */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index c55cc243592e9..197ff4f4d1ceb 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -271,7 +271,7 @@
+ .Lskip_rsb_\@:
+ .endm
+ 
+-#ifdef CONFIG_CPU_UNRET_ENTRY
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+ #define CALL_UNTRAIN_RET	"call entry_untrain_ret"
+ #else
+ #define CALL_UNTRAIN_RET	""
+@@ -312,7 +312,7 @@
+ 
+ .macro UNTRAIN_RET_FROM_CALL
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+-	defined(CONFIG_CALL_DEPTH_TRACKING)
++	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+ 	VALIDATE_UNRET_END
+ 	ALTERNATIVE_3 "",						\
+ 		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 64df897c0ee30..1be13b2dfe8bf 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
++extern int numa_fill_memblks(u64 start, u64 end);
++#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 8bae40a662827..5c367c1290c35 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
+ #define copy_mc_to_kernel copy_mc_to_kernel
+ 
+ unsigned long __must_check
+-copy_mc_to_user(void *to, const void *from, unsigned len);
++copy_mc_to_user(void __user *to, const void *from, unsigned len);
+ #endif
+ 
+ /*
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 356de955e78dd..cab4d8b1535d6 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -112,6 +112,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index af49e24b46a43..7e0c7fbdc7d08 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -36,6 +36,8 @@
+ #include <linux/smp.h>
+ #include <linux/mm.h>
+ 
++#include <xen/xen.h>
++
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/pc-conf-reg.h>
+@@ -2408,6 +2410,15 @@ static int __init smp_init_primary_thread_mask(void)
+ {
+ 	unsigned int cpu;
+ 
++	/*
++	 * XEN/PV provides either none or useless topology information.
++	 * Pretend that all vCPUs are primary threads.
++	 */
++	if (xen_pv_domain()) {
++		cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
++		return 0;
++	}
++
+ 	for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
+ 		cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
+ 	return 0;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 10499bcd4e396..a55a3864df1c9 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline);
+ 
+ enum srso_mitigation {
+ 	SRSO_MITIGATION_NONE,
++	SRSO_MITIGATION_UCODE_NEEDED,
++	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
+ 	SRSO_MITIGATION_MICROCODE,
+ 	SRSO_MITIGATION_SAFE_RET,
+ 	SRSO_MITIGATION_IBPB,
+@@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd {
+ };
+ 
+ static const char * const srso_strings[] = {
+-	[SRSO_MITIGATION_NONE]           = "Vulnerable",
+-	[SRSO_MITIGATION_MICROCODE]      = "Mitigation: microcode",
+-	[SRSO_MITIGATION_SAFE_RET]	 = "Mitigation: safe RET",
+-	[SRSO_MITIGATION_IBPB]		 = "Mitigation: IBPB",
+-	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
++	[SRSO_MITIGATION_NONE]			= "Vulnerable",
++	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
++	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
++	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
++	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
++	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
++	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
+ };
+ 
+ static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+@@ -2409,10 +2413,7 @@ static void __init srso_select_mitigation(void)
+ 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ 		goto pred_cmd;
+ 
+-	if (!has_microcode) {
+-		pr_warn("IBPB-extending microcode not applied!\n");
+-		pr_warn(SRSO_NOTICE);
+-	} else {
++	if (has_microcode) {
+ 		/*
+ 		 * Zen1/2 with SMT off aren't vulnerable after the right
+ 		 * IBPB microcode has been applied.
+@@ -2425,10 +2426,15 @@ static void __init srso_select_mitigation(void)
+ 
+ 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+ 		if (has_microcode) {
+-			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
+ 			srso_mitigation = SRSO_MITIGATION_IBPB;
+-			goto pred_cmd;
++			goto out;
+ 		}
++	} else {
++		pr_warn("IBPB-extending microcode not applied!\n");
++		pr_warn(SRSO_NOTICE);
++
++		/* may be overwritten by SRSO_CMD_SAFE_RET below */
++		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
+ 	}
+ 
+ 	switch (srso_cmd) {
+@@ -2458,7 +2464,10 @@ static void __init srso_select_mitigation(void)
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO);
+ 				x86_return_thunk = srso_return_thunk;
+ 			}
+-			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++			if (has_microcode)
++				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++			else
++				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
+ 		} else {
+ 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+ 			goto pred_cmd;
+@@ -2493,10 +2502,11 @@ static void __init srso_select_mitigation(void)
+ 		break;
+ 	}
+ 
+-	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
++out:
++	pr_info("%s\n", srso_strings[srso_mitigation]);
+ 
+ pred_cmd:
+-	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++	if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
+ 	     boot_cpu_has(X86_FEATURE_SBPB))
+ 		x86_pred_cmd = PRED_CMD_SBPB;
+ }
+@@ -2704,9 +2714,7 @@ static ssize_t srso_show_state(char *buf)
+ 	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+ 		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+ 
+-	return sysfs_emit(buf, "%s%s\n",
+-			  srso_strings[srso_mitigation],
+-			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
++	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
+ }
+ 
+ static ssize_t gds_show_state(char *buf)
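
The reworked srso_strings[] above relies on C designated initializers indexed by the enum, so the new UCODE_NEEDED states can be inserted mid-enum without desynchronizing the table. A minimal sketch of the pattern (hypothetical enum and strings, not the kernel's):

#include <stdio.h>

enum demo_mitigation {
	DEMO_NONE,
	DEMO_UCODE_NEEDED,	/* inserted later; table stays correct */
	DEMO_FULL,
};

static const char * const demo_strings[] = {
	[DEMO_NONE]		= "Vulnerable",
	[DEMO_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[DEMO_FULL]		= "Mitigation: enabled",
};

int main(void)
{
	printf("%s\n", demo_strings[DEMO_UCODE_NEEDED]);
	return 0;
}
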
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 49f7629b17f73..bbc21798df10e 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+  * while the kernel still uses a direct mapping.
+  */
+ static struct desc_ptr startup_gdt_descr = {
+-	.size = sizeof(startup_gdt),
++	.size = sizeof(startup_gdt)-1,
+ 	.address = 0,
+ };
+ 
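
The startup_gdt_descr change above fixes the classic x86 descriptor-table off-by-one: the GDTR limit field holds the offset of the last valid byte, i.e. sizeof(table) - 1, not the table size. A simplified sketch of the convention (hypothetical structures, not the kernel's desc_struct/desc_ptr):

#include <stdint.h>
#include <stdio.h>

struct gdt_entry { uint64_t raw; };

/* The lgdt operand: limit is the last valid byte offset. */
struct gdt_ptr {
	uint16_t limit;
	uint64_t base;
} __attribute__((packed));

int main(void)
{
	static struct gdt_entry gdt[8];
	struct gdt_ptr ptr = {
		.limit = sizeof(gdt) - 1,	/* NOT sizeof(gdt) */
		.base  = (uint64_t)(uintptr_t)gdt,
	};

	printf("limit=%u covers %zu bytes\n", ptr.limit, sizeof(gdt));
	return 0;
}
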
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index a0c551846b35f..4766b6bed4439 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -507,12 +507,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
+ 	}
+ 	this_cpu_write(nmi_state, NMI_EXECUTING);
+ 	this_cpu_write(nmi_cr2, read_cr2());
++
++nmi_restart:
+ 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ 		WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ 		WARN_ON_ONCE(!(nsp->idt_seq & 0x1));
+ 		WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ 	}
+-nmi_restart:
+ 
+ 	/*
+ 	 * Needs to happen before DR7 is accessed, because the hypervisor can
+@@ -548,16 +549,16 @@ nmi_restart:
+ 
+ 	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+ 		write_cr2(this_cpu_read(nmi_cr2));
+-	if (this_cpu_dec_return(nmi_state))
+-		goto nmi_restart;
+-
+-	if (user_mode(regs))
+-		mds_user_clear_cpu_buffers();
+ 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ 		WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ 		WARN_ON_ONCE(nsp->idt_seq & 0x1);
+ 		WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ 	}
++	if (this_cpu_dec_return(nmi_state))
++		goto nmi_restart;
++
++	if (user_mode(regs))
++		mds_user_clear_cpu_buffers();
+ }
+ 
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
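
The nmi.c hunk above moves the nmi_restart label so the CONFIG_NMI_CHECK_CPU bookkeeping runs on every pass through the handler: idt_seq is bumped to an odd value on entry and back to even on exit, letting the hang-diagnostic code tell whether a CPU is currently inside the handler. A user-space sketch of that odd/even sequence idiom (single-threaded and purely illustrative):

#include <assert.h>

static unsigned long idt_seq;

static void nmi_enter(void) { idt_seq++; assert(idt_seq & 0x1); }
static void nmi_exit(void)  { idt_seq++; assert(!(idt_seq & 0x1)); }

int main(void)
{
	/* Two back-to-back handler passes: the counter goes odd on
	 * each entry and even on each exit, as after the fix. */
	for (int pass = 0; pass < 2; pass++) {
		nmi_enter();
		/* ... handle the NMI ... */
		nmi_exit();
	}
	assert(idt_seq == 4);
	return 0;
}
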
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+index 80efd45a77617..6e8b7e600def5 100644
+--- a/arch/x86/lib/copy_mc.c
++++ b/arch/x86/lib/copy_mc.c
+@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
+ }
+ EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+ 
+-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
+ {
+ 	unsigned long ret;
+ 
+ 	if (copy_mc_fragile_enabled) {
+ 		__uaccess_begin();
+-		ret = copy_mc_fragile(dst, src, len);
++		ret = copy_mc_fragile((__force void *)dst, src, len);
+ 		__uaccess_end();
+ 		return ret;
+ 	}
+ 
+ 	if (static_cpu_has(X86_FEATURE_ERMS)) {
+ 		__uaccess_begin();
+-		ret = copy_mc_enhanced_fast_string(dst, src, len);
++		ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
+ 		__uaccess_end();
+ 		return ret;
+ 	}
+ 
+-	return copy_user_generic(dst, src, len);
++	return copy_user_generic((__force void *)dst, src, len);
+ }
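
The copy_mc.c hunk above is an annotation fix: the destination of copy_mc_to_user() is a user pointer, so its declaration gains __user and the internal helpers receive it through explicit __force casts. Under sparse these expand to address-space attributes; in a normal build they are empty. A compact sketch of the mechanism (the #defines below only mimic the kernel's, for illustration):

#ifdef __CHECKER__
#define __user	__attribute__((noderef, address_space(1)))
#define __force	__attribute__((force))
#else
#define __user
#define __force
#endif

#include <string.h>

/* sparse now flags any caller passing a plain kernel pointer. */
static unsigned long copy_to_user_demo(void __user *dst,
				       const void *src, unsigned int len)
{
	/* __force documents a deliberate address-space cast. */
	memcpy((__force void *)dst, src, len);
	return 0;
}

int main(void)
{
	char src[4] = "abc", dst[4];

	return (int)copy_to_user_demo((__force void __user *)dst,
				      src, sizeof(src));
}
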
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc..6993f026adec9 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ 	unsigned long vaddr = (unsigned long)unsafe_src;
+ 
+ 	/*
+-	 * Range covering the highest possible canonical userspace address
+-	 * as well as non-canonical address range. For the canonical range
+-	 * we also need to include the userspace guard page.
++	 * Do not allow userspace addresses.  This disallows
++	 * normal userspace and the userspace guard page:
+ 	 */
+-	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+-	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++	if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++		return false;
++
++	/*
++	 * Allow everything during early boot before 'x86_virt_bits'
++	 * is initialized.  Needed for instruction decoding in early
++	 * exception handlers.
++	 */
++	if (!boot_cpu_data.x86_virt_bits)
++		return true;
++
++	return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2aadb2019b4f2..c01c5506fd4ae 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -11,6 +11,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/sched.h>
+ #include <linux/topology.h>
++#include <linux/sort.h>
+ 
+ #include <asm/e820/api.h>
+ #include <asm/proto.h>
+@@ -961,4 +962,83 @@ int memory_add_physaddr_to_nid(u64 start)
+ 	return nid;
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++
++static int __init cmp_memblk(const void *a, const void *b)
++{
++	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
++	const struct numa_memblk *mb = *(const struct numa_memblk **)b;
++
++	return ma->start - mb->start;
++}
++
++static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
++
++/**
++ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
++ * @start: address to begin fill
++ * @end: address to end fill
++ *
++ * Find and extend numa_meminfo memblks to cover the @start-@end
++ * physical address range, such that the first memblk includes
++ * @start, the last memblk includes @end, and any gaps in between
++ * are filled.
++ *
++ * RETURNS:
++ * 0		  : Success
++ * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ */
++
++int __init numa_fill_memblks(u64 start, u64 end)
++{
++	struct numa_memblk **blk = &numa_memblk_list[0];
++	struct numa_meminfo *mi = &numa_meminfo;
++	int count = 0;
++	u64 prev_end;
++
++	/*
++	 * Create a list of pointers to numa_meminfo memblks that
++	 * overlap start, end. Exclude (start == bi->end) since
++	 * end addresses in both a CFMWS range and a memblk range
++	 * are exclusive.
++	 *
++	 * This list of pointers is used to make in-place changes
++	 * that fill out the numa_meminfo memblks.
++	 */
++	for (int i = 0; i < mi->nr_blks; i++) {
++		struct numa_memblk *bi = &mi->blk[i];
++
++		if (start < bi->end && end >= bi->start) {
++			blk[count] = &mi->blk[i];
++			count++;
++		}
++	}
++	if (!count)
++		return NUMA_NO_MEMBLK;
++
++	/* Sort the list of pointers in memblk->start order */
++	sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
++
++	/* Make sure the first/last memblks include start/end */
++	blk[0]->start = min(blk[0]->start, start);
++	blk[count - 1]->end = max(blk[count - 1]->end, end);
++
++	/*
++	 * Fill any gaps by tracking the previous memblk's
++	 * end address and backfilling to it if needed.
++	 */
++	prev_end = blk[0]->end;
++	for (int i = 1; i < count; i++) {
++		struct numa_memblk *curr = blk[i];
++
++		if (prev_end >= curr->start) {
++			if (prev_end < curr->end)
++				prev_end = curr->end;
++		} else {
++			curr->start = prev_end;
++			prev_end = curr->end;
++		}
++	}
++	return 0;
++}
++
+ #endif
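
numa_fill_memblks() above is a sort-then-backfill pass: collect the memblks overlapping [start, end), sort them by start address, stretch the first and last out to the range boundaries, then walk the list extending each block back to its predecessor's end so no hole remains. A stand-alone replay of the same logic on made-up ranges (hypothetical data, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct memblk { uint64_t start, end; };

/* Comparing via explicit sign avoids truncating a 64-bit
 * difference to the comparator's int return type. */
static int cmp_start(const void *a, const void *b)
{
	const struct memblk *ma = a, *mb = b;

	return (ma->start > mb->start) - (ma->start < mb->start);
}

int main(void)
{
	/* Two blocks with a hole, inside the range [0x1000, 0x9000). */
	struct memblk blk[] = {
		{ 0x5000, 0x6000 },
		{ 0x2000, 0x3000 },
	};
	int count = 2;
	uint64_t start = 0x1000, end = 0x9000, prev_end;

	qsort(blk, count, sizeof(blk[0]), cmp_start);

	/* Make sure the first/last blocks include start/end. */
	if (blk[0].start > start)
		blk[0].start = start;
	if (blk[count - 1].end < end)
		blk[count - 1].end = end;

	/* Backfill each gap up to the previous block's end. */
	prev_end = blk[0].end;
	for (int i = 1; i < count; i++) {
		if (prev_end < blk[i].start)
			blk[i].start = prev_end;
		if (blk[i].end > prev_end)
			prev_end = blk[i].end;
	}

	for (int i = 0; i < count; i++)
		printf("[%#llx, %#llx)\n",
		       (unsigned long long)blk[i].start,
		       (unsigned long long)blk[i].end);
	return 0;
}

This prints [0x1000, 0x3000) and [0x3000, 0x9000): the requested range is covered with no gap.
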
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 438adb695daab..7172b0740abf9 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -963,6 +963,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
+ 
+ #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+ 
++/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
++#define RESTORE_TAIL_CALL_CNT(stack)				\
++	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -1538,9 +1542,7 @@ st:			if (is_imm8(insn->off))
+ 
+ 			func = (u8 *) __bpf_call_base + imm32;
+ 			if (tail_call_reachable) {
+-				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+-				EMIT3_off32(0x48, 0x8B, 0x85,
+-					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
++				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ 				if (!imm32)
+ 					return -EINVAL;
+ 				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
+@@ -1857,59 +1859,177 @@ emit_jmp:
+ 	return proglen;
+ }
+ 
+-static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
+-		      int stack_size)
++static void clean_stack_garbage(const struct btf_func_model *m,
++				u8 **pprog, int nr_stack_slots,
++				int stack_size)
++{
++	int arg_size, off;
++	u8 *prog;
++
++	/* Generally speaking, the compiler will pass the arguments
++	 * on-stack with "push" instruction, which will take 8-byte
++	 * on the stack. In this case, there won't be garbage values
++	 * while we copy the arguments from the origin stack frame to the
++	 * current one in BPF_DW.
++	 *
++	 * However, sometimes the compiler will only allocate 4-byte on
++	 * the stack for the arguments. For now, this case will only
++	 * happen if there is only one argument on-stack and its size
++	 * not more than 4 byte. In this case, there will be garbage
++	 * values on the upper 4-byte where we store the argument on
++	 * current stack frame.
++	 *
++	 * arguments on origin stack:
++	 *
++	 * stack_arg_1(4-byte) xxx(4-byte)
++	 *
++	 * what we copy:
++	 *
++	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
++	 *
++	 * and the xxx is the garbage values which we should clean here.
++	 */
++	if (nr_stack_slots != 1)
++		return;
++
++	/* the size of the last argument */
++	arg_size = m->arg_size[m->nr_args - 1];
++	if (arg_size <= 4) {
++		off = -(stack_size - 4);
++		prog = *pprog;
++		/* mov DWORD PTR [rbp + off], 0 */
++		if (!is_imm8(off))
++			EMIT2_off32(0xC7, 0x85, off);
++		else
++			EMIT3(0xC7, 0x45, off);
++		EMIT(0, 4);
++		*pprog = prog;
++	}
++}
++
++/* get the count of the regs that are used to pass arguments */
++static int get_nr_used_regs(const struct btf_func_model *m)
+ {
+-	int i, j, arg_size;
+-	bool next_same_struct = false;
++	int i, arg_regs, nr_used_regs = 0;
++
++	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
++		arg_regs = (m->arg_size[i] + 7) / 8;
++		if (nr_used_regs + arg_regs <= 6)
++			nr_used_regs += arg_regs;
++
++		if (nr_used_regs >= 6)
++			break;
++	}
++
++	return nr_used_regs;
++}
++
++static void save_args(const struct btf_func_model *m, u8 **prog,
++		      int stack_size, bool for_call_origin)
++{
++	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
++	int i, j;
+ 
+ 	/* Store function arguments to stack.
+ 	 * For a function that accepts two pointers the sequence will be:
+ 	 * mov QWORD PTR [rbp-0x10],rdi
+ 	 * mov QWORD PTR [rbp-0x8],rsi
+ 	 */
+-	for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
+-		/* The arg_size is at most 16 bytes, enforced by the verifier. */
+-		arg_size = m->arg_size[j];
+-		if (arg_size > 8) {
+-			arg_size = 8;
+-			next_same_struct = !next_same_struct;
+-		}
++	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
++		arg_regs = (m->arg_size[i] + 7) / 8;
+ 
+-		emit_stx(prog, bytes_to_bpf_size(arg_size),
+-			 BPF_REG_FP,
+-			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+-			 -(stack_size - i * 8));
++		/* According to the research of Yonghong, struct members
++		 * should be all in registers or all on the stack.
++		 * Meanwhile, the compiler will pass the argument on regs
++		 * if the remaining regs can hold the argument.
++		 *
++		 * Disorder of the args can happen. For example:
++		 *
++		 * struct foo_struct {
++		 *     long a;
++		 *     int b;
++		 * };
++		 * int foo(char, char, char, char, char, struct foo_struct,
++		 *         char);
++		 *
++		 * the arg1-5,arg7 will be passed by regs, and arg6 will
++		 * be passed on the stack.
++		 */
++		if (nr_regs + arg_regs > 6) {
++			/* copy function arguments from origin stack frame
++			 * into current stack frame.
++			 *
++			 * The starting address of the arguments on-stack
++			 * is:
++			 *   rbp + 8(push rbp) +
++			 *   8(return addr of origin call) +
++			 *   8(return addr of the caller)
++			 * which means: rbp + 24
++			 */
++			for (j = 0; j < arg_regs; j++) {
++				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
++					 nr_stack_slots * 8 + 0x18);
++				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
++					 -stack_size);
++
++				if (!nr_stack_slots)
++					first_off = stack_size;
++				stack_size -= 8;
++				nr_stack_slots++;
++			}
++		} else {
++			/* Only copy the arguments on-stack to current
++			 * 'stack_size' and ignore the regs, used to
++			 * prepare the arguments on-stack for the origin call.
++			 */
++			if (for_call_origin) {
++				nr_regs += arg_regs;
++				continue;
++			}
+ 
+-		j = next_same_struct ? j : j + 1;
++			/* copy the arguments from regs into stack */
++			for (j = 0; j < arg_regs; j++) {
++				emit_stx(prog, BPF_DW, BPF_REG_FP,
++					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
++					 -stack_size);
++				stack_size -= 8;
++				nr_regs++;
++			}
++		}
+ 	}
++
++	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
+ }
+ 
+-static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
++static void restore_regs(const struct btf_func_model *m, u8 **prog,
+ 			 int stack_size)
+ {
+-	int i, j, arg_size;
+-	bool next_same_struct = false;
++	int i, j, arg_regs, nr_regs = 0;
+ 
+ 	/* Restore function arguments from stack.
+ 	 * For a function that accepts two pointers the sequence will be:
+ 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
+ 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
++	 *
++	 * The logic here is similar to what we do in save_args()
+ 	 */
+-	for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
+-		/* The arg_size is at most 16 bytes, enforced by the verifier. */
+-		arg_size = m->arg_size[j];
+-		if (arg_size > 8) {
+-			arg_size = 8;
+-			next_same_struct = !next_same_struct;
++	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
++		arg_regs = (m->arg_size[i] + 7) / 8;
++		if (nr_regs + arg_regs <= 6) {
++			for (j = 0; j < arg_regs; j++) {
++				emit_ldx(prog, BPF_DW,
++					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
++					 BPF_REG_FP,
++					 -stack_size);
++				stack_size -= 8;
++				nr_regs++;
++			}
++		} else {
++			stack_size -= 8 * arg_regs;
+ 		}
+ 
+-		emit_ldx(prog, bytes_to_bpf_size(arg_size),
+-			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+-			 BPF_REG_FP,
+-			 -(stack_size - i * 8));
+-
+-		j = next_same_struct ? j : j + 1;
++		if (nr_regs >= 6)
++			break;
+ 	}
+ }
+ 
+@@ -1938,7 +2058,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 	/* arg1: mov rdi, progs[i] */
+ 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+ 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
+-	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
++	if (!is_imm8(-run_ctx_off))
++		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
++	else
++		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
+ 
+ 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
+ 		return -EINVAL;
+@@ -1954,7 +2077,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 	emit_nops(&prog, 2);
+ 
+ 	/* arg1: lea rdi, [rbp - stack_size] */
+-	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
++	if (!is_imm8(-stack_size))
++		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
++	else
++		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+ 	/* arg2: progs[i]->insnsi for interpreter */
+ 	if (!p->jited)
+ 		emit_mov_imm64(&prog, BPF_REG_2,
+@@ -1984,7 +2110,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 	/* arg2: mov rsi, rbx <- start time in nsec */
+ 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ 	/* arg3: lea rdx, [rbp - run_ctx_off] */
+-	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
++	if (!is_imm8(-run_ctx_off))
++		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
++	else
++		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
+ 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
+ 		return -EINVAL;
+ 
+@@ -2136,7 +2265,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 				void *func_addr)
+ {
+ 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
+-	int regs_off, nregs_off, ip_off, run_ctx_off;
++	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
+ 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+@@ -2150,8 +2279,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
+ 
+-	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
+-	if (nr_regs > 6)
++	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Args 1-6
++	 * are passed in regs; the remainder are passed on the stack.
++	 */
++	if (nr_regs > MAX_BPF_FUNC_ARGS)
+ 		return -ENOTSUPP;
+ 
+ 	/* Generated trampoline stack layout:
+@@ -2170,7 +2301,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	 *
+ 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
+ 	 *
++	 * RBP - rbx_off   [ rbx value       ]  always
++	 *
+ 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
++	 *
++	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
++	 *                     [ ...        ]
++	 *                     [ stack_arg2 ]
++	 * RBP - arg_stack_off [ stack_arg1 ]
++	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ 	 */
+ 
+ 	/* room for return value of orig_call or fentry prog */
+@@ -2190,9 +2329,26 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 
+ 	ip_off = stack_size;
+ 
++	stack_size += 8;
++	rbx_off = stack_size;
++
+ 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
+ 	run_ctx_off = stack_size;
+ 
++	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
++		/* the space used to pass arguments on the stack */
++		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
++		/* make sure the stack pointer is 16-byte aligned if we
++		 * need to pass arguments on the stack, which means
++		 *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
++		 * should be 16-byte aligned. The following code depends
++		 * on stack_size already being 8-byte aligned.
++		 */
++		stack_size += (stack_size % 16) ? 0 : 8;
++	}
++
++	arg_stack_off = stack_size;
++
+ 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
+ 		/* skip patched call instruction and point orig_call to actual
+ 		 * body of the kernel function.
+@@ -2212,8 +2368,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	x86_call_depth_emit_accounting(&prog, NULL);
+ 	EMIT1(0x55);		 /* push rbp */
+ 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+-	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
+-	EMIT1(0x53);		 /* push rbx */
++	if (!is_imm8(stack_size))
++		/* sub rsp, stack_size */
++		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
++	else
++		/* sub rsp, stack_size */
++		EMIT4(0x48, 0x83, 0xEC, stack_size);
++	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++		EMIT1(0x50);		/* push rax */
++	/* mov QWORD PTR [rbp - rbx_off], rbx */
++	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
+ 
+ 	/* Store number of argument registers of the traced function:
+ 	 *   mov rax, nr_regs
+@@ -2231,7 +2395,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
+ 	}
+ 
+-	save_regs(m, &prog, nr_regs, regs_off);
++	save_args(m, &prog, regs_off, false);
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ 		/* arg1: mov rdi, im */
+@@ -2261,11 +2425,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	}
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+-		restore_regs(m, &prog, nr_regs, regs_off);
++		restore_regs(m, &prog, regs_off);
++		save_args(m, &prog, arg_stack_off, true);
++
++		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++			/* Before calling the original function, restore the
++			 * tail_call_cnt from stack to rax.
++			 */
++			RESTORE_TAIL_CALL_CNT(stack_size);
+ 
+ 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
+-			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+-			EMIT2(0xff, 0xd0); /* call *rax */
++			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
++			EMIT2(0xff, 0xd3); /* call *rbx */
+ 		} else {
+ 			/* call original function */
+ 			if (emit_rsb_call(&prog, orig_call, prog)) {
+@@ -2302,7 +2473,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 		}
+ 
+ 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
+-		restore_regs(m, &prog, nr_regs, regs_off);
++		restore_regs(m, &prog, regs_off);
+ 
+ 	/* This needs to be done regardless. If there were fmod_ret programs,
+ 	 * the return value is only updated on the stack and still needs to be
+@@ -2316,12 +2487,17 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 			ret = -EINVAL;
+ 			goto cleanup;
+ 		}
+-	}
++	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++		/* Before running the original function, restore the
++		 * tail_call_cnt from stack to rax.
++		 */
++		RESTORE_TAIL_CALL_CNT(stack_size);
++
+ 	/* restore return value of orig_call or fentry prog back into RAX */
+ 	if (save_ret)
+ 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+ 
+-	EMIT1(0x5B); /* pop rbx */
++	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
+ 	EMIT1(0xC9); /* leave */
+ 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ 		/* skip our return address and return to parent */
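
The register accounting above — each argument occupies (size + 7) / 8 eight-byte slots, a struct goes either entirely into registers or entirely onto the stack, and a later small argument may still claim a leftover register — can be exercised in plain C. A minimal userspace sketch under those assumptions; nr_used_regs(), MAX_ARGS and the sizes[] table are hypothetical stand-ins, not kernel API:

#include <stdio.h>

#define MAX_ARGS 12	/* stand-in for MAX_BPF_FUNC_ARGS */

/* mirrors the slot accounting in get_nr_used_regs() above */
static int nr_used_regs(const int *arg_size, int nr_args)
{
	int i, arg_regs, used = 0;

	for (i = 0; i < nr_args && i < MAX_ARGS; i++) {
		arg_regs = (arg_size[i] + 7) / 8;
		if (used + arg_regs <= 6)	/* fits in rdi..r9 */
			used += arg_regs;
		if (used >= 6)
			break;
	}
	return used;
}

int main(void)
{
	/* foo(char x5, struct { long; int; }, char): arg6 spills */
	int sizes[] = { 1, 1, 1, 1, 1, 12, 1 };

	printf("%d\n", nr_used_regs(sizes, 7));	/* prints 6 */
	return 0;
}

With these sizes the 12-byte struct needs two slots while only one register remains, so it spills to the stack and the trailing char still lands in r9 — exactly the out-of-order case the comment describes.
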
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 9866468c72a2a..3e01b4afb90ce 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -500,8 +500,8 @@ static inline void bio_check_ro(struct bio *bio)
+ 	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
+ 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ 			return;
+-		pr_warn("Trying to write to read-only block-device %pg\n",
+-			bio->bi_bdev);
++		pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
++				    bio->bi_bdev);
+ 		/* Older lvm-tools actually trigger this */
+ 	}
+ }
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 1ef3b46d6f6e5..59ec726b7c770 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION
+ 	  signed PE binary.
+ 
+ config FIPS_SIGNATURE_SELFTEST
+-	bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
++	tristate "Run FIPS selftests on the X.509+PKCS7 signature verification"
+ 	help
+ 	  This option causes some selftests to be run on the signature
+ 	  verification code, using some built in data.  This is required
+@@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST
+ 	depends on KEYS
+ 	depends on ASYMMETRIC_KEY_TYPE
+ 	depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
++	depends on X509_CERTIFICATE_PARSER
+ 
+ endif # ASYMMETRIC_KEY_TYPE
+diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
+index 0d1fa1b692c6b..1a273d6df3ebf 100644
+--- a/crypto/asymmetric_keys/Makefile
++++ b/crypto/asymmetric_keys/Makefile
+@@ -22,7 +22,8 @@ x509_key_parser-y := \
+ 	x509_cert_parser.o \
+ 	x509_loader.o \
+ 	x509_public_key.o
+-x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
++obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
++x509_selftest-y += selftest.o
+ 
+ $(obj)/x509_cert_parser.o: \
+ 	$(obj)/x509.asn1.h \
+diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
+index fa0bf7f242849..c50da7ef90ae9 100644
+--- a/crypto/asymmetric_keys/selftest.c
++++ b/crypto/asymmetric_keys/selftest.c
+@@ -4,10 +4,11 @@
+  * Written by David Howells (dhowells@redhat.com)
+  */
+ 
+-#include <linux/kernel.h>
++#include <crypto/pkcs7.h>
+ #include <linux/cred.h>
++#include <linux/kernel.h>
+ #include <linux/key.h>
+-#include <crypto/pkcs7.h>
++#include <linux/module.h>
+ #include "x509_parser.h"
+ 
+ struct certs_test {
+@@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = {
+ 	TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
+ };
+ 
+-int __init fips_signature_selftest(void)
++static int __init fips_signature_selftest(void)
+ {
+ 	struct key *keyring;
+ 	int ret, i;
+@@ -222,3 +223,9 @@ int __init fips_signature_selftest(void)
+ 	key_put(keyring);
+ 	return 0;
+ }
++
++late_initcall(fips_signature_selftest);
++
++MODULE_DESCRIPTION("X.509 self tests");
++MODULE_AUTHOR("Red Hat, Inc.");
++MODULE_LICENSE("GPL");
+diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
+index a299c9c56f409..97a886cbe01c3 100644
+--- a/crypto/asymmetric_keys/x509_parser.h
++++ b/crypto/asymmetric_keys/x509_parser.h
+@@ -40,15 +40,6 @@ struct x509_certificate {
+ 	bool		blacklisted;
+ };
+ 
+-/*
+- * selftest.c
+- */
+-#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
+-extern int __init fips_signature_selftest(void);
+-#else
+-static inline int fips_signature_selftest(void) { return 0; }
+-#endif
+-
+ /*
+  * x509_cert_parser.c
+  */
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 7c71db3ac23d4..6a4f00be22fc1 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = {
+ /*
+  * Module stuff
+  */
+-extern int __init certs_selftest(void);
+ static int __init x509_key_init(void)
+ {
+-	int ret;
+-
+-	ret = register_asymmetric_key_parser(&x509_key_parser);
+-	if (ret < 0)
+-		return ret;
+-	return fips_signature_selftest();
++	return register_asymmetric_key_parser(&x509_key_parser);
+ }
+ 
+ static void __exit x509_key_exit(void)
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+index 20c4583f12b0d..31c74ca70a2e5 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+@@ -8149,11 +8149,11 @@ static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u
+ 		eng_id[num_of_eng] = razwi_info[i].eng_id;
+ 		base[num_of_eng] = razwi_info[i].rtr_ctrl;
+ 		if (!num_of_eng)
+-			str_size += snprintf(eng_name + str_size,
++			str_size += scnprintf(eng_name + str_size,
+ 						PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
+ 						razwi_info[i].eng_name);
+ 		else
+-			str_size += snprintf(eng_name + str_size,
++			str_size += scnprintf(eng_name + str_size,
+ 						PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
+ 						razwi_info[i].eng_name);
+ 		num_of_eng++;
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index b9bbf07461992..a34d8578b3da6 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -158,8 +158,8 @@ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalia
+ 		return 0;
+ 
+ 	len = snprintf(modalias, size, "acpi:");
+-	if (len <= 0)
+-		return len;
++	if (len >= size)
++		return -ENOMEM;
+ 
+ 	size -= len;
+ 
+@@ -212,8 +212,10 @@ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias
+ 	len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ 	ACPI_FREE(buf.pointer);
+ 
+-	if (len <= 0)
+-		return len;
++	if (len >= size)
++		return -ENOMEM;
++
++	size -= len;
+ 
+ 	of_compatible = acpi_dev->data.of_compatible;
+ 	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
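
Both hunks above lean on the same C99 rule: snprintf() returns the number of characters the full result would have needed, so a return value >= the buffer size signals truncation, which the old `len <= 0` test could never catch. A minimal illustration with a deliberately undersized buffer:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int len = snprintf(buf, sizeof(buf), "acpi:%s", "TOOLONGID");

	if (len >= (int)sizeof(buf))
		printf("truncated: wanted %d characters\n", len);
	return 0;
}
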
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 1f4fc5f8a819d..12f330b0eac01 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ 	start = cfmws->base_hpa;
+ 	end = cfmws->base_hpa + cfmws->window_size;
+ 
+-	/* Skip if the SRAT already described the NUMA details for this HPA */
+-	node = phys_to_target_node(start);
+-	if (node != NUMA_NO_NODE)
++	/*
++	 * The SRAT may have already described NUMA details for all,
++	 * or a portion of, this CFMWS HPA range. Extend the memblks
++	 * found for any portion of the window to cover the entire
++	 * window.
++	 */
++	if (!numa_fill_memblks(start, end))
+ 		return 0;
+ 
++	/* No SRAT description. Create a new node. */
+ 	node = acpi_map_pxm_to_node(*fake_pxm);
+ 
+ 	if (node == NUMA_NO_NODE) {
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 413e4fcadcaf7..99b4e33554355 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1102,25 +1102,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 	switch (proptype) {
+ 	case DEV_PROP_STRING:
+ 		break;
+-	case DEV_PROP_U8 ... DEV_PROP_U64:
++	default:
+ 		if (obj->type == ACPI_TYPE_BUFFER) {
+ 			if (nval > obj->buffer.length)
+ 				return -EOVERFLOW;
+-			break;
++		} else {
++			if (nval > obj->package.count)
++				return -EOVERFLOW;
+ 		}
+-		fallthrough;
+-	default:
+-		if (nval > obj->package.count)
+-			return -EOVERFLOW;
+ 		break;
+ 	}
+ 	if (nval == 0)
+ 		return -EINVAL;
+ 
+-	if (obj->type != ACPI_TYPE_BUFFER)
+-		items = obj->package.elements;
+-	else
++	if (obj->type == ACPI_TYPE_BUFFER) {
++		if (proptype != DEV_PROP_U8)
++			return -EPROTO;
+ 		items = obj;
++	} else {
++		items = obj->package.elements;
++	}
+ 
+ 	switch (proptype) {
+ 	case DEV_PROP_U8:
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 442396f6ed1f9..31205fee59d4a 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
++static int video_detect_portege_r100(const struct dmi_system_id *d)
++{
++	struct pci_dev *dev;
++	/* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
++	dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
++	if (dev)
++		acpi_backlight_dmi = acpi_backlight_vendor;
++	return 0;
++}
++
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	/*
+ 	 * Models which should use the vendor backlight interface,
+@@ -270,6 +280,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
++	/*
++	 * The Toshiba Portégé R100 has both acpi_video and the toshiba_acpi
++	 * vendor driver working, but neither gets activated because it has a
++	 * VGA chip with no kernel driver (Trident CyberBlade XP4m32).
++	 * The DMI strings are generic, so check for the VGA chip in the callback.
++	 */
++	{
++	 .callback = video_detect_portege_r100,
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
++		DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
++		},
++	},
++
+ 	/*
+ 	 * Models which need acpi_video backlight control where the GPU drivers
+ 	 * do not call acpi_video_register_backlight() because no internal panel
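
The DMI callback above disambiguates generic DMI strings by probing for a PCI device. One detail worth noting when reusing the pattern: pci_get_device() hands back a referenced struct pci_dev, so a standalone user would normally drop it with pci_dev_put(). A sketch of that probe; has_trident_cyberblade() is a hypothetical helper:

#include <linux/pci.h>

static bool has_trident_cyberblade(void)
{
	struct pci_dev *dev;

	dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
	if (!dev)
		return false;
	pci_dev_put(dev);	/* balance the reference we were handed */
	return true;
}
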
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index f36027591e1a8..bdd80b73c3e6c 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
+ 		name = map->dev->driver->name;
+ 
+ 	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+-	if (ret < 0) {
++	if (ret >= PAGE_SIZE) {
+ 		kfree(buf);
+ 		return ret;
+ 	}
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index bb2f41043f602..0d7ed11b089c3 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1714,17 +1714,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ 	}
+ 
+ 	if (!map->cache_bypass && map->format.parse_val) {
+-		unsigned int ival;
++		unsigned int ival, offset;
+ 		int val_bytes = map->format.val_bytes;
+-		for (i = 0; i < val_len / val_bytes; i++) {
+-			ival = map->format.parse_val(val + (i * val_bytes));
+-			ret = regcache_write(map,
+-					     reg + regmap_get_offset(map, i),
+-					     ival);
++
++		/* Cache the last written value for noinc writes */
++		i = noinc ? val_len - val_bytes : 0;
++		for (; i < val_len; i += val_bytes) {
++			ival = map->format.parse_val(val + i);
++			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
++			ret = regcache_write(map, reg + offset, ival);
+ 			if (ret) {
+ 				dev_err(map->dev,
+ 					"Error in caching of register: %x ret: %d\n",
+-					reg + regmap_get_offset(map, i), ret);
++					reg + offset, ret);
+ 				return ret;
+ 			}
+ 		}
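
The regmap change above starts the caching loop at the final element for noinc writes, since every value in a no-increment block lands on the same register and only the last one survives in hardware. A toy model of that rule; reg_cache[] is a hypothetical stand-in for regcache_write() and little-endian memcpy() stands in for map->format.parse_val():

#include <stddef.h>
#include <string.h>

static unsigned int reg_cache[16];	/* caller keeps reg + offset < 16 */

static void cache_raw_write(unsigned int reg, const unsigned char *val,
			    size_t val_len, size_t val_bytes, int noinc)
{
	/* noinc: only the last value written is worth caching */
	size_t i = noinc ? val_len - val_bytes : 0;

	for (; i < val_len; i += val_bytes) {
		unsigned int ival = 0;
		unsigned int offset = noinc ? 0 : i / val_bytes;

		memcpy(&ival, val + i, val_bytes);	/* "parse_val" */
		reg_cache[reg + offset] = ival;
	}
}
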
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 8576d696c7a22..f6a9eda9fbb20 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ 	struct gendisk *disk = nbd->disk;
+ 
+ 	del_gendisk(disk);
+-	put_disk(disk);
+ 	blk_mq_free_tag_set(&nbd->tag_set);
+ 
+ 	/*
+@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ 	idr_remove(&nbd_index_idr, nbd->index);
+ 	mutex_unlock(&nbd_index_mutex);
+ 	destroy_workqueue(nbd->recv_workq);
+-	kfree(nbd);
++	put_disk(disk);
+ }
+ 
+ static void nbd_dev_remove_work(struct work_struct *work)
+@@ -1609,6 +1608,13 @@ static void nbd_release(struct gendisk *disk)
+ 	nbd_put(nbd);
+ }
+ 
++static void nbd_free_disk(struct gendisk *disk)
++{
++	struct nbd_device *nbd = disk->private_data;
++
++	kfree(nbd);
++}
++
+ static const struct block_device_operations nbd_fops =
+ {
+ 	.owner =	THIS_MODULE,
+@@ -1616,6 +1622,7 @@ static const struct block_device_operations nbd_fops =
+ 	.release =	nbd_release,
+ 	.ioctl =	nbd_ioctl,
+ 	.compat_ioctl =	nbd_ioctl,
++	.free_disk =	nbd_free_disk,
+ };
+ 
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
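
The nbd fix above reorders teardown because open file handles can keep the gendisk alive past nbd_dev_remove(); freeing the private data is therefore deferred to the ->free_disk() hook, which the block layer invokes only once the last disk reference drops. A sketch of the wiring for a hypothetical driver "foo":

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/slab.h>

static void foo_free_disk(struct gendisk *disk)
{
	kfree(disk->private_data);	/* safe: no disk users remain */
}

static const struct block_device_operations foo_fops = {
	.owner     = THIS_MODULE,
	.free_disk = foo_free_disk,
};
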
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index e98fcac578d66..634eab4776f32 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ 	while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ 		if (!wait)
+ 			return 0;
+-		hwrng_msleep(rng, 1000);
++		hwrng_yield(rng);
+ 	}
+ 
+ 	num_words = rng_readl(priv, RNG_STATUS) >> 24;
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index f34d356fe2c06..599a4bc2c5484 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -679,6 +679,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+ }
+ EXPORT_SYMBOL_GPL(hwrng_msleep);
+ 
++long hwrng_yield(struct hwrng *rng)
++{
++	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
++}
++EXPORT_SYMBOL_GPL(hwrng_yield);
++
+ static int __init hwrng_modinit(void)
+ {
+ 	int ret;
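
hwrng_yield() above sleeps for a single jiffy on the rng->dying completion, so a polling driver backs off without the full hwrng_msleep() delay and still wakes immediately on teardown. A sketch of a read loop using it, mirroring the bcm2835 change; example_priv, data_ready() and read_words() are hypothetical:

#include <linux/hw_random.h>

/* hypothetical per-device state and helpers, for illustration only */
struct example_priv { void __iomem *base; };
static bool data_ready(struct example_priv *priv);
static int read_words(struct example_priv *priv, void *buf, size_t max);

static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	struct example_priv *priv = (struct example_priv *)rng->priv;

	while (!data_ready(priv)) {
		if (!wait)
			return 0;	/* non-blocking caller */
		hwrng_yield(rng);	/* one-jiffy nap, or until dying */
	}
	return read_words(priv, buf, max);
}
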
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 12fbe80918319..159baf00a8675 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -58,7 +58,8 @@ struct amd_geode_priv {
+ 
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+-	void __iomem *mem = (void __iomem *)rng->priv;
++	struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++	void __iomem *mem = priv->membase;
+ 
+ 	*data = readl(mem + GEODE_RNG_DATA_REG);
+ 
+@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ 
+ static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+-	void __iomem *mem = (void __iomem *)rng->priv;
++	struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++	void __iomem *mem = priv->membase;
+ 	int data, i;
+ 
+ 	for (i = 0; i < 20; i++) {
+diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
+index e319cfa51a8a3..030186def9c69 100644
+--- a/drivers/clk/clk-npcm7xx.c
++++ b/drivers/clk/clk-npcm7xx.c
+@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
+ 	return;
+ 
+ npcm7xx_init_fail:
+-	kfree(npcm7xx_clk_data->hws);
++	kfree(npcm7xx_clk_data);
+ npcm7xx_init_np_err:
+ 	iounmap(clk_base);
+ npcm7xx_init_error:
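
The one-liner above fixes an error-path kfree(): hws[] is a flexible array member living inside the clk_hw_onecell_data allocation made with struct_size(), so the container pointer is the only thing kfree() may receive. A short sketch of the allocation shape; alloc_clk_data() is a hypothetical helper:

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct clk_hw_onecell_data *alloc_clk_data(unsigned int num)
{
	struct clk_hw_onecell_data *d;

	/* one allocation: header plus num trailing hws[] slots */
	d = kzalloc(struct_size(d, hws, num), GFP_KERNEL);
	if (d)
		d->num = num;
	return d;
}

/* error path: kfree(d), never kfree(d->hws) -- hws is not a
 * separate allocation
 */
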
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 2c7a830ce3080..fdec715c9ba9b 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ 		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ 		if (!sclk->info) {
+ 			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
++			devm_kfree(dev, sclk);
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
+index f6b82e0b9703a..db3bca5f4ec9c 100644
+--- a/drivers/clk/imx/Kconfig
++++ b/drivers/clk/imx/Kconfig
+@@ -96,6 +96,7 @@ config CLK_IMX8QXP
+ 	depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ 	depends on IMX_SCU && HAVE_ARM_SMCCC
+ 	select MXC_CLK_SCU
++	select MXC_CLK
+ 	help
+ 	  Build the driver for IMX8QXP SCU based clocks.
+ 
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 4bd65879fcd34..f70ed231b92d6 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	void __iomem *base;
+ 	int err;
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+-					  IMX8MQ_CLK_END), GFP_KERNEL);
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
+ 	if (WARN_ON(!clk_hw_data))
+ 		return -ENOMEM;
+ 
+@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MQ_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4");
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+-	base = of_iomap(np, 0);
++	base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+-	if (WARN_ON(!base))
+-		return -ENOMEM;
++	if (WARN_ON(IS_ERR(base))) {
++		err = PTR_ERR(base);
++		goto unregister_hws;
++	}
+ 
+ 	hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 
+ 	np = dev->of_node;
+ 	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (WARN_ON(IS_ERR(base)))
+-		return PTR_ERR(base);
++	if (WARN_ON(IS_ERR(base))) {
++		err = PTR_ERR(base);
++		goto unregister_hws;
++	}
+ 
+ 	/* CORE */
+ 	hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index 546a3703bfeb2..273de1f293076 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -148,10 +148,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ 	imx_clk_scu("adc0_clk",  IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu("adc1_clk",  IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu("pwm_clk",   IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
++	imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ 	imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+ 	imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+-	imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ 
+ 	/* Audio SS */
+ 	imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index ee5c72369334f..6bbdd4705d71f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
+ 
+ 	clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ 				 mask, 0, NULL);
+-	if (clk) {
+-		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+-	} else {
++	if (IS_ERR(clk)) {
+ 		pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ 		iounmap(reg);
++		return;
+ 	}
++
++	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+ 
+@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
+ 	clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
+ 				ARRAY_SIZE(parents) , 0, reg, shift, mask,
+ 				0, NULL);
+-	if (clk)
+-		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+-	else
++	if (IS_ERR(clk)) {
+ 		pr_err("%s: error registering mux %s\n", __func__, clk_name);
++		return;
++	}
++
++	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+ 
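
Both keystone fixes above hinge on the clk registration API's error convention: clk_register_divider() and clk_register_mux() return an ERR_PTR()-encoded errno, never NULL, so a plain truthiness test passes even on failure. A small sketch of the correct check; add_provider_checked() is a hypothetical helper:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>

static int add_provider_checked(struct device_node *np, struct clk *clk)
{
	if (IS_ERR(clk))		/* e.g. ERR_PTR(-ENOMEM) */
		return PTR_ERR(clk);

	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
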
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 4a154da8a5430..ae4405ca5bfa0 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -670,6 +670,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 								clk_data);
+@@ -750,6 +752,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return;
+ 
+ 		for (i = 0; i < CLK_INFRA_NR; i++)
+ 			infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -777,6 +781,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return -ENOMEM;
+ 	} else {
+ 		for (i = 0; i < CLK_INFRA_NR; i++) {
+ 			if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+@@ -894,6 +900,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ 			       ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 0377e6dd3206a..780f98408c9b7 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -740,6 +740,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+@@ -775,6 +777,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ 				    clk_data);
+@@ -816,6 +820,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ 			       ARRAY_SIZE(ifr_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index f33fbaee14048..fd14da075604b 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1219,6 +1219,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+@@ -1239,6 +1241,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				    clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index 4c87c0348e5f4..d8303d5efc7fe 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -392,6 +392,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ 				 clk_data);
+@@ -547,6 +549,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return;
+ 
+ 		for (i = 0; i < CLK_INFRA_NR; i++)
+ 			infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -572,6 +576,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return -ENOMEM;
+ 	} else {
+ 		for (i = 0; i < CLK_INFRA_NR; i++) {
+ 			if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index 1e1c77cc14ba5..ad0a3854c2a0b 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -79,6 +79,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ 	int r;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ 			       CLK_ETH_NR_CLK, clk_data);
+@@ -102,6 +104,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ 	int r;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
+ 			       CLK_SGMII_NR_CLK, clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 0893fbbb68cc1..f54127292e3c8 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -557,6 +557,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				    clk_data);
+@@ -581,6 +583,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 	struct clk_hw_onecell_data *clk_data;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ 			       ARRAY_SIZE(infra_clks), clk_data);
+@@ -604,6 +608,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ 			       ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index a4eca5fd539c8..513ab6b1b3229 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -321,10 +321,8 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
+ 
+ 	ret = clk_hw_register(NULL, &pll->hw);
+ 
+-	if (ret) {
+-		kfree(pll);
++	if (ret)
+ 		return ERR_PTR(ret);
+-	}
+ 
+ 	return &pll->hw;
+ }
+@@ -340,6 +338,8 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
++	if (IS_ERR(hw))
++		kfree(pll);
+ 
+ 	return hw;
+ }
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 92ef5314b59ce..9151f7b11833f 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -131,6 +131,7 @@ config IPQ_APSS_6018
+ 	tristate "IPQ APSS Clock Controller"
+ 	select IPQ_APSS_PLL
+ 	depends on QCOM_APCS_IPC || COMPILE_TEST
++	depends on QCOM_SMEM
+ 	help
+ 	  Support for APSS clock controller on IPQ platforms. The
+ 	  APSS clock controller manages the Mux and enable block that feeds the
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index ce28d882ee785..6ac7e3bfe79a5 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -68,13 +68,13 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ 				.fw_name = "xo",
+ 			},
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_stromer_ops,
++			.ops = &clk_alpha_pll_stromer_plus_ops,
+ 		},
+ 	},
+ };
+ 
+ static const struct alpha_pll_config ipq5332_pll_config = {
+-	.l = 0x3e,
++	.l = 0x2d,
+ 	.config_ctl_val = 0x4001075b,
+ 	.config_ctl_hi_val = 0x304,
+ 	.main_output_mask = BIT(0),
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index e4ef645f65d1f..892f2efc1c32c 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -2479,3 +2479,66 @@ const struct clk_ops clk_alpha_pll_stromer_ops = {
+ 	.set_rate = clk_alpha_pll_stromer_set_rate,
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops);
++
++static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
++					       unsigned long rate,
++					       unsigned long prate)
++{
++	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
++	u32 l, alpha_width = pll_alpha_width(pll);
++	int ret, pll_mode;
++	u64 a;
++
++	rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
++
++	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &pll_mode);
++	if (ret)
++		return ret;
++
++	regmap_write(pll->clkr.regmap, PLL_MODE(pll), 0);
++
++	/* Delay of 2 output clock ticks required until output is disabled */
++	udelay(1);
++
++	regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++
++	if (alpha_width > ALPHA_BITWIDTH)
++		a <<= alpha_width - ALPHA_BITWIDTH;
++
++	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
++	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
++					a >> ALPHA_BITWIDTH);
++
++	regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
++
++	/* Wait five microseconds or more */
++	udelay(5);
++	regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N,
++			   PLL_RESET_N);
++
++	/* The lock time should be less than 50 microseconds, worst case */
++	usleep_range(50, 60);
++
++	ret = wait_for_pll_enable_lock(pll);
++	if (ret) {
++		pr_err("Wait for PLL enable lock failed [%s] %d\n",
++		       clk_hw_get_name(hw), ret);
++		return ret;
++	}
++
++	if (pll_mode & PLL_OUTCTRL)
++		regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL,
++				   PLL_OUTCTRL);
++
++	return 0;
++}
++
++const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
++	.prepare = clk_alpha_pll_enable,
++	.unprepare = clk_alpha_pll_disable,
++	.is_enabled = clk_alpha_pll_is_enabled,
++	.recalc_rate = clk_alpha_pll_recalc_rate,
++	.determine_rate = clk_alpha_pll_stromer_determine_rate,
++	.set_rate = clk_alpha_pll_stromer_plus_set_rate,
++};
++EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index e4bd863027ab6..903fbab9b58e9 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -152,6 +152,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+ extern const struct clk_ops clk_alpha_pll_huayra_ops;
+ extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
+ extern const struct clk_ops clk_alpha_pll_stromer_ops;
++extern const struct clk_ops clk_alpha_pll_stromer_plus_ops;
+ 
+ extern const struct clk_ops clk_alpha_pll_fabia_ops;
+ extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index e22baf3a7112a..5183c74b074f8 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+ static unsigned long
+ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+ {
+-	if (hid_div) {
+-		rate *= 2;
+-		rate /= hid_div + 1;
+-	}
++	if (hid_div)
++		rate = mult_frac(rate, 2, hid_div + 1);
+ 
+-	if (mode) {
+-		u64 tmp = rate;
+-		tmp *= m;
+-		do_div(tmp, n);
+-		rate = tmp;
+-	}
++	if (mode)
++		rate = mult_frac(rate, m, n);
+ 
+ 	return rate;
+ }
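
calc_rate() above switches to mult_frac() because multiplying the rate before dividing can overflow an unsigned long on 32-bit. The kernel macro (from <linux/math.h>, quoted approximately) divides first and carries the remainder separately, which yields the same floor(x * n / d):

/* approximate shape of the kernel's mult_frac() */
#define mult_frac(x, n, d)					\
({								\
	typeof(x) quot = (x) / (d);				\
	typeof(x) rem  = (x) % (d);				\
	(quot * (n)) + ((rem * (n)) / (d));			\
})

For example, mult_frac(600000000, 2, 8) == 150000000, matching the old rate * 2 / (hid_div + 1) with hid_div = 7 but without ever forming the intermediate rate * 2.
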
+diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
+index a75ab88ed14c6..c975fb6719d45 100644
+--- a/drivers/clk/qcom/gcc-ipq5332.c
++++ b/drivers/clk/qcom/gcc-ipq5332.c
+@@ -70,7 +70,6 @@ static struct clk_fixed_factor gpll0_div2 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -84,7 +83,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -113,7 +111,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 				&gpll2_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -153,7 +150,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 				&gpll4_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 6914f962c8936..272080448e60b 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -87,7 +87,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ 			&gpll0_main.clkr.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
+ 		.ops = &clk_fixed_factor_ops,
+ 	},
+ };
+@@ -102,7 +101,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ 			&gpll0_main.clkr.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+ 	},
+ };
+@@ -132,7 +130,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 			&gpll4_main.clkr.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+ 	},
+ };
+@@ -162,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 			&gpll2_main.clkr.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index 5e44d1bcca9e2..48345ae7c2466 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -245,71 +245,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ 	{ .hw = &gpll0_early_div.hw }
+ };
+ 
+-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+-	F(100000000, P_GPLL0, 6, 0, 0),
+-	F(150000000, P_GPLL0, 4, 0, 0),
+-	F(200000000, P_GPLL0, 3, 0, 0),
+-	F(240000000, P_GPLL0, 2.5, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 system_noc_clk_src = {
+-	.cmd_rcgr = 0x0401c,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+-	.freq_tbl = ftbl_system_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "system_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0_gpll0_early_div,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(37500000, P_GPLL0, 16, 0, 0),
+-	F(75000000, P_GPLL0, 8, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+-	.cmd_rcgr = 0x0500c,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_map,
+-	.freq_tbl = ftbl_config_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "config_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(37500000, P_GPLL0, 16, 0, 0),
+-	F(50000000, P_GPLL0, 12, 0, 0),
+-	F(75000000, P_GPLL0, 8, 0, 0),
+-	F(100000000, P_GPLL0, 6, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+-	.cmd_rcgr = 0x06014,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_map,
+-	.freq_tbl = ftbl_periph_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "periph_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+ static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ 	F(19200000, P_XO, 1, 0, 0),
+ 	F(120000000, P_GPLL0, 5, 0, 0),
+@@ -1298,11 +1233,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mmss_noc_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++			.flags = CLK_IGNORE_UNUSED,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1465,11 +1396,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_usb_phy_cfg_ahb2phy_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1499,11 +1425,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc1_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1550,11 +1471,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc2_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1584,11 +1500,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc3_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1618,11 +1529,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc4_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1636,11 +1542,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ 		.enable_mask = BIT(17),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_blsp1_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1978,11 +1879,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ 		.enable_mask = BIT(15),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_blsp2_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2319,11 +2215,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pdm_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2354,11 +2245,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ 		.enable_mask = BIT(13),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_prng_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2371,11 +2257,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_tsif_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2423,11 +2304,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ 		.enable_mask = BIT(10),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_boot_rom_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2521,11 +2397,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2538,11 +2409,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2555,11 +2421,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2607,11 +2468,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2624,11 +2480,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2641,11 +2492,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2693,11 +2539,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2710,11 +2551,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2727,11 +2563,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2779,11 +2610,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_phy_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2830,11 +2656,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_ufs_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3061,11 +2882,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_snoc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3078,11 +2895,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_cnoc_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3095,11 +2908,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_smmu_aggre0_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3112,11 +2921,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_smmu_aggre0_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3163,10 +2968,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_dcc_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3179,10 +2980,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3195,11 +2992,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_qspi_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3348,10 +3140,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3364,10 +3152,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_mnoc_bimc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3380,10 +3164,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_snoc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3396,10 +3176,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_q6_bimc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3496,9 +3272,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
+ 	[GPLL0] = &gpll0.clkr,
+ 	[GPLL4_EARLY] = &gpll4_early.clkr,
+ 	[GPLL4] = &gpll4.clkr,
+-	[SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+-	[CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+-	[PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ 	[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ 	[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ 	[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 70b067f3618cb..889b297a875e1 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -775,7 +775,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+-		.flags = CLK_SET_RATE_PARENT,
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index 4490594bde69f..7b1d105afbd8c 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2453,6 +2453,7 @@ static struct clk_branch fd_ahb_clk = {
+ 
+ static struct clk_branch mnoc_ahb_clk = {
+ 	.halt_reg = 0x5024,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.clkr = {
+ 		.enable_reg = 0x5024,
+ 		.enable_mask = BIT(0),
+@@ -2468,6 +2469,7 @@ static struct clk_branch mnoc_ahb_clk = {
+ 
+ static struct clk_branch bimc_smmu_ahb_clk = {
+ 	.halt_reg = 0xe004,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.hwcg_reg = 0xe004,
+ 	.hwcg_bit = 1,
+ 	.clkr = {
+@@ -2485,6 +2487,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
+ 
+ static struct clk_branch bimc_smmu_axi_clk = {
+ 	.halt_reg = 0xe008,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.hwcg_reg = 0xe008,
+ 	.hwcg_bit = 1,
+ 	.clkr = {
+@@ -2625,11 +2628,13 @@ static struct gdsc camss_cpp_gdsc = {
+ static struct gdsc bimc_smmu_gdsc = {
+ 	.gdscr = 0xe020,
+ 	.gds_hw_ctrl = 0xe024,
++	.cxcs = (unsigned int []){ 0xe008 },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "bimc_smmu",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL | ALWAYS_ON,
++	.flags = VOTABLE,
+ };
+ 
+ static struct clk_regmap *mmcc_msm8998_clocks[] = {
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 1e7991439527a..50a443bf79ecd 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -821,6 +821,10 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
+ };
+ 
+ static const struct of_device_id mtmips_of_match[] = {
++	{
++		.compatible = "ralink,rt2880-reset",
++		.data = NULL,
++	},
+ 	{
+ 		.compatible = "ralink,rt2880-sysc",
+ 		.data = &rt2880_clk_data,
+@@ -1088,25 +1092,11 @@ static int mtmips_clk_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static const struct of_device_id mtmips_clk_of_match[] = {
+-	{ .compatible = "ralink,rt2880-reset" },
+-	{ .compatible = "ralink,rt2880-sysc" },
+-	{ .compatible = "ralink,rt3050-sysc" },
+-	{ .compatible = "ralink,rt3052-sysc" },
+-	{ .compatible = "ralink,rt3352-sysc" },
+-	{ .compatible = "ralink,rt3883-sysc" },
+-	{ .compatible = "ralink,rt5350-sysc" },
+-	{ .compatible = "ralink,mt7620-sysc" },
+-	{ .compatible = "ralink,mt7628-sysc" },
+-	{ .compatible = "ralink,mt7688-sysc" },
+-	{}
+-};
+-
+ static struct platform_driver mtmips_clk_driver = {
+ 	.probe = mtmips_clk_probe,
+ 	.driver = {
+ 		.name = "mtmips-clk",
+-		.of_match_table = mtmips_clk_of_match,
++		.of_match_table = mtmips_of_match,
+ 	},
+ };
+ 
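
The clk-mtmips change above folds the driver's two OF match tables into one, with "ralink,rt2880-reset" deliberately carrying .data = NULL so the reset-only node still binds without any clock data attached. Probe code consuming such a table has to tolerate the NULL payload; a minimal sketch of that guard follows (driver and struct names are illustrative, not from the patch):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_clk_data { /* per-SoC clock tables ... */ int n_clks; };

static int my_clk_probe(struct platform_device *pdev)
{
	/* NULL for entries that only reserve a compatible (the reset node) */
	const struct my_clk_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return 0;	/* nothing to register for this compatible */

	/* ... register the clocks described by @data ... */
	return 0;
}
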
+diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
+index e2e0447de1901..5a15f8788b922 100644
+--- a/drivers/clk/renesas/rcar-cpg-lib.c
++++ b/drivers/clk/renesas/rcar-cpg-lib.c
+@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ #define STPnHCK	BIT(9 - SDnSRCFC_SHIFT)
+ 
+ static const struct clk_div_table cpg_sdh_div_table[] = {
++	/*
++	 * These values are recommended by the datasheet.  Because they come
++	 * first, Linux will only use these.
++	 */
+ 	{ 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+-	{ STPnHCK | 4, 16 }, { 0, 0 },
++	{ STPnHCK | 4, 16 },
++	/*
++	 * These values are not recommended because STPnHCK is wrong.  But they
++	 * have been seen because of broken firmware.  So, we support reading
++	 * them but Linux will sanitize them when initializing through
++	 * recalc_rate.
++	 */
++	{ STPnHCK | 0, 1 }, { STPnHCK | 1, 2 },  { 2, 4 }, { 3, 8 }, { 4, 16 },
++	/* Sentinel */
++	{ 0, 0 }
+ };
+ 
+ struct clk * __init cpg_sdh_clk_register(const char *name,
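
A note on the table ordering trick above: the common clock divider helpers walk a clk_div_table from the top and take the first entry that satisfies the requested divider, so with the datasheet-recommended encodings listed first they are the only values ever written, while the trailing duplicates merely let recalc_rate decode a bogus STPnHCK encoding that broken firmware may have left in the register. A hedged sketch of that first-match lookup (helper name is illustrative):

#include <linux/clk-provider.h>

static unsigned int pick_table_val(const struct clk_div_table *t,
				   unsigned int div)
{
	for (; t->div; t++)		/* the { 0, 0 } sentinel ends the scan */
		if (t->div == div)
			return t->val;	/* first (recommended) encoding wins */
	return 0;
}
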
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index bc623515ad843..a3b20c898a391 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -11,6 +11,7 @@
+  * Copyright (C) 2015 Renesas Electronics Corp.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk/renesas.h>
+@@ -39,14 +40,13 @@
+ #define WARN_DEBUG(x)	do { } while (0)
+ #endif
+ 
+-#define DIV_RSMASK(v, s, m)	((v >> s) & m)
+ #define GET_SHIFT(val)		((val >> 12) & 0xff)
+ #define GET_WIDTH(val)		((val >> 8) & 0xf)
+ 
+-#define KDIV(val)		DIV_RSMASK(val, 16, 0xffff)
+-#define MDIV(val)		DIV_RSMASK(val, 6, 0x3ff)
+-#define PDIV(val)		DIV_RSMASK(val, 0, 0x3f)
+-#define SDIV(val)		DIV_RSMASK(val, 0, 0x7)
++#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
++#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
++#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
++#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)
+ 
+ #define CLK_ON_R(reg)		(reg)
+ #define CLK_MON_R(reg)		(0x180 + (reg))
+@@ -195,7 +195,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ 	u32 off = GET_REG_OFFSET(hwdata->conf);
+ 	u32 shift = GET_SHIFT(hwdata->conf);
+ 	const u32 clk_src_266 = 2;
+-	u32 bitmask;
++	u32 msk, val, bitmask;
++	unsigned long flags;
++	int ret;
+ 
+ 	/*
+ 	 * As per the HW manual, we should not directly switch from 533 MHz to
+@@ -209,26 +211,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ 	 * the index to value mapping is done by adding 1 to the index.
+ 	 */
+ 	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
++	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
++	spin_lock_irqsave(&priv->rmw_lock, flags);
+ 	if (index != clk_src_266) {
+-		u32 msk, val;
+-		int ret;
+-
+ 		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+ 
+-		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+-
+-		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+-					 !(val & msk), 100,
+-					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+-		if (ret) {
+-			dev_err(priv->dev, "failed to switch clk source\n");
+-			return ret;
+-		}
++		ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++						!(val & msk), 10,
++						CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++		if (ret)
++			goto unlock;
+ 	}
+ 
+ 	writel(bitmask | ((index + 1) << shift), priv->base + off);
+ 
+-	return 0;
++	ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++					!(val & msk), 10,
++					CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++unlock:
++	spin_unlock_irqrestore(&priv->rmw_lock, flags);
++
++	if (ret)
++		dev_err(priv->dev, "failed to switch clk source\n");
++
++	return ret;
+ }
+ 
+ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+@@ -239,14 +245,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+ 
+ 	val >>= GET_SHIFT(hwdata->conf);
+ 	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+-	if (val) {
+-		val--;
+-	} else {
+-		/* Prohibited clk source, change it to 533 MHz(reset value) */
+-		rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+-	}
+ 
+-	return val;
++	return val ? val - 1 : 0;
+ }
+ 
+ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
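
The rzg2l mux rework above pulls both register writes and both status polls under priv->rmw_lock, so a concurrent set_parent cannot interleave with the mandated intermediate switch to the 533 MHz source; and since the polls now run with a spinlock held and interrupts off, they must use the atomic (udelay-based) variant, hence readl_poll_timeout_atomic() and the much tighter timeout seen in the rzg2l-cpg.h hunk below. A condensed sketch of the pattern, with register offsets and bit names as placeholder assumptions:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/spinlock.h>

#define MUX_REG		0x0	/* hypothetical offsets/bits */
#define STATUS_REG	0x4
#define STATUS_BUSY	BIT(0)

static int switch_mux_locked(void __iomem *base, u32 sel, spinlock_t *lock)
{
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(lock, flags);
	writel(sel, base + MUX_REG);
	/* atomic variant busy-waits with udelay(): safe under a spinlock */
	ret = readl_poll_timeout_atomic(base + STATUS_REG, val,
					!(val & STATUS_BUSY), 10, 200);
	spin_unlock_irqrestore(lock, flags);

	return ret;
}
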
+@@ -702,18 +702,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ 	struct pll_clk *pll_clk = to_pll(hw);
+ 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
+ 	unsigned int val1, val2;
+-	unsigned int mult = 1;
+-	unsigned int div = 1;
++	u64 rate;
+ 
+ 	if (pll_clk->type != CLK_TYPE_SAM_PLL)
+ 		return parent_rate;
+ 
+ 	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ 	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
+-	mult = MDIV(val1) + KDIV(val1) / 65536;
+-	div = PDIV(val1) << SDIV(val2);
+ 
+-	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
++	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
++			       16 + SDIV(val2));
++
++	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
+ }
+ 
+ static const struct clk_ops rzg2l_cpg_pll_ops = {
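
The recalc_rate fix in this hunk is worth unpacking: the old integer expression MDIV(val1) + KDIV(val1) / 65536 truncated the fractional K contribution to zero for any |K| < 65536, and read K as unsigned even though the hardware field is signed. The new code computes parent * (MDIV + KDIV/2^16) / (PDIV * 2^SDIV) in 16.16 fixed point via mul_u64_u32_shr(). A small sketch with a worked number (function name illustrative):

#include <linux/kernel.h>
#include <linux/math64.h>

/* rate = parent * (mdiv + kdiv / 65536) / (pdiv << sdiv), 16.16 fixed point */
static u64 sample_pll_rate(u64 parent, u32 mdiv, s16 kdiv, u32 pdiv, u32 sdiv)
{
	/* (parent * ((mdiv << 16) + kdiv)) >> (16 + sdiv) */
	u64 rate = mul_u64_u32_shr(parent, (mdiv << 16) + kdiv, 16 + sdiv);

	return DIV_ROUND_CLOSEST_ULL(rate, pdiv);
}

/* e.g. parent = 24 MHz, mdiv = 100, kdiv = 32768 (+0.5), pdiv = 3, sdiv = 1:
 * the new math yields 24 MHz * 100.5 / 6 = 402 MHz, where the old code got
 * 24 MHz * 100 / 6 = 400 MHz after silently dropping the K fraction. */
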
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index 6cee9e56acc72..91e9c2569f801 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -43,7 +43,7 @@
+ #define CPG_CLKSTATUS_SELSDHI0_STS	BIT(28)
+ #define CPG_CLKSTATUS_SELSDHI1_STS	BIT(29)
+ 
+-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US	20000
++#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US	200
+ 
+ /* n = 0/1/2 for PLL1/4/6 */
+ #define CPG_SAMPLL_CLK1(n)	(0x04 + (16 * n))
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 768a1f3398b47..5d5bb123ba949 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
+ 				     u32 flags,
+ 				     struct clk_omap_divider *div)
+ {
+-	struct clk *clk;
+ 	struct clk_init_data init;
+ 	const char *parent_name;
+ 	const char *name;
+@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
+ 	div->hw.init = &init;
+ 
+ 	/* register the clock */
+-	clk = of_ti_clk_register(node, &div->hw, name);
+-
+-	if (IS_ERR(clk))
+-		kfree(div);
+-
+-	return clk;
++	return of_ti_clk_register(node, &div->hw, name);
+ }
+ 
+ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 7dd2c615bce23..071b04f1ee730 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -836,8 +836,9 @@ static u64 __arch_timer_check_delta(void)
+ 		 * Note that TVAL is signed, thus has only 31 of its
+ 		 * 32 bits to express magnitude.
+ 		 */
+-		MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+-						 APM_CPU_PART_POTENZA)),
++		MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
++					      APM_CPU_PART_XGENE),
++			       APM_CPU_VAR_POTENZA, 0x0, 0xf),
+ 		{},
+ 	};
+ 
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 349236a7ba5ff..2d0bed0877e03 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -141,6 +141,8 @@ struct dmtimer {
+ 	struct platform_device *pdev;
+ 	struct list_head node;
+ 	struct notifier_block nb;
++	struct notifier_block fclk_nb;
++	unsigned long fclk_rate;
+ };
+ 
+ static u32 omap_reserved_systimers;
+@@ -254,8 +256,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
+ 	timer->posted = OMAP_TIMER_POSTED;
+ }
+ 
+-static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+-					unsigned long rate)
++static inline void __omap_dm_timer_stop(struct dmtimer *timer)
+ {
+ 	u32 l;
+ 
+@@ -270,7 +271,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ 		 * Wait for functional clock period x 3.5 to make sure that
+ 		 * timer is stopped
+ 		 */
+-		udelay(3500000 / rate + 1);
++		udelay(3500000 / timer->fclk_rate + 1);
+ #endif
+ 	}
+ 
+@@ -349,6 +350,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
+ 	return NOTIFY_OK;
+ }
+ 
++static int omap_timer_fclk_notifier(struct notifier_block *nb,
++				    unsigned long event, void *data)
++{
++	struct clk_notifier_data *clk_data = data;
++	struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
++
++	switch (event) {
++	case POST_RATE_CHANGE:
++		timer->fclk_rate = clk_data->new_rate;
++		return NOTIFY_OK;
++	default:
++		return NOTIFY_DONE;
++	}
++}
++
+ static int omap_dm_timer_reset(struct dmtimer *timer)
+ {
+ 	u32 l, timeout = 100000;
+@@ -755,7 +771,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ {
+ 	struct dmtimer *timer;
+ 	struct device *dev;
+-	unsigned long rate = 0;
+ 
+ 	timer = to_dmtimer(cookie);
+ 	if (unlikely(!timer))
+@@ -763,10 +778,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ 
+ 	dev = &timer->pdev->dev;
+ 
+-	if (!timer->omap1)
+-		rate = clk_get_rate(timer->fclk);
+-
+-	__omap_dm_timer_stop(timer, rate);
++	__omap_dm_timer_stop(timer);
+ 
+ 	pm_runtime_put_sync(dev);
+ 
+@@ -1125,6 +1137,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ 		timer->fclk = devm_clk_get(dev, "fck");
+ 		if (IS_ERR(timer->fclk))
+ 			return PTR_ERR(timer->fclk);
++
++		timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
++		ret = devm_clk_notifier_register(dev, timer->fclk,
++						 &timer->fclk_nb);
++		if (ret)
++			return ret;
++
++		timer->fclk_rate = clk_get_rate(timer->fclk);
+ 	} else {
+ 		timer->fclk = ERR_PTR(-ENODEV);
+ 	}
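
The timer-ti-dm change replaces a clk_get_rate() call on the stop path with a rate cached at probe and kept current through a clock rate-change notifier; POST_RATE_CHANGE delivers the new rate in struct clk_notifier_data, so the udelay() calculation in __omap_dm_timer_stop() never has to query the clk framework. A minimal sketch of the caching pattern (struct and field names are illustrative):

#include <linux/clk.h>
#include <linux/notifier.h>

struct my_timer {
	struct notifier_block fclk_nb;
	unsigned long fclk_rate;	/* cached; cheap to read anywhere */
};

static int my_fclk_notifier(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct clk_notifier_data *cnd = data;
	struct my_timer *t = container_of(nb, struct my_timer, fclk_nb);

	if (event != POST_RATE_CHANGE)
		return NOTIFY_DONE;

	t->fclk_rate = cnd->new_rate;	/* refresh the cached copy */
	return NOTIFY_OK;
}

/* at probe: t->fclk_nb.notifier_call = my_fclk_notifier;
 *           devm_clk_notifier_register(dev, fclk, &t->fclk_nb);
 *           t->fclk_rate = clk_get_rate(fclk); */
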
+diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
+index 75f1e611d0aab..f7b193b195dc9 100644
+--- a/drivers/cpufreq/tegra194-cpufreq.c
++++ b/drivers/cpufreq/tegra194-cpufreq.c
+@@ -450,6 +450,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ 		if (IS_ERR(opp))
+ 			continue;
+ 
++		dev_pm_opp_put(opp);
++
+ 		ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
+ 		if (ret < 0)
+ 			return ret;
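
The tegra194 one-liner plugs a reference leak: every successful dev_pm_opp_find_*() lookup takes a reference on the OPP that the caller must release with dev_pm_opp_put(), even when the OPP object itself is not used further. The balanced pattern, as a sketch (frequency argument illustrative):

#include <linux/err.h>
#include <linux/pm_opp.h>

static int check_opp(struct device *cpu_dev, unsigned long hz)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(cpu_dev, hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* inspect @opp if needed, then drop the lookup reference */
	dev_pm_opp_put(opp);

	return 0;
}
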
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index feb86013dbf63..192cadc7d85a5 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -572,7 +572,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ 	if (keylen != CHACHA_KEY_SIZE + saltlen)
+ 		return -EINVAL;
+ 
+-	ctx->cdata.key_virt = key;
++	memcpy(ctx->key, key, keylen);
++	ctx->cdata.key_virt = ctx->key;
+ 	ctx->cdata.keylen = keylen - saltlen;
+ 
+ 	return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 9156bbe038b7b..a148ff1f0872c 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -641,7 +641,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ 	if (keylen != CHACHA_KEY_SIZE + saltlen)
+ 		return -EINVAL;
+ 
+-	ctx->cdata.key_virt = key;
++	memcpy(ctx->key, key, keylen);
++	ctx->cdata.key_virt = ctx->key;
+ 	ctx->cdata.keylen = keylen - saltlen;
+ 
+ 	return chachapoly_set_sh_desc(aead);
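
Both caam hunks fix the same lifetime bug: setkey() used to stash the caller's key pointer in ctx->cdata.key_virt, but that buffer belongs to the caller and may be gone by the time the shared descriptor is rebuilt from key_virt. Copying into the per-transform ctx->key buffer gives the descriptor a pointer that lives as long as the tfm. The shape of the fix as a standalone sketch (types and sizes are illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_ctx {
	u8 key[64];		/* lives as long as the transform */
	const u8 *key_virt;	/* read later, at descriptor-build time */
};

static int my_setkey(struct my_ctx *ctx, const u8 *key, unsigned int keylen)
{
	if (keylen > sizeof(ctx->key))
		return -EINVAL;

	memcpy(ctx->key, key, keylen);	/* own a stable copy ...          */
	ctx->key_virt = ctx->key;	/* ... and reference it, not @key */
	return 0;
}
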
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 5d0adfb54a34b..bbf1bbe283574 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -430,8 +430,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+ 
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ }
+ 
+@@ -1030,7 +1033,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+ 
+ 	for (i = 0; i < clusters_num; i++) {
+ 		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+-		if (ret < 0)
++		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+ 			return -EINVAL;
+ 		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
+ 
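
The snprintf() test in this hunk was checking the wrong edge: with a valid format string the kernel's snprintf() does not return negative values, it returns the length the output would have had, so truncation shows up as ret >= bufsize (the adf_transport_debug buffer bumps further down address the same class of problem from the other side). In sketch form (helper name illustrative):

#include <linux/kernel.h>

static int make_name(char *buf, int len, int i)
{
	int ret = snprintf(buf, len, "cluster%d", i);

	/* ret is the would-be length: >= len means the name was truncated */
	if (ret >= len)
		return -EINVAL;

	return 0;
}
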
+@@ -1154,6 +1157,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &hpre_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	}
+ 
+ 	ret = hisi_qm_init(qm);
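
pf_q_num_flag implements a common module-parameter idiom: the custom setter only executes when the parameter is passed explicitly, so the flag cleanly separates "administrator demanded this queue count, fail hard if the hardware cannot honor it" from "driver default, clamp quietly", which is exactly how the qm.c hunk further down consumes QM_MODULE_PARAM. The same three-line change repeats for sec2 and zip below. A generic sketch of the idiom (parameter name illustrative):

#include <linux/moduleparam.h>

static bool my_q_num_seen;
static int my_q_num = 16;		/* built-in default */

static int my_q_num_set(const char *val, const struct kernel_param *kp)
{
	my_q_num_seen = true;		/* setter runs only on explicit use */
	return param_set_int(val, kp);
}

static const struct kernel_param_ops my_q_num_ops = {
	.set = my_q_num_set,
	.get = param_get_int,
};
module_param_cb(my_q_num, &my_q_num_ops, &my_q_num, 0444);
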
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index edc6fd44e7ca9..ba4852744c052 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -204,8 +204,6 @@
+ #define WAIT_PERIOD			20
+ #define REMOVE_WAIT_DELAY		10
+ 
+-#define QM_DRIVER_REMOVING		0
+-#define QM_RST_SCHED			1
+ #define QM_QOS_PARAM_NUM		2
+ #define QM_QOS_MAX_VAL			1000
+ #define QM_QOS_RATE			100
+@@ -2814,7 +2812,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
+ 	mutex_init(&qm->mailbox_lock);
+ 	init_rwsem(&qm->qps_lock);
+ 	qm->qp_in_used = 0;
+-	qm->misc_ctl = false;
+ 	if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
+ 		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ 			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+@@ -5081,6 +5078,7 @@ free_eq_irq:
+ 
+ static int qm_get_qp_num(struct hisi_qm *qm)
+ {
++	struct device *dev = &qm->pdev->dev;
+ 	bool is_db_isolation;
+ 
+ 	/* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+@@ -5097,13 +5095,21 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ 	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ 					     QM_FUNC_MAX_QP_CAP, is_db_isolation);
+ 
+-	/* check if qp number is valid */
+-	if (qm->qp_num > qm->max_qp_num) {
+-		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
++	if (qm->qp_num <= qm->max_qp_num)
++		return 0;
++
++	if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
++		/* Check whether the set qp number is valid */
++		dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
+ 			qm->qp_num, qm->max_qp_num);
+ 		return -EINVAL;
+ 	}
+ 
++	dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
++		 qm->qp_num, qm->max_qp_num);
++	qm->qp_num = qm->max_qp_num;
++	qm->debug.curr_qm_qp_num = qm->qp_num;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
+index 1406a422d4551..8e36aa9c681be 100644
+--- a/drivers/crypto/hisilicon/qm_common.h
++++ b/drivers/crypto/hisilicon/qm_common.h
+@@ -4,7 +4,6 @@
+ #define QM_COMMON_H
+ 
+ #define QM_DBG_READ_LEN		256
+-#define QM_RESETTING		2
+ 
+ struct qm_cqe {
+ 	__le32 rsvd0;
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 77f9f131b8503..62bd8936a9154 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -311,8 +311,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+ 
++static bool pf_q_num_flag;
+ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ }
+ 
+@@ -1120,6 +1123,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &sec_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ 		/*
+ 		 * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index f3ce34198775d..84dbaeb07ea83 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -364,8 +364,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+ 
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ }
+ 
+@@ -1139,6 +1142,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &zip_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ 		/*
+ 		 * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+index 0399417b91fc7..c43e39c34d9ba 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+@@ -29,7 +29,7 @@
+ #define ADF_PCI_MAX_BARS 3
+ #define ADF_DEVICE_NAME_LENGTH 32
+ #define ADF_ETR_MAX_RINGS_PER_BANK 16
+-#define ADF_MAX_MSIX_VECTOR_NAME 16
++#define ADF_MAX_MSIX_VECTOR_NAME 48
+ #define ADF_DEVICE_NAME_PREFIX "qat_"
+ 
+ enum adf_accel_capabilities {
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+index b8132eb9bc2a0..0af8606eac5b2 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+@@ -25,6 +25,8 @@
+ #define ADF_STATUS_AE_STARTED 6
+ #define ADF_STATUS_PF_RUNNING 7
+ #define ADF_STATUS_IRQ_ALLOCATED 8
++#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
++#define ADF_STATUS_COMP_ALGS_REGISTERED 10
+ 
+ enum adf_dev_reset_mode {
+ 	ADF_DEV_RESET_ASYNC = 0,
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
+index 826179c985241..67bdbee584991 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
+@@ -209,6 +209,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ 		return -EFAULT;
+ 	}
++	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+ 
+ 	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ 		dev_err(&GET_DEV(accel_dev),
+@@ -217,6 +218,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ 		return -EFAULT;
+ 	}
++	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+ 
+ 	adf_dbgfs_add(accel_dev);
+ 
+@@ -249,13 +251,17 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+ 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ 
+-	if (!list_empty(&accel_dev->crypto_list)) {
++	if (!list_empty(&accel_dev->crypto_list) &&
++	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ 		qat_algs_unregister();
+ 		qat_asym_algs_unregister();
+ 	}
++	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+ 
+-	if (!list_empty(&accel_dev->compression_list))
++	if (!list_empty(&accel_dev->compression_list) &&
++	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
+ 		qat_comp_algs_unregister();
++	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+ 
+ 	list_for_each(list_itr, &service_table) {
+ 		service = list_entry(list_itr, struct service_hndl, list);
+@@ -412,13 +418,6 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+ 
+ 	mutex_lock(&accel_dev->state_lock);
+ 
+-	if (!adf_dev_started(accel_dev)) {
+-		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+-			 accel_dev->accel_id);
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	if (reconfig) {
+ 		ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ 		goto out;
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+index a74d2f9303670..8880af1aa1b5b 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+@@ -52,6 +52,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 	case DEV_DOWN:
+ 		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+ 
++		if (!adf_dev_started(accel_dev)) {
++			dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
++				 accel_id);
++
++			break;
++		}
++
+ 		ret = adf_dev_down(accel_dev, true);
+ 		if (ret < 0)
+ 			return -EINVAL;
+@@ -61,7 +68,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 		dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+ 
+ 		ret = adf_dev_up(accel_dev, true);
+-		if (ret < 0) {
++		if (ret == -EALREADY) {
++			break;
++		} else if (ret) {
+ 			dev_err(dev, "Failed to start device qat_dev%d\n",
+ 				accel_id);
+ 			adf_dev_down(accel_dev, true);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+index 08bca1c506c0e..e2dd568b87b51 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ {
+ 	struct adf_etr_ring_debug_entry *ring_debug;
+-	char entry_name[8];
++	char entry_name[16];
+ 
+ 	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ 	if (!ring_debug)
+@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+ {
+ 	struct adf_accel_dev *accel_dev = bank->accel_dev;
+ 	struct dentry *parent = accel_dev->transport->debug;
+-	char name[8];
++	char name[16];
+ 
+ 	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ 	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+index bb80455b3e81e..b97b678823a97 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+@@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+ 	spin_unlock_bh(&backlog->lock);
+ }
+ 
+-static void qat_alg_backlog_req(struct qat_alg_req *req,
+-				struct qat_instance_backlog *backlog)
+-{
+-	INIT_LIST_HEAD(&req->list);
+-
+-	spin_lock_bh(&backlog->lock);
+-	list_add_tail(&req->list, &backlog->list);
+-	spin_unlock_bh(&backlog->lock);
+-}
+-
+-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+ {
+ 	struct qat_instance_backlog *backlog = req->backlog;
+ 	struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ 	u32 *fw_req = req->fw_req;
+ 
+-	/* If any request is already backlogged, then add to backlog list */
++	/* Check if any request is already backlogged */
+ 	if (!list_empty(&backlog->list))
+-		goto enqueue;
++		return false;
+ 
+-	/* If ring is nearly full, then add to backlog list */
++	/* Check if ring is nearly full */
+ 	if (adf_ring_nearly_full(tx_ring))
+-		goto enqueue;
++		return false;
+ 
+-	/* If adding request to HW ring fails, then add to backlog list */
++	/* Try to enqueue to HW ring */
+ 	if (adf_send_message(tx_ring, fw_req))
+-		goto enqueue;
++		return false;
+ 
+-	return -EINPROGRESS;
++	return true;
++}
+ 
+-enqueue:
+-	qat_alg_backlog_req(req, backlog);
+ 
+-	return -EBUSY;
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++	struct qat_instance_backlog *backlog = req->backlog;
++	int ret = -EINPROGRESS;
++
++	if (qat_alg_try_enqueue(req))
++		return ret;
++
++	spin_lock_bh(&backlog->lock);
++	if (!qat_alg_try_enqueue(req)) {
++		list_add_tail(&req->list, &backlog->list);
++		ret = -EBUSY;
++	}
++	spin_unlock_bh(&backlog->lock);
++
++	return ret;
+ }
+ 
+ int qat_alg_send_message(struct qat_alg_req *req)
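
The qat backlog rework is a textbook lockless-fast-path/locked-recheck structure: the old code made its emptiness and ring-capacity checks without the backlog lock and then backlogged unconditionally, leaving a window in which the backlog could drain between the check and list_add_tail(), stranding the request. The new shape retries the enqueue under the lock before committing to the backlog. Distilled into a sketch (types and helpers hypothetical, mirroring the fix):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct req { struct list_head list; };
struct queue {
	spinlock_t lock;
	struct list_head backlog;
};

static bool try_enqueue_hw(struct queue *q, struct req *r);	/* lockless */

static int send_maybacklog(struct queue *q, struct req *r)
{
	int ret = -EINPROGRESS;

	if (try_enqueue_hw(q, r))	/* optimistic path, no lock taken */
		return ret;

	spin_lock_bh(&q->lock);
	if (!try_enqueue_hw(q, r)) {	/* recheck: state may have changed */
		list_add_tail(&r->list, &q->backlog);
		ret = -EBUSY;
	}
	spin_unlock_bh(&q->lock);

	return ret;
}
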
+diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
+index 45e7e044cf4a0..8e5f3d84311e5 100644
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -75,6 +75,7 @@ resource_size_t __rcrb_to_component(struct device *dev,
+ 				    enum cxl_rcrb which);
+ 
+ extern struct rw_semaphore cxl_dpa_rwsem;
++extern struct rw_semaphore cxl_region_rwsem;
+ 
+ int cxl_memdev_init(void);
+ void cxl_memdev_exit(void);
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index 4449b34a80cc9..64e86b786db52 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -85,7 +85,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
+ 				struct cxl_component_regs *regs)
+ {
+ 	struct cxl_register_map map = {
+-		.dev = &port->dev,
++		.host = &port->dev,
+ 		.resource = port->component_reg_phys,
+ 		.base = crb,
+ 		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -575,17 +575,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+ 			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
+ }
+ 
+-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
++static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ {
+ 	struct cxl_dport **t = &cxlsd->target[0];
+ 	int ways = cxlsd->cxld.interleave_ways;
+ 
+-	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+-			  ways > 8 || ways > cxlsd->nr_targets,
+-			  "ways: %d overflows targets: %d\n", ways,
+-			  cxlsd->nr_targets))
+-		return -ENXIO;
+-
+ 	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ 	if (ways > 1)
+ 		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+@@ -601,8 +595,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ 		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ 	if (ways > 7)
+ 		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+-
+-	return 0;
+ }
+ 
+ /*
+@@ -650,6 +642,25 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ 		return -EBUSY;
+ 	}
+ 
++	/*
++	 * For endpoint decoders hosted on CXL memory devices that
++	 * support the sanitize operation, make sure sanitize is not in-flight.
++	 */
++	if (is_endpoint_decoder(&cxld->dev)) {
++		struct cxl_endpoint_decoder *cxled =
++			to_cxl_endpoint_decoder(&cxld->dev);
++		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++		struct cxl_memdev_state *mds =
++			to_cxl_memdev_state(cxlmd->cxlds);
++
++		if (mds && mds->security.sanitize_active) {
++			dev_dbg(&cxlmd->dev,
++				"attempted to commit %s during sanitize\n",
++				dev_name(&cxld->dev));
++			return -EBUSY;
++		}
++	}
++
+ 	down_read(&cxl_dpa_rwsem);
+ 	/* common decoder settings */
+ 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+@@ -670,13 +681,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ 		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ 		u64 targets;
+ 
+-		rc = cxlsd_set_targets(cxlsd, &targets);
+-		if (rc) {
+-			dev_dbg(&port->dev, "%s: target configuration error\n",
+-				dev_name(&cxld->dev));
+-			goto err;
+-		}
+-
++		cxlsd_set_targets(cxlsd, &targets);
+ 		writel(upper_32_bits(targets), tl_hi);
+ 		writel(lower_32_bits(targets), tl_lo);
+ 	} else {
+@@ -694,7 +699,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ 
+ 	port->commit_end++;
+ 	rc = cxld_await_commit(hdm, cxld->id);
+-err:
+ 	if (rc) {
+ 		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ 			dev_name(&cxld->dev), rc);
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index 4df4f614f490e..b91bb98869917 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+ 
+-/**
+- * cxl_mem_sanitize() - Send a sanitization command to the device.
+- * @mds: The device data for the operation
+- * @cmd: The specific sanitization command opcode
+- *
+- * Return: 0 if the command was executed successfully, regardless of
+- * whether or not the actual security operation is done in the background,
+- * such as for the Sanitize case.
+- * Error return values can be the result of the mailbox command, -EINVAL
+- * when security requirements are not met or invalid contexts.
+- *
+- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+- */
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
++static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+ {
+ 	int rc;
+ 	u32 sec_out = 0;
+@@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
++
++
++/**
++ * cxl_mem_sanitize() - Send a sanitization command to the device.
++ * @cxlmd: The device for the operation
++ * @cmd: The specific sanitization command opcode
++ *
++ * Return: 0 if the command was executed successfully, regardless of
++ * whether or not the actual security operation is done in the background,
++ * such as for the Sanitize case.
++ * Error return values can be the result of the mailbox command, -EINVAL
++ * when security requirements are not met or invalid contexts, or -EBUSY
++ * if the sanitize operation is already in flight.
++ *
++ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
++ */
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
++{
++	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++	struct cxl_port  *endpoint;
++	int rc;
++
++	/* synchronize with cxl_mem_probe() and decoder write operations */
++	device_lock(&cxlmd->dev);
++	endpoint = cxlmd->endpoint;
++	down_read(&cxl_region_rwsem);
++	/*
++	 * Require an endpoint to be safe otherwise the driver can not
++	 * be sure that the device is unmapped.
++	 */
++	if (endpoint && endpoint->commit_end == -1)
++		rc = __cxl_mem_sanitize(mds, cmd);
++	else
++		rc = -EBUSY;
++	up_read(&cxl_region_rwsem);
++	device_unlock(&cxlmd->dev);
++
++	return rc;
++}
+ 
+ static int add_dpa_res(struct device *dev, struct resource *parent,
+ 		       struct resource *res, resource_size_t start,
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index 14b547c07f547..fed9573cf355e 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
+ 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+-	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+-	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+-	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ 	unsigned long state = mds->security.state;
++	int rc = 0;
+ 
+-	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+-		return sysfs_emit(buf, "sanitize\n");
++	/* sync with latest submission state */
++	mutex_lock(&mds->mbox_mutex);
++	if (mds->security.sanitize_active)
++		rc = sysfs_emit(buf, "sanitize\n");
++	mutex_unlock(&mds->mbox_mutex);
++	if (rc)
++		return rc;
+ 
+ 	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+ 		return sysfs_emit(buf, "disabled\n");
+@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
+ 				       const char *buf, size_t len)
+ {
+ 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+-	struct cxl_port *port = cxlmd->endpoint;
+ 	bool sanitize;
+ 	ssize_t rc;
+ 
+ 	if (kstrtobool(buf, &sanitize) || !sanitize)
+ 		return -EINVAL;
+ 
+-	if (!port || !is_cxl_endpoint(port))
+-		return -EINVAL;
+-
+-	/* ensure no regions are mapped to this memdev */
+-	if (port->commit_end != -1)
+-		return -EBUSY;
+-
+-	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
++	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
++	if (rc)
++		return rc;
+ 
+-	return rc ? rc : len;
++	return len;
+ }
+ static struct device_attribute dev_attr_security_sanitize =
+ 	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
+@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
+ 				    const char *buf, size_t len)
+ {
+ 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+-	struct cxl_port *port = cxlmd->endpoint;
+ 	ssize_t rc;
+ 	bool erase;
+ 
+ 	if (kstrtobool(buf, &erase) || !erase)
+ 		return -EINVAL;
+ 
+-	if (!port || !is_cxl_endpoint(port))
+-		return -EINVAL;
+-
+-	/* ensure no regions are mapped to this memdev */
+-	if (port->commit_end != -1)
+-		return -EBUSY;
+-
+-	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
++	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
++	if (rc)
++		return rc;
+ 
+-	return rc ? rc : len;
++	return len;
+ }
+ static struct device_attribute dev_attr_security_erase =
+ 	__ATTR(erase, 0200, NULL, security_erase_store);
+@@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ }
+ EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+ 
+-static void cxl_memdev_security_shutdown(struct device *dev)
+-{
+-	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+-
+-	if (mds->security.poll)
+-		cancel_delayed_work_sync(&mds->security.poll_dwork);
+-}
+-
+ static void cxl_memdev_shutdown(struct device *dev)
+ {
+ 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ 
+ 	down_write(&cxl_memdev_rwsem);
+-	cxl_memdev_security_shutdown(dev);
+ 	cxlmd->cxlds = NULL;
+ 	up_write(&cxl_memdev_rwsem);
+ }
+@@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
+ 	struct cxl_memdev *cxlmd = _cxlmd;
+ 	struct device *dev = &cxlmd->dev;
+ 
+-	cxl_memdev_shutdown(dev);
+ 	cdev_device_del(&cxlmd->cdev, dev);
++	cxl_memdev_shutdown(dev);
+ 	put_device(dev);
+ }
+ 
+@@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
+         .cleanup = cxl_fw_cleanup,
+ };
+ 
+-static void devm_cxl_remove_fw_upload(void *fwl)
++static void cxl_remove_fw_upload(void *fwl)
+ {
+ 	firmware_upload_unregister(fwl);
+ }
+ 
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
+ {
+ 	struct cxl_dev_state *cxlds = &mds->cxlds;
+ 	struct device *dev = &cxlds->cxlmd->dev;
+ 	struct fw_upload *fwl;
+-	int rc;
+ 
+ 	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ 		return 0;
+@@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+ 	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ 				       &cxl_memdev_fw_ops, mds);
+ 	if (IS_ERR(fwl))
+-		return dev_err_probe(dev, PTR_ERR(fwl),
+-				     "Failed to register firmware loader\n");
+-
+-	rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
+-				      fwl);
+-	if (rc)
+-		dev_err(dev,
+-			"Failed to add firmware loader remove action: %d\n",
+-			rc);
+-
+-	return rc;
++		return PTR_ERR(fwl);
++	return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
++EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
+ 
+ static const struct file_operations cxl_memdev_fops = {
+ 	.owner = THIS_MODULE,
+@@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = {
+ 	.llseek = noop_llseek,
+ };
+ 
+-static void put_sanitize(void *data)
+-{
+-	struct cxl_memdev_state *mds = data;
+-
+-	sysfs_put(mds->security.sanitize_node);
+-}
+-
+-static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+-{
+-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+-	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+-	struct device *dev = &cxlmd->dev;
+-	struct kernfs_node *sec;
+-
+-	sec = sysfs_get_dirent(dev->kobj.sd, "security");
+-	if (!sec) {
+-		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+-		return -ENODEV;
+-	}
+-	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+-	sysfs_put(sec);
+-	if (!mds->security.sanitize_node) {
+-		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+-		return -ENODEV;
+-	}
+-
+-	return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
+- }
+-
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++				       struct cxl_dev_state *cxlds)
+ {
+ 	struct cxl_memdev *cxlmd;
+ 	struct device *dev;
+@@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+ 	if (rc)
+ 		goto err;
+ 
+-	rc = cxl_memdev_security_init(cxlmd);
+-	if (rc)
+-		goto err;
+-
+-	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
++	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ 	if (rc)
+ 		return ERR_PTR(rc);
+ 	return cxlmd;
+@@ -1079,6 +1016,50 @@ err:
+ }
+ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+ 
++static void sanitize_teardown_notifier(void *data)
++{
++	struct cxl_memdev_state *mds = data;
++	struct kernfs_node *state;
++
++	/*
++	 * Prevent new irq triggered invocations of the workqueue and
++	 * flush inflight invocations.
++	 */
++	mutex_lock(&mds->mbox_mutex);
++	state = mds->security.sanitize_node;
++	mds->security.sanitize_node = NULL;
++	mutex_unlock(&mds->mbox_mutex);
++
++	cancel_delayed_work_sync(&mds->security.poll_dwork);
++	sysfs_put(state);
++}
++
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++				     struct cxl_memdev *cxlmd)
++{
++	struct cxl_dev_state *cxlds = cxlmd->cxlds;
++	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
++	struct kernfs_node *sec;
++
++	if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
++		return 0;
++
++	/*
++	 * Note, the expectation is that @cxlmd would have failed to be
++	 * created if these sysfs_get_dirent calls fail.
++	 */
++	sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
++	if (!sec)
++		return -ENOENT;
++	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
++	sysfs_put(sec);
++	if (!mds->security.sanitize_node)
++		return -ENOENT;
++
++	return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
++}
++EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
++
+ __init int cxl_memdev_init(void)
+ {
+ 	dev_t devt;
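
A pattern running through these cxl/core changes is re-homing devm teardown from the memdev's own struct device to the caller-supplied @host (the PCI driver's device), so that firmware-upload unregistration, the sanitize notifier, and the memdev itself are torn down at driver unbind instead of at memdev release. devm_add_action_or_reset() is the vehicle: it queues a callback on the device's devres list and, on registration failure, runs it immediately. A minimal sketch:

#include <linux/device.h>

static void my_teardown(void *data)
{
	/* undo whatever my_setup() established; invoked at @host unbind,
	 * or synchronously if devm_add_action_or_reset() itself fails */
}

static int my_setup(struct device *host, void *state)
{
	/* ... acquire the resource tracked by @state ... */
	return devm_add_action_or_reset(host, my_teardown, state);
}
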
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 7ca01a834e188..2c6001592fe20 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -28,6 +28,12 @@
+  * instantiated by the core.
+  */
+ 
++/*
++ * All changes to the interleave configuration occur with this lock held
++ * for write.
++ */
++DECLARE_RWSEM(cxl_region_rwsem);
++
+ static DEFINE_IDA(cxl_port_ida);
+ static DEFINE_XARRAY(cxl_root_buses);
+ 
+@@ -691,14 +697,14 @@ err:
+ 	return ERR_PTR(rc);
+ }
+ 
+-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
++static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
+ 			       resource_size_t component_reg_phys)
+ {
+ 	if (component_reg_phys == CXL_RESOURCE_NONE)
+ 		return 0;
+ 
+ 	*map = (struct cxl_register_map) {
+-		.dev = dev,
++		.host = host,
+ 		.reg_type = CXL_REGLOC_RBI_COMPONENT,
+ 		.resource = component_reg_phys,
+ 		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -716,13 +722,23 @@ static int cxl_port_setup_regs(struct cxl_port *port,
+ 				   component_reg_phys);
+ }
+ 
+-static int cxl_dport_setup_regs(struct cxl_dport *dport,
++static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
+ 				resource_size_t component_reg_phys)
+ {
++	int rc;
++
+ 	if (dev_is_platform(dport->dport_dev))
+ 		return 0;
+-	return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+-				   component_reg_phys);
++
++	/*
++	 * use @dport->dport_dev for the context for error messages during
++	 * register probing, and fixup @host after the fact, since @host may be
++	 * NULL.
++	 */
++	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
++				 component_reg_phys);
++	dport->comp_map.host = host;
++	return rc;
+ }
+ 
+ static struct cxl_port *__devm_cxl_add_port(struct device *host,
+@@ -983,7 +999,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ 	if (!dport)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	if (rcrb != CXL_RESOURCE_NONE) {
++	dport->dport_dev = dport_dev;
++	dport->port_id = port_id;
++	dport->port = port;
++
++	if (rcrb == CXL_RESOURCE_NONE) {
++		rc = cxl_dport_setup_regs(&port->dev, dport,
++					  component_reg_phys);
++		if (rc)
++			return ERR_PTR(rc);
++	} else {
+ 		dport->rcrb.base = rcrb;
+ 		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
+ 							 CXL_RCRB_DOWNSTREAM);
+@@ -992,6 +1017,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ 			return ERR_PTR(-ENXIO);
+ 		}
+ 
++		/*
++		 * RCH @dport is not ready to map until associated with its
++		 * memdev
++		 */
++		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
++		if (rc)
++			return ERR_PTR(rc);
++
+ 		dport->rch = true;
+ 	}
+ 
+@@ -999,14 +1032,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ 		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
+ 			&component_reg_phys);
+ 
+-	dport->dport_dev = dport_dev;
+-	dport->port_id = port_id;
+-	dport->port = port;
+-
+-	rc = cxl_dport_setup_regs(dport, component_reg_phys);
+-	if (rc)
+-		return ERR_PTR(rc);
+-
+ 	cond_cxl_root_lock(port);
+ 	rc = add_dport(port, dport);
+ 	cond_cxl_root_unlock(port);
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index b4c6a749406f1..a25f5deb3de51 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -28,12 +28,6 @@
+  * 3. Decoder targets
+  */
+ 
+-/*
+- * All changes to the interleave configuration occur with this lock held
+- * for write.
+- */
+-static DECLARE_RWSEM(cxl_region_rwsem);
+-
+ static struct cxl_region *to_cxl_region(struct device *dev);
+ 
+ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ 	 */
+ 	rc = cxl_region_invalidate_memregion(cxlr);
+ 	if (rc)
+-		return rc;
++		goto out;
+ 
+ 	if (commit) {
+ 		rc = cxl_region_decode_commit(cxlr);
+@@ -1196,6 +1190,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 		return rc;
+ 	}
+ 
++	if (iw > 8 || iw > cxlsd->nr_targets) {
++		dev_dbg(&cxlr->dev,
++			"%s:%s:%s: ways: %d overflows targets: %d\n",
++			dev_name(port->uport_dev), dev_name(&port->dev),
++			dev_name(&cxld->dev), iw, cxlsd->nr_targets);
++		return -ENXIO;
++	}
++
+ 	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ 		if (cxld->interleave_ways != iw ||
+ 		    cxld->interleave_granularity != ig ||
+@@ -1481,6 +1483,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
+ 	return 0;
+ }
+ 
++static int cmp_interleave_pos(const void *a, const void *b)
++{
++	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
++	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
++
++	return cxled_a->pos - cxled_b->pos;
++}
++
+ static struct cxl_port *next_port(struct cxl_port *port)
+ {
+ 	if (!port->parent_dport)
+@@ -1488,119 +1498,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
+ 	return port->parent_dport->port;
+ }
+ 
+-static int decoder_match_range(struct device *dev, void *data)
++static int match_switch_decoder_by_range(struct device *dev, void *data)
+ {
+-	struct cxl_endpoint_decoder *cxled = data;
+ 	struct cxl_switch_decoder *cxlsd;
++	struct range *r1, *r2 = data;
+ 
+ 	if (!is_switch_decoder(dev))
+ 		return 0;
+ 
+ 	cxlsd = to_cxl_switch_decoder(dev);
+-	return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
+-}
++	r1 = &cxlsd->cxld.hpa_range;
+ 
+-static void find_positions(const struct cxl_switch_decoder *cxlsd,
+-			   const struct cxl_port *iter_a,
+-			   const struct cxl_port *iter_b, int *a_pos,
+-			   int *b_pos)
+-{
+-	int i;
+-
+-	for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
+-		if (cxlsd->target[i] == iter_a->parent_dport)
+-			*a_pos = i;
+-		else if (cxlsd->target[i] == iter_b->parent_dport)
+-			*b_pos = i;
+-		if (*a_pos >= 0 && *b_pos >= 0)
+-			break;
+-	}
++	if (is_root_decoder(dev))
++		return range_contains(r1, r2);
++	return (r1->start == r2->start && r1->end == r2->end);
+ }
+ 
+-static int cmp_decode_pos(const void *a, const void *b)
++static int find_pos_and_ways(struct cxl_port *port, struct range *range,
++			     int *pos, int *ways)
+ {
+-	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+-	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+-	struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
+-	struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
+-	struct cxl_port *port_a = cxled_to_port(cxled_a);
+-	struct cxl_port *port_b = cxled_to_port(cxled_b);
+-	struct cxl_port *iter_a, *iter_b, *port = NULL;
+ 	struct cxl_switch_decoder *cxlsd;
++	struct cxl_port *parent;
+ 	struct device *dev;
+-	int a_pos, b_pos;
+-	unsigned int seq;
+-
+-	/* Exit early if any prior sorting failed */
+-	if (cxled_a->pos < 0 || cxled_b->pos < 0)
+-		return 0;
++	int rc = -ENXIO;
+ 
+-	/*
+-	 * Walk up the hierarchy to find a shared port, find the decoder that
+-	 * maps the range, compare the relative position of those dport
+-	 * mappings.
+-	 */
+-	for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
+-		struct cxl_port *next_a, *next_b;
++	parent = next_port(port);
++	if (!parent)
++		return rc;
+ 
+-		next_a = next_port(iter_a);
+-		if (!next_a)
+-			break;
++	dev = device_find_child(&parent->dev, range,
++				match_switch_decoder_by_range);
++	if (!dev) {
++		dev_err(port->uport_dev,
++			"failed to find decoder mapping %#llx-%#llx\n",
++			range->start, range->end);
++		return rc;
++	}
++	cxlsd = to_cxl_switch_decoder(dev);
++	*ways = cxlsd->cxld.interleave_ways;
+ 
+-		for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
+-			next_b = next_port(iter_b);
+-			if (next_a != next_b)
+-				continue;
+-			port = next_a;
++	for (int i = 0; i < *ways; i++) {
++		if (cxlsd->target[i] == port->parent_dport) {
++			*pos = i;
++			rc = 0;
+ 			break;
+ 		}
+-
+-		if (port)
+-			break;
+ 	}
++	put_device(dev);
+ 
+-	if (!port) {
+-		dev_err(cxlmd_a->dev.parent,
+-			"failed to find shared port with %s\n",
+-			dev_name(cxlmd_b->dev.parent));
+-		goto err;
+-	}
++	return rc;
++}
+ 
+-	dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
+-	if (!dev) {
+-		struct range *range = &cxled_a->cxld.hpa_range;
++/**
++ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
++ * @cxled: endpoint decoder member of given region
++ *
++ * The endpoint position is calculated by traversing the topology from
++ * the endpoint to the root decoder and iteratively applying this
++ * calculation:
++ *
++ *    position = position * parent_ways + parent_pos;
++ *
++ * ...where @position is inferred from switch and root decoder target lists.
++ *
++ * Return: position >= 0 on success
++ *	   -ENXIO on failure
++ */
++static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
++{
++	struct cxl_port *iter, *port = cxled_to_port(cxled);
++	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++	struct range *range = &cxled->cxld.hpa_range;
++	int parent_ways = 0, parent_pos = 0, pos = 0;
++	int rc;
+ 
+-		dev_err(port->uport_dev,
+-			"failed to find decoder that maps %#llx-%#llx\n",
+-			range->start, range->end);
+-		goto err;
+-	}
++	/*
++	 * Example: the expected interleave order of the 4-way region shown
++	 * below is: mem0, mem2, mem1, mem3
++	 *
++	 *		  root_port
++	 *                 /      \
++	 *      host_bridge_0    host_bridge_1
++	 *        |    |           |    |
++	 *       mem0 mem1        mem2 mem3
++	 *
++	 * In the example the calculator will iterate twice. The first iteration
++	 * uses the mem position in the host-bridge and the ways of the host-
++	 * bridge to generate the first, or local, position. The second
++	 * iteration uses the host-bridge position in the root_port and the ways
++	 * of the root_port to refine the position.
++	 *
++	 * A trace of the calculation per endpoint looks like this:
++	 * mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
++	 *       pos = 0 * 2 + 0          pos = 0 * 2 + 1
++	 *       pos: 0                   pos: 1
++	 *
++	 * mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
++	 *       pos = 1 * 2 + 0          pos = 1 * 2 + 1
++	 *       pos: 2                   pos = 3
++	 *
++	 * Note that while this example is simple, the method applies to more
++	 * complex topologies, including those with switches.
++	 */
+ 
+-	cxlsd = to_cxl_switch_decoder(dev);
+-	do {
+-		seq = read_seqbegin(&cxlsd->target_lock);
+-		find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
+-	} while (read_seqretry(&cxlsd->target_lock, seq));
++	/* Iterate from endpoint to root_port refining the position */
++	for (iter = port; iter; iter = next_port(iter)) {
++		if (is_cxl_root(iter))
++			break;
+ 
+-	put_device(dev);
++		rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
++		if (rc)
++			return rc;
+ 
+-	if (a_pos < 0 || b_pos < 0) {
+-		dev_err(port->uport_dev,
+-			"failed to find shared decoder for %s and %s\n",
+-			dev_name(cxlmd_a->dev.parent),
+-			dev_name(cxlmd_b->dev.parent));
+-		goto err;
++		pos = pos * parent_ways + parent_pos;
+ 	}
+ 
+-	dev_dbg(port->uport_dev, "%s comes %s %s\n",
+-		dev_name(cxlmd_a->dev.parent),
+-		a_pos - b_pos < 0 ? "before" : "after",
+-		dev_name(cxlmd_b->dev.parent));
++	dev_dbg(&cxlmd->dev,
++		"decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
++		dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
++		dev_name(&port->dev), range->start, range->end, pos);
+ 
+-	return a_pos - b_pos;
+-err:
+-	cxled_a->pos = -1;
+-	return 0;
++	return pos;
+ }
+ 
+ static int cxl_region_sort_targets(struct cxl_region *cxlr)
+@@ -1608,22 +1626,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
+ 	struct cxl_region_params *p = &cxlr->params;
+ 	int i, rc = 0;
+ 
+-	sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
+-	     NULL);
+-
+ 	for (i = 0; i < p->nr_targets; i++) {
+ 		struct cxl_endpoint_decoder *cxled = p->targets[i];
+ 
++		cxled->pos = cxl_calc_interleave_pos(cxled);
+ 		/*
+-		 * Record that sorting failed, but still continue to restore
+-		 * cxled->pos with its ->targets[] position so that follow-on
+-		 * code paths can reliably do p->targets[cxled->pos] to
+-		 * self-reference their entry.
++		 * Record that sorting failed, but still continue to calc
++		 * cxled->pos so that follow-on code paths can reliably
++		 * do p->targets[cxled->pos] to self-reference their entry.
+ 		 */
+ 		if (cxled->pos < 0)
+ 			rc = -ENXIO;
+-		cxled->pos = i;
+ 	}
++	/* Keep the cxlr target list in interleave position order */
++	sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
++	     cmp_interleave_pos, NULL);
+ 
+ 	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
+ 	return rc;
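
cxl_calc_interleave_pos() above amounts to a mixed-radix accumulation: at each level from endpoint to root, pos = pos * parent_ways + parent_pos, with the dport position supplying the digit and the interleave ways the radix. The 4-way example from the function's comment can be checked with a few lines of plain C (arrays hold the per-level position/ways, endpoint first; names illustrative):

static int calc_pos(const int *pos_at_level, const int *ways, int levels)
{
	int pos = 0, i;

	for (i = 0; i < levels; i++)	/* endpoint upward toward the root */
		pos = pos * ways[i] + pos_at_level[i];

	return pos;
}

/* mem0: calc_pos((int[]){0, 0}, (int[]){2, 2}, 2) -> 0
 * mem2: calc_pos((int[]){0, 1}, (int[]){2, 2}, 2) -> 1
 * mem1: calc_pos((int[]){1, 0}, (int[]){2, 2}, 2) -> 2
 * mem3: calc_pos((int[]){1, 1}, (int[]){2, 2}, 2) -> 3
 * reproducing the expected order mem0, mem2, mem1, mem3 */
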
+@@ -1762,6 +1779,26 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		.end = p->res->end,
+ 	};
+ 
++	if (p->nr_targets != p->interleave_ways)
++		return 0;
++
++	/*
++	 * Test the auto-discovery position calculator function
++	 * against this successfully created user-defined region.
++	 * A fail message here means that this interleave config
++	 * will fail when presented as CXL_REGION_F_AUTO.
++	 */
++	for (int i = 0; i < p->nr_targets; i++) {
++		struct cxl_endpoint_decoder *cxled = p->targets[i];
++		int test_pos;
++
++		test_pos = cxl_calc_interleave_pos(cxled);
++		dev_dbg(&cxled->cxld.dev,
++			"Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
++			(test_pos == cxled->pos) ? "success" : "fail",
++			test_pos, cxled->pos);
++	}
++
+ 	return 0;
+ 
+ err_decrement:
+@@ -2697,7 +2734,7 @@ err:
+ 	return rc;
+ }
+ 
+-static int match_decoder_by_range(struct device *dev, void *data)
++static int match_root_decoder_by_range(struct device *dev, void *data)
+ {
+ 	struct range *r1, *r2 = data;
+ 	struct cxl_root_decoder *cxlrd;
+@@ -2828,7 +2865,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+ 	int rc;
+ 
+ 	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
+-				      match_decoder_by_range);
++				      match_root_decoder_by_range);
+ 	if (!cxlrd_dev) {
+ 		dev_err(cxlmd->dev.parent,
+ 			"%s:%s no CXL window for range %#llx:%#llx\n",
+diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
+index 6281127b3e9d9..e0fbe964f6f0a 100644
+--- a/drivers/cxl/core/regs.c
++++ b/drivers/cxl/core/regs.c
+@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ 			   struct cxl_component_regs *regs,
+ 			   unsigned long map_mask)
+ {
+-	struct device *dev = map->dev;
++	struct device *host = map->host;
+ 	struct mapinfo {
+ 		const struct cxl_reg_map *rmap;
+ 		void __iomem **addr;
+@@ -225,7 +225,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ 			continue;
+ 		phys_addr = map->resource + mi->rmap->offset;
+ 		length = mi->rmap->size;
+-		*(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
++		*(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
+ 		if (!*(mi->addr))
+ 			return -ENOMEM;
+ 	}
+@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
+ int cxl_map_device_regs(const struct cxl_register_map *map,
+ 			struct cxl_device_regs *regs)
+ {
+-	struct device *dev = map->dev;
++	struct device *host = map->host;
+ 	resource_size_t phys_addr = map->resource;
+ 	struct mapinfo {
+ 		const struct cxl_reg_map *rmap;
+@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
+ 
+ 		addr = phys_addr + mi->rmap->offset;
+ 		length = mi->rmap->size;
+-		*(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
++		*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
+ 		if (!*(mi->addr))
+ 			return -ENOMEM;
+ 	}
+@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ 	int regloc, i;
+ 
+ 	*map = (struct cxl_register_map) {
+-		.dev = &pdev->dev,
++		.host = &pdev->dev,
+ 		.resource = CXL_RESOURCE_NONE,
+ 	};
+ 
+@@ -403,15 +403,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+ 
+ static int cxl_map_regblock(struct cxl_register_map *map)
+ {
+-	struct device *dev = map->dev;
++	struct device *host = map->host;
+ 
+ 	map->base = ioremap(map->resource, map->max_size);
+ 	if (!map->base) {
+-		dev_err(dev, "failed to map registers\n");
++		dev_err(host, "failed to map registers\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
++	dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+ 	return 0;
+ }
+ 
+@@ -425,28 +425,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
+ {
+ 	struct cxl_component_reg_map *comp_map;
+ 	struct cxl_device_reg_map *dev_map;
+-	struct device *dev = map->dev;
++	struct device *host = map->host;
+ 	void __iomem *base = map->base;
+ 
+ 	switch (map->reg_type) {
+ 	case CXL_REGLOC_RBI_COMPONENT:
+ 		comp_map = &map->component_map;
+-		cxl_probe_component_regs(dev, base, comp_map);
+-		dev_dbg(dev, "Set up component registers\n");
++		cxl_probe_component_regs(host, base, comp_map);
++		dev_dbg(host, "Set up component registers\n");
+ 		break;
+ 	case CXL_REGLOC_RBI_MEMDEV:
+ 		dev_map = &map->device_map;
+-		cxl_probe_device_regs(dev, base, dev_map);
++		cxl_probe_device_regs(host, base, dev_map);
+ 		if (!dev_map->status.valid || !dev_map->mbox.valid ||
+ 		    !dev_map->memdev.valid) {
+-			dev_err(dev, "registers not found: %s%s%s\n",
++			dev_err(host, "registers not found: %s%s%s\n",
+ 				!dev_map->status.valid ? "status " : "",
+ 				!dev_map->mbox.valid ? "mbox " : "",
+ 				!dev_map->memdev.valid ? "memdev " : "");
+ 			return -ENXIO;
+ 		}
+ 
+-		dev_dbg(dev, "Probing device registers...\n");
++		dev_dbg(host, "Probing device registers...\n");
+ 		break;
+ 	default:
+ 		break;
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 76d92561af294..b5b015b661eae 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -247,7 +247,7 @@ struct cxl_pmu_reg_map {
+ 
+ /**
+  * struct cxl_register_map - DVSEC harvested register block mapping parameters
+- * @dev: device for devm operations and logging
++ * @host: device for devm operations and logging
+  * @base: virtual base of the register-block-BAR + @block_offset
+  * @resource: physical resource base of the register block
+  * @max_size: maximum mapping size to perform register search
+@@ -257,7 +257,7 @@ struct cxl_pmu_reg_map {
+  * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
+  */
+ struct cxl_register_map {
+-	struct device *dev;
++	struct device *host;
+ 	void __iomem *base;
+ 	resource_size_t resource;
+ 	resource_size_t max_size;
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 706f8a6d1ef43..6933bc20e76b6 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
+ 	return is_cxl_memdev(port->uport_dev);
+ }
+ 
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++				       struct cxl_dev_state *cxlds);
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++				     struct cxl_memdev *cxlmd);
+ struct cxl_memdev_state;
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
+ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 			 resource_size_t base, resource_size_t len,
+ 			 resource_size_t skipped);
+@@ -360,16 +363,16 @@ struct cxl_fw_state {
+  *
+  * @state: state of last security operation
+  * @enabled_cmds: All security commands enabled in the CEL
+- * @poll: polling for sanitization is enabled, device has no mbox irq support
+  * @poll_tmo_secs: polling timeout
++ * @sanitize_active: sanitize completion pending
+  * @poll_dwork: polling work item
+  * @sanitize_node: sanitation sysfs file to notify
+  */
+ struct cxl_security_state {
+ 	unsigned long state;
+ 	DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
+-	bool poll;
+ 	int poll_tmo_secs;
++	bool sanitize_active;
+ 	struct delayed_work poll_dwork;
+ 	struct kernfs_node *sanitize_node;
+ };
+@@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void)
+ }
+ #endif
+ 
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
+ 
+ struct cxl_hdm {
+ 	struct cxl_component_regs regs;
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 2323169b6e5fe..bb37e76ef5a68 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+ 	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ 	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ 	if (opcode == CXL_MBOX_OP_SANITIZE) {
++		mutex_lock(&mds->mbox_mutex);
+ 		if (mds->security.sanitize_node)
+-			sysfs_notify_dirent(mds->security.sanitize_node);
+-
+-		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
++			mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
++		mutex_unlock(&mds->mbox_mutex);
+ 	} else {
+ 		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+ 		rcuwait_wake_up(&mds->mbox_wait);
+@@ -152,18 +152,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
+ 	mutex_lock(&mds->mbox_mutex);
+ 	if (cxl_mbox_background_complete(cxlds)) {
+ 		mds->security.poll_tmo_secs = 0;
+-		put_device(cxlds->dev);
+-
+ 		if (mds->security.sanitize_node)
+ 			sysfs_notify_dirent(mds->security.sanitize_node);
++		mds->security.sanitize_active = false;
+ 
+ 		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ 	} else {
+ 		int timeout = mds->security.poll_tmo_secs + 10;
+ 
+ 		mds->security.poll_tmo_secs = min(15 * 60, timeout);
+-		queue_delayed_work(system_wq, &mds->security.poll_dwork,
+-				   timeout * HZ);
++		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
+ 	}
+ 	mutex_unlock(&mds->mbox_mutex);
+ }
+@@ -295,18 +293,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
+ 		 * and allow userspace to poll(2) for completion.
+ 		 */
+ 		if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
+-			if (mds->security.poll) {
+-				/* hold the device throughout */
+-				get_device(cxlds->dev);
+-
+-				/* give first timeout a second */
+-				timeout = 1;
+-				mds->security.poll_tmo_secs = timeout;
+-				queue_delayed_work(system_wq,
+-						   &mds->security.poll_dwork,
+-						   timeout * HZ);
+-			}
+-
++			if (mds->security.sanitize_active)
++				return -EBUSY;
++
++			/* give first timeout a second */
++			timeout = 1;
++			mds->security.poll_tmo_secs = timeout;
++			mds->security.sanitize_active = true;
++			schedule_delayed_work(&mds->security.poll_dwork,
++					      timeout * HZ);
+ 			dev_dbg(dev, "Sanitization operation started\n");
+ 			goto success;
+ 		}
+@@ -389,7 +384,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ 	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ 	struct device *dev = cxlds->dev;
+ 	unsigned long timeout;
++	int irq, msgnum;
+ 	u64 md_status;
++	u32 ctrl;
+ 
+ 	timeout = jiffies + mbox_ready_timeout * HZ;
+ 	do {
+@@ -437,33 +434,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ 	dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
+ 
+ 	rcuwait_init(&mds->mbox_wait);
++	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+ 
+-	if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+-		u32 ctrl;
+-		int irq, msgnum;
+-		struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+-
+-		msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+-		irq = pci_irq_vector(pdev, msgnum);
+-		if (irq < 0)
+-			goto mbox_poll;
+-
+-		if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+-			goto mbox_poll;
++	/* background command interrupts are optional */
++	if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
++		return 0;
+ 
+-		/* enable background command mbox irq support */
+-		ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+-		ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+-		writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++	msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
++	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
++	if (irq < 0)
++		return 0;
+ 
++	if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
+ 		return 0;
+-	}
+ 
+-mbox_poll:
+-	mds->security.poll = true;
+-	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
++	dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
++	/* enable background command mbox irq support */
++	ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++	ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
++	writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ 
+-	dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
+ 	return 0;
+ }
+ 
+@@ -484,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
+ 	resource_size_t component_reg_phys;
+ 
+ 	*map = (struct cxl_register_map) {
+-		.dev = &pdev->dev,
++		.host = &pdev->dev,
+ 		.resource = CXL_RESOURCE_NONE,
+ 	};
+ 
+@@ -883,11 +873,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (rc)
+ 		return rc;
+ 
+-	cxlmd = devm_cxl_add_memdev(cxlds);
++	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ 	if (IS_ERR(cxlmd))
+ 		return PTR_ERR(cxlmd);
+ 
+-	rc = cxl_memdev_setup_fw_upload(mds);
++	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
++	if (rc)
++		return rc;
++
++	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
+ 	if (rc)
+ 		return rc;
+ 
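
A minimal sketch (assumed names, not the driver's code) of the flow the
cxl/pci.c hunks above converge on: the mailbox interrupt is a *threaded*
handler, so it may take the same mutex as the poll worker, and on a
sanitize completion it only reschedules that worker to run immediately;
the notify/teardown logic then lives in exactly one place:

	#include <linux/interrupt.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct demo_mbox {
		struct mutex lock;
		struct delayed_work poll_dwork;	/* notifies + tears down */
		bool sanitize_active;
	};

	static irqreturn_t demo_mbox_irq_thread(int irq, void *data)
	{
		struct demo_mbox *mb = data;

		mutex_lock(&mb->lock);
		if (mb->sanitize_active)
			mod_delayed_work(system_wq, &mb->poll_dwork, 0);
		mutex_unlock(&mb->lock);

		return IRQ_HANDLED;
	}

	/*
	 * Registered with a NULL hard-irq handler, mirroring the
	 * cxl_request_irq() argument change in the hunk above:
	 * request_threaded_irq(irq, NULL, demo_mbox_irq_thread,
	 *			IRQF_ONESHOT, "demo-mbox", mb);
	 */
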
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index 39ac069cabc75..74893c06aa087 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(data->clk),
+ 				     "Cannot get the clk pclk_ddr_mon\n");
+ 
+-	/* try to find the optional reference to the pmu syscon */
+ 	node = of_parse_phandle(np, "rockchip,pmu", 0);
+-	if (node) {
+-		data->regmap_pmu = syscon_node_to_regmap(node);
+-		of_node_put(node);
+-		if (IS_ERR(data->regmap_pmu))
+-			return PTR_ERR(data->regmap_pmu);
+-	}
++	if (!node)
++		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
++
++	data->regmap_pmu = syscon_node_to_regmap(node);
++	of_node_put(node);
++	if (IS_ERR(data->regmap_pmu))
++		return PTR_ERR(data->regmap_pmu);
++
+ 	data->dev = dev;
+ 
+ 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index dc096839ac637..c5e679070e463 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,12 +1,12 @@
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ 
++obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
++idxd_bus-y := bus.o
++
+ obj-$(CONFIG_INTEL_IDXD) += idxd.o
+ idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
+ 
+ idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+ 
+-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+-idxd_bus-y := bus.o
+-
+ obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+ idxd_compat-y := compat.o
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index 1b046d9a3a269..16d342654da2b 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
+ 	dma_addr_t dma;
+ 	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
+ 
+-	BUG_ON(sw_desc->nb_desc == 0);
+ 	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
+ 		if (i > 0)
+ 			dma = sw_desc->hw_desc[i - 1]->ddadr;
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 9ea91c640c324..5d07aa5a74928 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2402,7 +2402,7 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq < 0 && node)
+ 		irq = irq_of_parse_and_map(node, 0);
+ 
+-	if (irq >= 0) {
++	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ 					  dev_name(dev));
+ 		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+@@ -2418,7 +2418,7 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq < 0 && node)
+ 		irq = irq_of_parse_and_map(node, 2);
+ 
+-	if (irq >= 0) {
++	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ 					  dev_name(dev));
+ 		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
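
For context on the '>= 0' to '> 0' change above: 0 is never a valid
Linux IRQ number. platform_get_irq_byname() reports failure with a
negative errno, but the irq_of_parse_and_map() fallback reports failure
by returning 0, which the old test wrongly accepted. A sketch of the
corrected idiom (resource name illustrative; pdev and node assumed from
the surrounding probe):

	int irq = platform_get_irq_byname(pdev, "demo_irq");

	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);	/* 0 on failure */

	if (irq > 0) {					/* not '>= 0' */
		/* request the interrupt */
	}
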
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index 2b8bfcd010f5f..7865438b36960 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ 	dev->release = ffa_release_device;
+ 	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+ 
++	ffa_dev->id = id;
+ 	ffa_dev->vm_id = vm_id;
+ 	ffa_dev->ops = ops;
+ 	uuid_copy(&ffa_dev->uuid, uuid);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 121f4fc903cd5..7cd6b1564e801 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -587,17 +587,9 @@ static int ffa_partition_info_get(const char *uuid_str,
+ 	return 0;
+ }
+ 
+-static void _ffa_mode_32bit_set(struct ffa_device *dev)
+-{
+-	dev->mode_32bit = true;
+-}
+-
+ static void ffa_mode_32bit_set(struct ffa_device *dev)
+ {
+-	if (drv_info->version > FFA_VERSION_1_0)
+-		return;
+-
+-	_ffa_mode_32bit_set(dev);
++	dev->mode_32bit = true;
+ }
+ 
+ static int ffa_sync_send_receive(struct ffa_device *dev,
+@@ -706,7 +698,7 @@ static void ffa_setup_partitions(void)
+ 
+ 		if (drv_info->version > FFA_VERSION_1_0 &&
+ 		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+-			_ffa_mode_32bit_set(ffa_dev);
++			ffa_mode_32bit_set(ffa_dev);
+ 	}
+ 	kfree(pbuf);
+ }
+diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
+index 17bd3590aaa24..5ce202c26e8d3 100644
+--- a/drivers/firmware/tegra/bpmp.c
++++ b/drivers/firmware/tegra/bpmp.c
+@@ -314,6 +314,8 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ 	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+ }
+ 
++static int __maybe_unused tegra_bpmp_resume(struct device *dev);
++
+ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ 			       struct tegra_bpmp_message *msg)
+ {
+@@ -326,6 +328,14 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ 	if (!tegra_bpmp_message_valid(msg))
+ 		return -EINVAL;
+ 
++	if (bpmp->suspended) {
++		/* Reset BPMP IPC channels during resume based on flags passed */
++		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++			tegra_bpmp_resume(bpmp->dev);
++		else
++			return -EAGAIN;
++	}
++
+ 	channel = bpmp->tx_channel;
+ 
+ 	spin_lock(&bpmp->atomic_tx_lock);
+@@ -365,6 +375,14 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ 	if (!tegra_bpmp_message_valid(msg))
+ 		return -EINVAL;
+ 
++	if (bpmp->suspended) {
++		/* Reset BPMP IPC channels during resume based on flags passed */
++		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++			tegra_bpmp_resume(bpmp->dev);
++		else
++			return -EAGAIN;
++	}
++
+ 	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ 					    msg->tx.size);
+ 	if (IS_ERR(channel))
+@@ -797,10 +815,21 @@ deinit:
+ 	return err;
+ }
+ 
++static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
++{
++	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
++
++	bpmp->suspended = true;
++
++	return 0;
++}
++
+ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ {
+ 	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+ 
++	bpmp->suspended = false;
++
+ 	if (bpmp->soc->ops->resume)
+ 		return bpmp->soc->ops->resume(bpmp);
+ 	else
+@@ -808,6 +837,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ }
+ 
+ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
++	.suspend_noirq = tegra_bpmp_suspend,
+ 	.resume_noirq = tegra_bpmp_resume,
+ };
+ 
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 91aaa0ca9bde8..1213951307f72 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
+-/**
+- * ti_sci_debugfs_destroy() - clean up log debug file
+- * @pdev:	platform device pointer
+- * @info:	Pointer to SCI entity information
+- */
+-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+-				   struct ti_sci_info *info)
+-{
+-	if (IS_ERR(info->debug_region))
+-		return;
+-
+-	debugfs_remove(info->d);
+-}
+ #else /* CONFIG_DEBUG_FS */
+ static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ 					struct ti_sci_info *info)
+@@ -3448,43 +3435,12 @@ out:
+ 	return ret;
+ }
+ 
+-static int ti_sci_remove(struct platform_device *pdev)
+-{
+-	struct ti_sci_info *info;
+-	struct device *dev = &pdev->dev;
+-	int ret = 0;
+-
+-	of_platform_depopulate(dev);
+-
+-	info = platform_get_drvdata(pdev);
+-
+-	if (info->nb.notifier_call)
+-		unregister_restart_handler(&info->nb);
+-
+-	mutex_lock(&ti_sci_list_mutex);
+-	if (info->users)
+-		ret = -EBUSY;
+-	else
+-		list_del(&info->node);
+-	mutex_unlock(&ti_sci_list_mutex);
+-
+-	if (!ret) {
+-		ti_sci_debugfs_destroy(pdev, info);
+-
+-		/* Safe to free channels since no more users */
+-		mbox_free_channel(info->chan_tx);
+-		mbox_free_channel(info->chan_rx);
+-	}
+-
+-	return ret;
+-}
+-
+ static struct platform_driver ti_sci_driver = {
+ 	.probe = ti_sci_probe,
+-	.remove = ti_sci_remove,
+ 	.driver = {
+ 		   .name = "ti-sci",
+ 		   .of_match_table = of_match_ptr(ti_sci_of_match),
++		   .suppress_bind_attrs = true,
+ 	},
+ };
+ module_platform_driver(ti_sci_driver);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index fd81b04559d49..477bfc813c81b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -383,9 +383,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
+ 	struct amdgpu_ring *ring = &kiq->ring;
+ 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+ 
++#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+ 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+ 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+ 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
++#endif
+ 
+ 	/* create MQD for KIQ */
+ 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 6c6184f0dbc17..508f02eb0cf8f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -28,7 +28,7 @@
+ #define AMDGPU_IH_MAX_NUM_IVS	32
+ 
+ #define IH_RING_SIZE	(256 * 1024)
+-#define IH_SW_RING_SIZE	(8 * 1024)	/* enough for 256 CAM entries */
++#define IH_SW_RING_SIZE	(16 * 1024)	/* enough for 512 CAM entries */
+ 
+ struct amdgpu_device;
+ struct amdgpu_iv_entry;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index f743bf2c92877..ec84d8bb6a213 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6466,11 +6466,11 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+ 		nv_grbm_select(adev, 0, 0, 0, 0);
+ 		mutex_unlock(&adev->srbm_mutex);
+ 		if (adev->gfx.me.mqd_backup[mqd_idx])
+-			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ 	} else {
+ 		/* restore mqd with the backup copy */
+ 		if (adev->gfx.me.mqd_backup[mqd_idx])
+-			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ 		/* reset the ring */
+ 		ring->wptr = 0;
+ 		*ring->wptr_cpu_addr = 0;
+@@ -6744,7 +6744,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ 		/* reset MQD to a clean status */
+ 		if (adev->gfx.kiq[0].mqd_backup)
+-			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+ 
+ 		/* reset ring buffer */
+ 		ring->wptr = 0;
+@@ -6767,7 +6767,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ 		mutex_unlock(&adev->srbm_mutex);
+ 
+ 		if (adev->gfx.kiq[0].mqd_backup)
+-			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ 	}
+ 
+ 	return 0;
+@@ -6788,11 +6788,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+ 		mutex_unlock(&adev->srbm_mutex);
+ 
+ 		if (adev->gfx.mec.mqd_backup[mqd_idx])
+-			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ 	} else {
+ 		/* restore MQD to a clean status */
+ 		if (adev->gfx.mec.mqd_backup[mqd_idx])
+-			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ 		/* reset ring buffer */
+ 		ring->wptr = 0;
+ 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
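
The memcpy() to memcpy_fromio()/memcpy_toio() conversions above follow
from the MQD backing store possibly living in VRAM behind an __iomem
mapping, which plain memcpy() may not access correctly on every
architecture (ARM in particular, per the amdgpu_gfx.c hunk earlier in
this patch). A sketch of the helper pair, with hypothetical names:

	#include <linux/io.h>

	static void demo_backup_mqd(void *backup, const void __iomem *mqd,
				    size_t size)
	{
		memcpy_fromio(backup, mqd, size);	/* device -> RAM */
	}

	static void demo_restore_mqd(void __iomem *mqd, const void *backup,
				     size_t size)
	{
		memcpy_toio(mqd, backup, size);		/* RAM -> device */
	}
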
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index a82cba884c48f..0b9aa825bc4b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -3684,11 +3684,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+ 		soc21_grbm_select(adev, 0, 0, 0, 0);
+ 		mutex_unlock(&adev->srbm_mutex);
+ 		if (adev->gfx.me.mqd_backup[mqd_idx])
+-			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ 	} else {
+ 		/* restore mqd with the backup copy */
+ 		if (adev->gfx.me.mqd_backup[mqd_idx])
+-			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ 		/* reset the ring */
+ 		ring->wptr = 0;
+ 		*ring->wptr_cpu_addr = 0;
+@@ -3977,7 +3977,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ 		/* reset MQD to a clean status */
+ 		if (adev->gfx.kiq[0].mqd_backup)
+-			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+ 
+ 		/* reset ring buffer */
+ 		ring->wptr = 0;
+@@ -4000,7 +4000,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ 		mutex_unlock(&adev->srbm_mutex);
+ 
+ 		if (adev->gfx.kiq[0].mqd_backup)
+-			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ 	}
+ 
+ 	return 0;
+@@ -4021,11 +4021,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+ 		mutex_unlock(&adev->srbm_mutex);
+ 
+ 		if (adev->gfx.mec.mqd_backup[mqd_idx])
+-			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ 	} else {
+ 		/* restore MQD to a clean status */
+ 		if (adev->gfx.mec.mqd_backup[mqd_idx])
+-			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ 		/* reset ring buffer */
+ 		ring->wptr = 0;
+ 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 5ff1a5a89d968..50f943e04f8a4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -484,11 +484,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
+ 
+ 	/* We need a new svm_bo. Spin-loop to wait for concurrent
+ 	 * svm_range_bo_release to finish removing this range from
+-	 * its range list. After this, it is safe to reuse the
+-	 * svm_bo pointer and svm_bo_list head.
++	 * its range list and set prange->svm_bo to null. After this,
++	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
+ 	 */
+-	while (!list_empty_careful(&prange->svm_bo_list))
+-		;
++	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
++		cond_resched();
+ 
+ 	return false;
+ }
+@@ -809,7 +809,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+ 		}
+ 	}
+ 
+-	return !prange->is_error_flag;
++	return true;
+ }
+ 
+ /**
+@@ -1627,71 +1627,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ 
+ 	start = prange->start << PAGE_SHIFT;
+ 	end = (prange->last + 1) << PAGE_SHIFT;
+-	for (addr = start; addr < end && !r; ) {
++	for (addr = start; !r && addr < end; ) {
+ 		struct hmm_range *hmm_range;
+ 		struct vm_area_struct *vma;
+-		unsigned long next;
++		unsigned long next = 0;
+ 		unsigned long offset;
+ 		unsigned long npages;
+ 		bool readonly;
+ 
+ 		vma = vma_lookup(mm, addr);
+-		if (!vma) {
++		if (vma) {
++			readonly = !(vma->vm_flags & VM_WRITE);
++
++			next = min(vma->vm_end, end);
++			npages = (next - addr) >> PAGE_SHIFT;
++			WRITE_ONCE(p->svms.faulting_task, current);
++			r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
++						       readonly, owner, NULL,
++						       &hmm_range);
++			WRITE_ONCE(p->svms.faulting_task, NULL);
++			if (r) {
++				pr_debug("failed %d to get svm range pages\n", r);
++				if (r == -EBUSY)
++					r = -EAGAIN;
++			}
++		} else {
+ 			r = -EFAULT;
+-			goto unreserve_out;
+-		}
+-		readonly = !(vma->vm_flags & VM_WRITE);
+-
+-		next = min(vma->vm_end, end);
+-		npages = (next - addr) >> PAGE_SHIFT;
+-		WRITE_ONCE(p->svms.faulting_task, current);
+-		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+-					       readonly, owner, NULL,
+-					       &hmm_range);
+-		WRITE_ONCE(p->svms.faulting_task, NULL);
+-		if (r) {
+-			pr_debug("failed %d to get svm range pages\n", r);
+-			goto unreserve_out;
+ 		}
+ 
+-		offset = (addr - start) >> PAGE_SHIFT;
+-		r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
+-				      hmm_range->hmm_pfns);
+-		if (r) {
+-			pr_debug("failed %d to dma map range\n", r);
+-			goto unreserve_out;
++		if (!r) {
++			offset = (addr - start) >> PAGE_SHIFT;
++			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
++					      hmm_range->hmm_pfns);
++			if (r)
++				pr_debug("failed %d to dma map range\n", r);
+ 		}
+ 
+ 		svm_range_lock(prange);
+-		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
++		if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ 			pr_debug("hmm update the range, need validate again\n");
+ 			r = -EAGAIN;
+-			goto unlock_out;
+ 		}
+-		if (!list_empty(&prange->child_list)) {
++
++		if (!r && !list_empty(&prange->child_list)) {
+ 			pr_debug("range split by unmap in parallel, validate again\n");
+ 			r = -EAGAIN;
+-			goto unlock_out;
+ 		}
+ 
+-		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+-					  ctx->bitmap, wait, flush_tlb);
++		if (!r)
++			r = svm_range_map_to_gpus(prange, offset, npages, readonly,
++						  ctx->bitmap, wait, flush_tlb);
++
++		if (!r && next == end)
++			prange->mapped_to_gpu = true;
+ 
+-unlock_out:
+ 		svm_range_unlock(prange);
+ 
+ 		addr = next;
+ 	}
+ 
+-	if (addr == end) {
+-		prange->validated_once = true;
+-		prange->mapped_to_gpu = true;
+-	}
+-
+-unreserve_out:
+ 	svm_range_unreserve_bos(ctx);
+-
+-	prange->is_error_flag = !!r;
+ 	if (!r)
+ 		prange->validate_timestamp = ktime_get_boottime();
+ 
+@@ -2057,7 +2052,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+ 		next = interval_tree_iter_next(node, start, last);
+ 		next_start = min(node->last, last) + 1;
+ 
+-		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
++		if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
++		    prange->mapped_to_gpu) {
+ 			/* nothing to do */
+ 		} else if (node->start < start || node->last > last) {
+ 			/* node intersects the update range and its attributes
+@@ -3470,7 +3466,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ 	struct svm_range *next;
+ 	bool update_mapping = false;
+ 	bool flush_tlb;
+-	int r = 0;
++	int r, ret = 0;
+ 
+ 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ 		 p->pasid, &p->svms, start, start + size - 1, size);
+@@ -3558,7 +3554,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ out_unlock_range:
+ 		mutex_unlock(&prange->migrate_mutex);
+ 		if (r)
+-			break;
++			ret = r;
+ 	}
+ 
+ 	svm_range_debug_dump(svms);
+@@ -3571,7 +3567,7 @@ out:
+ 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
+ 		 &p->svms, start, start + size - 1, r);
+ 
+-	return r;
++	return ret ? ret : r;
+ }
+ 
+ static int
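
On the svm_range_validate_svm_bo() change above: the old empty loop body
busy-waited without ever yielding, which can starve the thread running
svm_range_bo_release() (and trip soft-lockup detection) on
non-preemptible kernels; yielding each iteration lets the release finish:

	/* shape of the fixed wait loop */
	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
		cond_resched();
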
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+index 21b14510882b8..d2aa8c324c610 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+@@ -132,9 +132,7 @@ struct svm_range {
+ 	struct list_head		child_list;
+ 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
+ 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
+-	bool				validated_once;
+ 	bool				mapped_to_gpu;
+-	bool				is_error_flag;
+ };
+ 
+ static inline void svm_range_lock(struct svm_range *prange)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c9959bd8147db..573e27399c790 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1690,8 +1690,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ 			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ 	} else {
+-		DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+-			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
++		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ 		goto error;
+ 	}
+ 
+@@ -9858,16 +9857,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ 	}
+ }
+ 
++static void
++dm_get_plane_scale(struct drm_plane_state *plane_state,
++		   int *out_plane_scale_w, int *out_plane_scale_h)
++{
++	int plane_src_w, plane_src_h;
++
++	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
++	*out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
++	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++}
++
+ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 				struct drm_crtc *crtc,
+ 				struct drm_crtc_state *new_crtc_state)
+ {
+-	struct drm_plane *cursor = crtc->cursor, *underlying;
++	struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
++	struct drm_plane_state *old_plane_state, *new_plane_state;
+ 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ 	int i;
+ 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+-	int cursor_src_w, cursor_src_h;
+-	int underlying_src_w, underlying_src_h;
++	bool any_relevant_change = false;
+ 
+ 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ 	 * cursor per pipe but it's going to inherit the scaling and
+@@ -9875,13 +9885,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 	 * blending properties match the underlying planes'.
+ 	 */
+ 
+-	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+-	if (!new_cursor_state || !new_cursor_state->fb)
++	/* If no plane was enabled or changed scaling, no need to check again */
++	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
++
++		if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
++			continue;
++
++		if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
++			any_relevant_change = true;
++			break;
++		}
++
++		if (new_plane_state->fb == old_plane_state->fb &&
++		    new_plane_state->crtc_w == old_plane_state->crtc_w &&
++		    new_plane_state->crtc_h == old_plane_state->crtc_h)
++			continue;
++
++		dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
++		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
++
++		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
++			any_relevant_change = true;
++			break;
++		}
++	}
++
++	if (!any_relevant_change)
++		return 0;
++
++	new_cursor_state = drm_atomic_get_plane_state(state, cursor);
++	if (IS_ERR(new_cursor_state))
++		return PTR_ERR(new_cursor_state);
++
++	if (!new_cursor_state->fb)
+ 		return 0;
+ 
+-	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+-	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+-	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
++	dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
++
++	/* Need to check all enabled planes, even if this commit doesn't change
++	 * their state
++	 */
++	i = drm_atomic_add_affected_planes(state, crtc);
++	if (i)
++		return i;
+ 
+ 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
+@@ -9892,10 +9939,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 		if (!new_underlying_state->fb)
+ 			continue;
+ 
+-		dm_get_oriented_plane_size(new_underlying_state,
+-					   &underlying_src_w, &underlying_src_h);
+-		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+-		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
++		dm_get_plane_scale(new_underlying_state,
++				   &underlying_scale_w, &underlying_scale_h);
+ 
+ 		if (cursor_scale_w != underlying_scale_w ||
+ 		    cursor_scale_h != underlying_scale_h) {
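
dm_get_plane_scale(), introduced above, expresses a plane's scale as an
integer in units of 1/1000. Worked example with illustrative numbers: a
1920-pixel-wide source shown in a 960-pixel-wide CRTC rectangle yields
960 * 1000 / 1920 = 500, i.e. a 0.5x scale; the cursor check then simply
compares these permille values between the cursor and each underlying
plane instead of repeating the division inline at every call site.
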
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index dc6104a04dce6..7674ea9611ddd 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -1977,8 +1977,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
+ 
+ 	metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ 	ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+-	if (ret)
++	if (ret) {
++		kfree(metrics);
+ 		return ret;
++	}
+ 
+ 	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+ 
+diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+index c8c7f82151559..78122b35a0cbb 100644
+--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+@@ -351,20 +351,25 @@ err_unload:
+ 	return ret;
+ }
+ 
+-static int aspeed_gfx_remove(struct platform_device *pdev)
++static void aspeed_gfx_remove(struct platform_device *pdev)
+ {
+ 	struct drm_device *drm = platform_get_drvdata(pdev);
+ 
+ 	sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+ 	drm_dev_unregister(drm);
+ 	aspeed_gfx_unload(drm);
++	drm_atomic_helper_shutdown(drm);
++}
+ 
+-	return 0;
++static void aspeed_gfx_shutdown(struct platform_device *pdev)
++{
++	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ }
+ 
+ static struct platform_driver aspeed_gfx_platform_driver = {
+ 	.probe		= aspeed_gfx_probe,
+-	.remove		= aspeed_gfx_remove,
++	.remove_new	= aspeed_gfx_remove,
++	.shutdown	= aspeed_gfx_shutdown,
+ 	.driver = {
+ 		.name = "aspeed_gfx",
+ 		.of_match_table = aspeed_gfx_match,
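
The aspeed hunk above switches to the void-returning platform remove
callback (.remove_new, available since v6.3), since an error returned
from .remove was always ignored by the driver core anyway. A minimal
sketch of the shape, with hypothetical names:

	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void demo_remove(struct platform_device *pdev)
	{
		/* no return value: remove cannot usefully fail */
	}

	static struct platform_driver demo_driver = {
		.probe		= demo_probe,
		.remove_new	= demo_remove,
		.driver		= {
			.name = "demo",
		},
	};
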
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index fdd9a493aa9c0..6dc1a09504e13 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -7,17 +7,6 @@
+ #include <drm/drm_print.h>
+ #include "ast_drv.h"
+ 
+-bool ast_astdp_is_connected(struct ast_device *ast)
+-{
+-	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
+-		return false;
+-	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
+-		return false;
+-	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
+-		return false;
+-	return true;
+-}
+-
+ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
+ {
+ 	struct ast_device *ast = to_ast_device(dev);
+diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
+index fa7442b0c2612..1bc35a992369d 100644
+--- a/drivers/gpu/drm/ast/ast_dp501.c
++++ b/drivers/gpu/drm/ast/ast_dp501.c
+@@ -272,9 +272,11 @@ static bool ast_launch_m68k(struct drm_device *dev)
+ 	return true;
+ }
+ 
+-bool ast_dp501_is_connected(struct ast_device *ast)
++bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+ {
+-	u32 boot_address, offset, data;
++	struct ast_device *ast = to_ast_device(dev);
++	u32 i, boot_address, offset, data;
++	u32 *pEDIDidx;
+ 
+ 	if (ast->config_mode == ast_use_p2a) {
+ 		boot_address = get_fw_base(ast);
+@@ -290,6 +292,14 @@ bool ast_dp501_is_connected(struct ast_device *ast)
+ 		data = ast_mindwm(ast, boot_address + offset);
+ 		if (!(data & AST_DP501_PNP_CONNECTED))
+ 			return false;
++
++		/* Read EDID */
++		offset = AST_DP501_EDID_DATA;
++		for (i = 0; i < 128; i += 4) {
++			data = ast_mindwm(ast, boot_address + offset + i);
++			pEDIDidx = (u32 *)(ediddata + i);
++			*pEDIDidx = data;
++		}
+ 	} else {
+ 		if (!ast->dp501_fw_buf)
+ 			return false;
+@@ -309,30 +319,7 @@ bool ast_dp501_is_connected(struct ast_device *ast)
+ 		data = readl(ast->dp501_fw_buf + offset);
+ 		if (!(data & AST_DP501_PNP_CONNECTED))
+ 			return false;
+-	}
+-	return true;
+-}
+-
+-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+-{
+-	struct ast_device *ast = to_ast_device(dev);
+-	u32 i, boot_address, offset, data;
+-	u32 *pEDIDidx;
+-
+-	if (!ast_dp501_is_connected(ast))
+-		return false;
+-
+-	if (ast->config_mode == ast_use_p2a) {
+-		boot_address = get_fw_base(ast);
+ 
+-		/* Read EDID */
+-		offset = AST_DP501_EDID_DATA;
+-		for (i = 0; i < 128; i += 4) {
+-			data = ast_mindwm(ast, boot_address + offset + i);
+-			pEDIDidx = (u32 *)(ediddata + i);
+-			*pEDIDidx = data;
+-		}
+-	} else {
+ 		/* Read EDID */
+ 		offset = AST_DP501_EDID_DATA;
+ 		for (i = 0; i < 128; i += 4) {
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 8a0ffa8b5939b..5498a6676f2e8 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -468,7 +468,6 @@ void ast_patch_ahb_2500(struct ast_device *ast);
+ /* ast dp501 */
+ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
+ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+-bool ast_dp501_is_connected(struct ast_device *ast);
+ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
+ u8 ast_get_dp501_max_clk(struct drm_device *dev);
+ void ast_init_3rdtx(struct drm_device *dev);
+@@ -477,7 +476,6 @@ void ast_init_3rdtx(struct drm_device *dev);
+ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+ 
+ /* aspeed DP */
+-bool ast_astdp_is_connected(struct ast_device *ast);
+ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
+ void ast_dp_launch(struct drm_device *dev);
+ void ast_dp_power_on_off(struct drm_device *dev, bool no);
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 0724516f29737..b3c670af6ef2b 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1585,20 +1585,8 @@ err_drm_connector_update_edid_property:
+ 	return 0;
+ }
+ 
+-static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
+-						 struct drm_modeset_acquire_ctx *ctx,
+-						 bool force)
+-{
+-	struct ast_device *ast = to_ast_device(connector->dev);
+-
+-	if (ast_dp501_is_connected(ast))
+-		return connector_status_connected;
+-	return connector_status_disconnected;
+-}
+-
+ static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+ 	.get_modes = ast_dp501_connector_helper_get_modes,
+-	.detect_ctx = ast_dp501_connector_helper_detect_ctx,
+ };
+ 
+ static const struct drm_connector_funcs ast_dp501_connector_funcs = {
+@@ -1623,7 +1611,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
+ 	connector->interlace_allowed = 0;
+ 	connector->doublescan_allowed = 0;
+ 
+-	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 
+ 	return 0;
+ }
+@@ -1695,20 +1683,8 @@ err_drm_connector_update_edid_property:
+ 	return 0;
+ }
+ 
+-static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
+-						 struct drm_modeset_acquire_ctx *ctx,
+-						 bool force)
+-{
+-	struct ast_device *ast = to_ast_device(connector->dev);
+-
+-	if (ast_astdp_is_connected(ast))
+-		return connector_status_connected;
+-	return connector_status_disconnected;
+-}
+-
+ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
+ 	.get_modes = ast_astdp_connector_helper_get_modes,
+-	.detect_ctx = ast_astdp_connector_helper_detect_ctx,
+ };
+ 
+ static const struct drm_connector_funcs ast_astdp_connector_funcs = {
+@@ -1733,7 +1709,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
+ 	connector->interlace_allowed = 0;
+ 	connector->doublescan_allowed = 0;
+ 
+-	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 
+ 	return 0;
+ }
+@@ -1872,7 +1848,5 @@ int ast_mode_config_init(struct ast_device *ast)
+ 
+ 	drm_mode_config_reset(dev);
+ 
+-	drm_kms_helper_poll_init(dev);
+-
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 82c68b0424443..42d05a247511a 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -181,6 +181,7 @@ config DRM_NWL_MIPI_DSI
+ 	select DRM_KMS_HELPER
+ 	select DRM_MIPI_DSI
+ 	select DRM_PANEL_BRIDGE
++	select GENERIC_PHY
+ 	select GENERIC_PHY_MIPI_DPHY
+ 	select MFD_SYSCON
+ 	select MULTIPLEXER
+@@ -227,6 +228,7 @@ config DRM_SAMSUNG_DSIM
+ 	select DRM_KMS_HELPER
+ 	select DRM_MIPI_DSI
+ 	select DRM_PANEL_BRIDGE
++	select GENERIC_PHY
+ 	select GENERIC_PHY_MIPI_DPHY
+ 	help
+ 	  The Samsung MIPI DSIM bridge controller driver.
+diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
+index ec35215a20034..cced81633ddcd 100644
+--- a/drivers/gpu/drm/bridge/cadence/Kconfig
++++ b/drivers/gpu/drm/bridge/cadence/Kconfig
+@@ -4,6 +4,7 @@ config DRM_CDNS_DSI
+ 	select DRM_KMS_HELPER
+ 	select DRM_MIPI_DSI
+ 	select DRM_PANEL_BRIDGE
++	select GENERIC_PHY
+ 	select GENERIC_PHY_MIPI_DPHY
+ 	depends on OF
+ 	help
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 466641c77fe91..fc7f5ec5fb381 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -1447,10 +1447,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ 	struct it66121_ctx *ctx = dev_get_drvdata(dev);
+ 
+ 	mutex_lock(&ctx->lock);
+-
+-	memcpy(buf, ctx->connector->eld,
+-	       min(sizeof(ctx->connector->eld), len));
+-
++	if (!ctx->connector) {
++		/* Pass an empty ELD if connector not available */
++		dev_dbg(dev, "No connector present, passing empty ELD data");
++		memset(buf, 0, len);
++	} else {
++		memcpy(buf, ctx->connector->eld,
++		       min(sizeof(ctx->connector->eld), len));
++	}
+ 	mutex_unlock(&ctx->lock);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 4eaea67fb71c2..03532efb893bb 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -45,7 +45,6 @@ struct lt8912 {
+ 
+ 	u8 data_lanes;
+ 	bool is_power_on;
+-	bool is_attached;
+ };
+ 
+ static int lt8912_write_init_config(struct lt8912 *lt)
+@@ -559,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ 	struct lt8912 *lt = bridge_to_lt8912(bridge);
+ 	int ret;
+ 
++	ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
++				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
++	if (ret < 0) {
++		dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
++		return ret;
++	}
++
+ 	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ 		ret = lt8912_bridge_connector_init(bridge);
+ 		if (ret) {
+@@ -575,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ 	if (ret)
+ 		goto error;
+ 
+-	lt->is_attached = true;
+-
+ 	return 0;
+ 
+ error:
+@@ -588,15 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ {
+ 	struct lt8912 *lt = bridge_to_lt8912(bridge);
+ 
+-	if (lt->is_attached) {
+-		lt8912_hard_power_off(lt);
+-
+-		if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+-			drm_bridge_hpd_disable(lt->hdmi_port);
++	lt8912_hard_power_off(lt);
+ 
+-		drm_connector_unregister(&lt->connector);
+-		drm_connector_cleanup(&lt->connector);
+-	}
++	if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
++		drm_bridge_hpd_disable(lt->hdmi_port);
+ }
+ 
+ static enum drm_connector_status
+@@ -750,7 +749,6 @@ static void lt8912_remove(struct i2c_client *client)
+ {
+ 	struct lt8912 *lt = i2c_get_clientdata(client);
+ 
+-	lt8912_bridge_detach(&lt->bridge);
+ 	drm_bridge_remove(&lt->bridge);
+ 	lt8912_free_i2c(lt);
+ 	lt8912_put_dt(lt);
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index 2a57e804ea020..e562dc6cf4049 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -927,9 +927,9 @@ retry:
+ 	init_waitqueue_head(&lt9611uxc->wq);
+ 	INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+ 
+-	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+-					lt9611uxc_irq_thread_handler,
+-					IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
++	ret = request_threaded_irq(client->irq, NULL,
++				   lt9611uxc_irq_thread_handler,
++				   IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ 	if (ret) {
+ 		dev_err(dev, "failed to request irq\n");
+ 		goto err_disable_regulators;
+@@ -965,6 +965,8 @@ retry:
+ 	return lt9611uxc_audio_init(dev, lt9611uxc);
+ 
+ err_remove_bridge:
++	free_irq(client->irq, lt9611uxc);
++	cancel_work_sync(&lt9611uxc->work);
+ 	drm_bridge_remove(&lt9611uxc->bridge);
+ 
+ err_disable_regulators:
+@@ -981,7 +983,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
+ {
+ 	struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+ 
+-	disable_irq(client->irq);
++	free_irq(client->irq, lt9611uxc);
+ 	cancel_work_sync(&lt9611uxc->work);
+ 	lt9611uxc_audio_exit(lt9611uxc);
+ 	drm_bridge_remove(&lt9611uxc->bridge);
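
On the lt9611uxc IRQ handling above: a devm-managed IRQ is only released
after ->remove() returns, so the IRQ thread could still queue new work
after cancel_work_sync() had run. Requesting the IRQ manually lets
remove enforce the safe ordering explicitly:

	free_irq(client->irq, lt9611uxc);	/* no new work after this */
	cancel_work_sync(&lt9611uxc->work);	/* flush what is queued */
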
+diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
+index 9e253af69c7a1..4b3e117ae9008 100644
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -384,7 +384,7 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
+ 	[RESET_TYPE] = DSIM_SWRST,
+ 	[PLL_TIMER] = 500,
+ 	[STOP_STATE_CNT] = 0xf,
+-	[PHYCTRL_ULPS_EXIT] = 0,
++	[PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ 	[PHYCTRL_VREG_LP] = 0,
+ 	[PHYCTRL_SLEW_UP] = 0,
+ 	[PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+@@ -412,6 +412,7 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ 	.m_min = 41,
+ 	.m_max = 125,
+ 	.min_freq = 500,
++	.has_broken_fifoctrl_emptyhdr = 1,
+ };
+ 
+ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+@@ -428,6 +429,7 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ 	.m_min = 41,
+ 	.m_max = 125,
+ 	.min_freq = 500,
++	.has_broken_fifoctrl_emptyhdr = 1,
+ };
+ 
+ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+@@ -1009,8 +1011,20 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+ 	do {
+ 		u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+ 
+-		if (reg & DSIM_SFR_HEADER_EMPTY)
+-			return 0;
++		if (!dsi->driver_data->has_broken_fifoctrl_emptyhdr) {
++			if (reg & DSIM_SFR_HEADER_EMPTY)
++				return 0;
++		} else {
++			if (!(reg & DSIM_SFR_HEADER_FULL)) {
++				/*
++				 * Wait a little bit, so the pending data can
++				 * actually leave the FIFO to avoid overflow.
++				 */
++				if (!cond_resched())
++					usleep_range(950, 1050);
++				return 0;
++			}
++		}
+ 
+ 		if (!cond_resched())
+ 			usleep_range(950, 1050);
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 819a4b6ec2a07..6eed5c4232956 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -15,6 +15,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+ 
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+@@ -216,6 +217,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ 	u32 tmp, orig;
+ 
+ 	tc358768_read(priv, reg, &orig);
++
++	if (priv->error)
++		return;
++
+ 	tmp = orig & ~mask;
+ 	tmp |= val & mask;
+ 	if (tmp != orig)
+@@ -600,7 +605,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ 
+ 	dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ 		clk_get_rate(priv->refclk), fbd, prd, frs);
+-	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
++	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
+ 		priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ 	dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ 		tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+@@ -623,15 +628,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ 	return tc358768_clear_error(priv);
+ }
+ 
+-#define TC358768_PRECISION	1000
+-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
++static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
+ {
+-	return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
++	return DIV_ROUND_UP(ns * 1000, period_ps);
+ }
+ 
+-static u32 tc358768_to_ns(u32 nsk)
++static u32 tc358768_ps_to_ns(u32 ps)
+ {
+-	return (nsk / TC358768_PRECISION);
++	return ps / 1000;
+ }
+ 
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+@@ -642,13 +646,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	u32 val, val2, lptxcnt, hact, data_type;
+ 	s32 raw_val;
+ 	const struct drm_display_mode *mode;
+-	u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
+-	u32 dsiclk, dsibclk, video_start;
++	u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
++	u32 dsiclk, hsbyteclk, video_start;
+ 	const u32 internal_delay = 40;
+ 	int ret, i;
++	struct videomode vm;
++	struct device *dev = priv->dev;
+ 
+ 	if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+-		dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
++		dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ 		mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 	}
+ 
+@@ -656,7 +662,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	ret = tc358768_sw_reset(priv);
+ 	if (ret) {
+-		dev_err(priv->dev, "Software reset failed: %d\n", ret);
++		dev_err(dev, "Software reset failed: %d\n", ret);
+ 		tc358768_hw_disable(priv);
+ 		return;
+ 	}
+@@ -664,45 +670,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	mode = &bridge->encoder->crtc->state->adjusted_mode;
+ 	ret = tc358768_setup_pll(priv, mode);
+ 	if (ret) {
+-		dev_err(priv->dev, "PLL setup failed: %d\n", ret);
++		dev_err(dev, "PLL setup failed: %d\n", ret);
+ 		tc358768_hw_disable(priv);
+ 		return;
+ 	}
+ 
++	drm_display_mode_to_videomode(mode, &vm);
++
+ 	dsiclk = priv->dsiclk;
+-	dsibclk = dsiclk / 4;
++	hsbyteclk = dsiclk / 4;
+ 
+ 	/* Data Format Control Register */
+ 	val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ 	switch (dsi_dev->format) {
+ 	case MIPI_DSI_FMT_RGB888:
+ 		val |= (0x3 << 4);
+-		hact = mode->hdisplay * 3;
+-		video_start = (mode->htotal - mode->hsync_start) * 3;
++		hact = vm.hactive * 3;
++		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666:
+ 		val |= (0x4 << 4);
+-		hact = mode->hdisplay * 3;
+-		video_start = (mode->htotal - mode->hsync_start) * 3;
++		hact = vm.hactive * 3;
++		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+ 		val |= (0x4 << 4) | BIT(3);
+-		hact = mode->hdisplay * 18 / 8;
+-		video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
++		hact = vm.hactive * 18 / 8;
++		video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
+ 		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB565:
+ 		val |= (0x5 << 4);
+-		hact = mode->hdisplay * 2;
+-		video_start = (mode->htotal - mode->hsync_start) * 2;
++		hact = vm.hactive * 2;
++		video_start = (vm.hsync_len + vm.hback_porch) * 2;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ 		break;
+ 	default:
+-		dev_err(priv->dev, "Invalid data format (%u)\n",
++		dev_err(dev, "Invalid data format (%u)\n",
+ 			dsi_dev->format);
+ 		tc358768_hw_disable(priv);
+ 		return;
+@@ -722,67 +730,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+ 
+ 	/* DSI Timings */
+-	dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+-				  dsibclk);
+-	dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+-	ui_nsk = dsiclk_nsk / 2;
+-	dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+-	dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+-	dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
++	hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
++	dsiclk_ps = (u32)div_u64(PICO, dsiclk);
++	ui_ps = dsiclk_ps / 2;
++	dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
++		ui_ps, hsbyteclk_ps);
+ 
+ 	/* LP11 > 100us for D-PHY Rx Init */
+-	val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+-	dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
++	val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "LINEINITCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_LINEINITCNT, val);
+ 
+ 	/* LPTimeCnt > 50ns */
+-	val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
++	val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
+ 	lptxcnt = val;
+-	dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
++	dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+ 
+ 	/* 38ns < TCLK_PREPARE < 95ns */
+-	val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
++	val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
+ 	/* TCLK_PREPARE + TCLK_ZERO > 300ns */
+-	val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
+-				  dsibclk_nsk) - 2;
++	val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
++				  hsbyteclk_ps) - 2;
++	dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
+ 	val |= val2 << 8;
+-	dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+ 
+ 	/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
+-	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
+ 	val = clamp(raw_val, 0, 127);
+-	dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
++	dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+ 
+ 	/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+-	val = 50 + tc358768_to_ns(4 * ui_nsk);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++	val = 50 + tc358768_ps_to_ns(4 * ui_ps);
++	val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "THS_PREPARECNT %u\n", val);
+ 	/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+-	raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++	raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
+ 	val2 = clamp(raw_val, 0, 127);
++	dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
+ 	val |= val2 << 8;
+-	dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+ 
+ 	/* TWAKEUP > 1ms in lptxcnt steps */
+-	val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
++	val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
+ 	val = val / (lptxcnt + 1) - 1;
+-	dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
++	dev_dbg(dev, "TWAKEUP: %u\n", val);
+ 	tc358768_write(priv, TC358768_TWAKEUP, val);
+ 
+ 	/* TCLK_POSTCNT > 60ns + 52*UI */
+-	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+-				 dsibclk_nsk) - 3;
+-	dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
++	val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
++				 hsbyteclk_ps) - 3;
++	dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+ 
+ 	/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
+-	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
+-				     dsibclk_nsk) - 4;
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
++				     hsbyteclk_ps) - 4;
+ 	val = clamp(raw_val, 0, 15);
+-	dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
++	dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+ 
+ 	val = BIT(0);
+@@ -790,16 +798,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		val |= BIT(i + 1);
+ 	tc358768_write(priv, TC358768_HSTXVREGEN, val);
+ 
+-	if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+-		tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
++	tc358768_write(priv, TC358768_TXOPTIONCNTRL,
++		       (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
+ 
+ 	/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+-	val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+-	val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+-				  dsibclk_nsk) - 2;
++	val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
++	val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
++	dev_dbg(dev, "TXTAGOCNT: %u\n", val);
++	val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
++				  hsbyteclk_ps) - 2;
++	dev_dbg(dev, "RXTASURECNT: %u\n", val2);
+ 	val = val << 16 | val2;
+-	dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_BTACNTRL1, val);
+ 
+ 	/* START[0] */
+@@ -810,43 +819,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		tc358768_write(priv, TC358768_DSI_EVENT, 0);
+ 
+ 		/* vact */
+-		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++		tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+ 
+ 		/* vsw */
+-		tc358768_write(priv, TC358768_DSI_VSW,
+-			       mode->vsync_end - mode->vsync_start);
++		tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
++
+ 		/* vbp */
+-		tc358768_write(priv, TC358768_DSI_VBPR,
+-			       mode->vtotal - mode->vsync_end);
++		tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+ 
+ 		/* hsw * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64(vm.hsync_len *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HSW, val);
+ 
+ 		/* hbp * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64(vm.hback_porch *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HBPR, val);
+ 	} else {
+ 		/* Set event mode */
+ 		tc358768_write(priv, TC358768_DSI_EVENT, 1);
+ 
+ 		/* vact */
+-		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++		tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+ 
+ 		/* vsw (+ vbp) */
+ 		tc358768_write(priv, TC358768_DSI_VSW,
+-			       mode->vtotal - mode->vsync_start);
++			       vm.vsync_len + vm.vback_porch);
++
+ 		/* vbp (not used in event mode) */
+ 		tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ 
+ 		/* (hsw + hbp) * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HSW, val);
+ 
+ 		/* hbp (not used in event mode) */
+@@ -857,11 +866,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	tc358768_write(priv, TC358768_DSI_HACT, hact);
+ 
+ 	/* VSYNC polarity */
+-	if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+-		tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
++	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
++			     (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
++
+ 	/* HSYNC polarity */
+-	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
++	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
++			     (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
+ 
+ 	/* Start DSI Tx */
+ 	tc358768_write(priv, TC358768_DSI_START, 0x1);
+@@ -891,7 +901,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	ret = tc358768_clear_error(priv);
+ 	if (ret) {
+-		dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
++		dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
+ 		tc358768_bridge_disable(bridge);
+ 		tc358768_bridge_post_disable(bridge);
+ 	}
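The tc358768 hunks above switch the D-PHY timing bookkeeping from nanoseconds to picoseconds so that sub-ns rounding error no longer accumulates across the HEADERCNT/TRAILCNT calculations. A minimal sketch of the conversion, assuming round-up division like the driver's tc358768_ns_to_cnt(); the helper bodies shown here are illustrative, not the driver's exact code:

/* Convert a spec requirement in ns into HS byte clock cycles, with the
 * clock period kept in ps. Rounding up guarantees the programmed
 * interval never undershoots the D-PHY minimum.
 */
static inline unsigned int ns_to_cnt(unsigned int ns, unsigned int period_ps)
{
	return (ns * 1000 + period_ps - 1) / period_ps;
}

/* Inverse helper, used when a UI term (half a DSI clock period) has to
 * be folded back into an ns budget, as in "300ns - 2*UI" above.
 */
static inline unsigned int ps_to_ns(unsigned int ps)
{
	return ps / 1000;
}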
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index e592c5da70cee..da0145bc104a8 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1015,7 +1015,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ 		fence = drm_syncobj_fence_get(syncobjs[i]);
+ 		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ 			dma_fence_put(fence);
+-			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++			if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ 				continue;
+ 			} else {
+ 				timeout = -EINVAL;
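With the drm_syncobj change above, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE waits no longer fail with -EINVAL when a timeline point has no fence attached yet. A hedged userspace sketch of the behaviour this enables, assuming libdrm's drmSyncobjTimelineWait() wrapper (error handling elided):

#include <stdint.h>
#include <xf86drm.h>

/* Block until a fence has been *submitted* at `point` on the timeline,
 * without waiting for it to signal; useful for wait-before-submit
 * userspace such as Vulkan timeline semaphores.
 */
int wait_for_submit(int fd, uint32_t syncobj, uint64_t point)
{
	return drmSyncobjTimelineWait(fd, &syncobj, &point, 1,
				      INT64_MAX /* no timeout */,
				      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
				      NULL);
}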
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 8d44f3df116fa..c95f59e018566 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -407,6 +407,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ 		unsigned int local_layer;
+ 
+ 		plane_state = to_mtk_plane_state(plane->state);
++
++		/* layers must not be enabled before the CRTC is enabled */
++		plane_state->pending.enable = false;
+ 		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ 		if (comp)
+ 			mtk_ddp_comp_layer_config(comp, local_layer,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 30d10f21562f4..da9cbcc685db6 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -287,6 +287,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
+ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
+ 	.main_path = mt8188_mtk_ddp_main,
+ 	.main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
++	.mmsys_dev_num = 1,
+ };
+ 
+ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 0e0a41b2f57f0..4f2e3feabc0f8 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -121,7 +121,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ 	int ret;
+ 
+ 	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+-	args->size = args->pitch * args->height;
++
++	/*
++	 * Multiplying two variables of different types, for example
++	 * args->size = args->pitch * args->height, may cause a Coverity
++	 * warning about unintentional integer overflow.
++	 */
++	args->size = args->pitch;
++	args->size *= args->height;
+ 
+ 	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ 	if (IS_ERR(mtk_gem))
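The point of the two-step assignment above is that C evaluates args->pitch * args->height in the operands' 32-bit type before widening to the 64-bit args->size; assigning one factor first forces the multiply to happen in 64 bits. The same rule drives the mtk_drm_plane hunks below. A self-contained illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 65536, height = 65537;
	uint64_t wrong = pitch * height;	/* 32-bit multiply wraps */
	uint64_t right = pitch;			/* widen first ... */

	right *= height;			/* ... 64-bit multiply */
	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}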
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index 31f9420aff6f8..a46f23cde29a0 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -145,6 +145,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ 	dma_addr_t addr;
+ 	dma_addr_t hdr_addr = 0;
+ 	unsigned int hdr_pitch = 0;
++	int offset;
+ 
+ 	gem = fb->obj[0];
+ 	mtk_gem = to_mtk_gem_obj(gem);
+@@ -154,8 +155,15 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ 	modifier = fb->modifier;
+ 
+ 	if (modifier == DRM_FORMAT_MOD_LINEAR) {
+-		addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+-		addr += (new_state->src.y1 >> 16) * pitch;
++		/*
++		 * Adding a product of mixed-width types directly to a dma_addr_t,
++		 * for example addr += (new_state->src.x1 >> 16) * fb->format->cpp[0],
++		 * may cause a Coverity warning about unintentional overflow.
++		 */
++		offset = (new_state->src.x1 >> 16) * fb->format->cpp[0];
++		addr += offset;
++		offset = (new_state->src.y1 >> 16) * pitch;
++		addr += offset;
+ 	} else {
+ 		int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH)
+ 				      / AFBC_DATA_BLOCK_WIDTH;
+@@ -163,21 +171,34 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ 				       / AFBC_DATA_BLOCK_HEIGHT;
+ 		int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH;
+ 		int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT;
+-		int hdr_size;
++		int hdr_size, hdr_offset;
+ 
+ 		hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
+ 		pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH *
+ 			AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0];
+ 
+ 		hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT);
++		hdr_offset = hdr_pitch * y_offset_in_blocks +
++			AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
++
++		/*
++		 * Adding a product of mixed-width types directly to a dma_addr_t,
++		 * for example addr += hdr_pitch * y_offset_in_blocks,
++		 * may cause a Coverity warning about unintentional overflow.
++		 */
++		hdr_addr = addr + hdr_offset;
+ 
+-		hdr_addr = addr + hdr_pitch * y_offset_in_blocks +
+-			   AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
+ 		/* The data plane is offset by 1 additional block. */
+-		addr = addr + hdr_size +
+-		       pitch * y_offset_in_blocks +
+-		       AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
+-		       fb->format->cpp[0] * (x_offset_in_blocks + 1);
++		offset = pitch * y_offset_in_blocks +
++			 AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
++			 fb->format->cpp[0] * (x_offset_in_blocks + 1);
++
++		/*
++		 * Adding a product of mixed-width types directly to a dma_addr_t,
++		 * for example addr += pitch * y_offset_in_blocks,
++		 * may cause a Coverity warning about unintentional overflow.
++		 */
++		addr = addr + hdr_size + offset;
+ 	}
+ 
+ 	mtk_plane_state->pending.enable = true;
+@@ -210,9 +231,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ 	plane->state->src_y = new_state->src_y;
+ 	plane->state->src_h = new_state->src_h;
+ 	plane->state->src_w = new_state->src_w;
+-	swap(plane->state->fb, new_state->fb);
+ 
+ 	mtk_plane_update_new_state(new_state, new_plane_state);
++	swap(plane->state->fb, new_state->fb);
+ 	wmb(); /* Make sure the above parameters are set before update */
+ 	new_plane_state->pending.async_dirty = true;
+ 	mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 7d52503511936..b0ab38e59db9d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -407,7 +407,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+ 	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ 		tmp_reg |= HSTX_CKLP_EN;
+ 
+-	if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ 		tmp_reg |= DIS_EOT;
+ 
+ 	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+@@ -484,7 +484,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ 			  timing->da_hs_zero + timing->da_hs_exit + 3;
+ 
+ 	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+-	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
++	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
+ 
+ 	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ 	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
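Both mtk_dsi hunks invert a mixed-up polarity: DIS_EOT must be set precisely when the peripheral asked for MIPI_DSI_MODE_NO_EOT_PACKET, and the 2-byte EOT overhead belongs in the horizontal timing budget only when the packet is actually transmitted. A condensed sketch of the corrected logic (illustrative, not the driver's exact code):

static void fixup_eot(unsigned long mode_flags, u32 *txrx_ctrl, u32 *delta)
{
	bool no_eot = mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET;

	if (no_eot)
		*txrx_ctrl |= DIS_EOT;	/* suppress the EOT packet */

	*delta += no_eot ? 0 : 2;	/* EOT costs 2 bytes per line */
}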
+diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
+index 976f0ab2006b5..797f7a0623178 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
+@@ -10,6 +10,7 @@
+ #include <linux/pci.h>
+ 
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 
+ 	drm_dev_unregister(dev);
++	drm_atomic_helper_shutdown(dev);
++}
++
++static void mgag200_pci_shutdown(struct pci_dev *pdev)
++{
++	drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ }
+ 
+ static struct pci_driver mgag200_pci_driver = {
+@@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
+ 	.id_table = mgag200_pciidlist,
+ 	.probe = mgag200_pci_probe,
+ 	.remove = mgag200_pci_remove,
++	.shutdown = mgag200_pci_shutdown,
+ };
+ 
+ drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
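The mgag200 change is the first of several in this series (pl111, stm, tilcdc, tve200 and vboxvideo below repeat it): the display pipeline must be quiesced both on driver removal and on system shutdown/kexec, so drm_atomic_helper_shutdown() is wired into the .remove path and a new .shutdown callback. A minimal sketch of the pattern for a PCI driver:

#include <drm/drm_atomic_helper.h>
#include <linux/pci.h>

static void example_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	/* stop scanout and disable planes/CRTCs before power-off/kexec */
	drm_atomic_helper_shutdown(drm);
}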
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index baab79ab6e745..32f965bacdc30 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -126,6 +126,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
+ 	struct msm_drm_private *priv = dev_get_drvdata(master);
+ 	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ 
++	msm_dsi_tx_buf_free(msm_dsi->host);
+ 	priv->dsi[msm_dsi->id] = NULL;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
+index bd3763a5d7234..3b46617a59f20 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.h
++++ b/drivers/gpu/drm/msm/dsi/dsi.h
+@@ -125,6 +125,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+ void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+ void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+ void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 9ac62651eb756..73c7878d5a2a3 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -147,6 +147,7 @@ struct msm_dsi_host {
+ 
+ 	/* DSI 6G TX buffer*/
+ 	struct drm_gem_object *tx_gem_obj;
++	struct msm_gem_address_space *aspace;
+ 
+ 	/* DSI v2 TX buffer */
+ 	void *tx_buf;
+@@ -1104,8 +1105,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+ 	uint64_t iova;
+ 	u8 *data;
+ 
++	msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
++
+ 	data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+-					priv->kms->aspace,
++					msm_host->aspace,
+ 					&msm_host->tx_gem_obj, &iova);
+ 
+ 	if (IS_ERR(data)) {
+@@ -1134,10 +1137,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+ 	return 0;
+ }
+ 
+-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+ {
++	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ 	struct drm_device *dev = msm_host->dev;
+-	struct msm_drm_private *priv;
+ 
+ 	/*
+ 	 * This is possible if we're tearing down before we've had a chance to
+@@ -1148,11 +1151,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+ 	if (!dev)
+ 		return;
+ 
+-	priv = dev->dev_private;
+ 	if (msm_host->tx_gem_obj) {
+-		msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+-		drm_gem_object_put(msm_host->tx_gem_obj);
++		msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
++		msm_gem_address_space_put(msm_host->aspace);
+ 		msm_host->tx_gem_obj = NULL;
++		msm_host->aspace = NULL;
+ 	}
+ 
+ 	if (msm_host->tx_buf)
+@@ -1938,7 +1941,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+ 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ 
+ 	DBG("");
+-	dsi_tx_buf_free(msm_host);
+ 	if (msm_host->workqueue) {
+ 		destroy_workqueue(msm_host->workqueue);
+ 		msm_host->workqueue = NULL;
+diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
+index 43049c8028b21..57a7e6b93c717 100644
+--- a/drivers/gpu/drm/pl111/pl111_drv.c
++++ b/drivers/gpu/drm/pl111/pl111_drv.c
+@@ -326,12 +326,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev)
+ 	struct pl111_drm_dev_private *priv = drm->dev_private;
+ 
+ 	drm_dev_unregister(drm);
++	drm_atomic_helper_shutdown(drm);
+ 	if (priv->panel)
+ 		drm_panel_bridge_remove(priv->bridge);
+ 	drm_dev_put(drm);
+ 	of_reserved_mem_device_release(dev);
+ }
+ 
++static void pl111_amba_shutdown(struct amba_device *amba_dev)
++{
++	drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev));
++}
++
+ /*
+  * This early variant lacks the 565 and 444 pixel formats.
+  */
+@@ -434,6 +440,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = {
+ 	},
+ 	.probe = pl111_amba_probe,
+ 	.remove = pl111_amba_remove,
++	.shutdown = pl111_amba_shutdown,
+ 	.id_table = pl111_id_table,
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4f06356d9ce2e..f0ae087be914e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4821,14 +4821,15 @@ restart_ih:
+ 			break;
+ 		case 44: /* hdmi */
+ 			afmt_idx = src_data;
+-			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+-				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+-
+ 			if (afmt_idx > 5) {
+ 				DRM_ERROR("Unhandled interrupt: %d %d\n",
+ 					  src_id, src_data);
+ 				break;
+ 			}
++
++			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
++				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
+ 			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+ 			queue_hdmi = true;
+ 			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
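The evergreen hunk reorders a bounds check so that afmt_status[afmt_idx] is never read with an out-of-range index. The general rule, in a standalone sketch:

#include <stdio.h>

#define NUM_BLOCKS 6

static unsigned int status[NUM_BLOCKS];

void handle_event(unsigned int idx)
{
	if (idx >= NUM_BLOCKS) {	/* validate before any use */
		fprintf(stderr, "unhandled index %u\n", idx);
		return;
	}

	status[idx] &= ~0x1u;		/* safe: idx is in range */
}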
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index b6afe3786b742..fbcfda5b9335c 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ 	struct cdn_dp_device *dp;
+ 	struct extcon_dev *extcon;
+ 	struct phy *phy;
++	int ret;
+ 	int i;
+ 
+ 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+@@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ 	mutex_init(&dp->lock);
+ 	dev_set_drvdata(dev, dp);
+ 
+-	cdn_dp_audio_codec_init(dp, dev);
++	ret = cdn_dp_audio_codec_init(dp, dev);
++	if (ret)
++		return ret;
++
++	ret = component_add(dev, &cdn_dp_component_ops);
++	if (ret)
++		goto err_audio_deinit;
+ 
+-	return component_add(dev, &cdn_dp_component_ops);
++	return 0;
++
++err_audio_deinit:
++	platform_device_unregister(dp->audio_pdev);
++	return ret;
+ }
+ 
+ static int cdn_dp_remove(struct platform_device *pdev)
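The cdn-dp probe fix is the classic unwind ladder: once cdn_dp_audio_codec_init() succeeds, any later failure must release the audio platform device before propagating the error. Sketched with hypothetical placeholders (init_audio/add_component/deinit_audio are not driver functions):

static int init_audio(void);	/* hypothetical placeholders */
static int add_component(void);
static void deinit_audio(void);

static int example_probe(void)
{
	int ret;

	ret = init_audio();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = add_component();
	if (ret)
		goto err_audio_deinit;

	return 0;

err_audio_deinit:
	deinit_audio();		/* release in reverse order */
	return ret;
}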
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index b8f8b45ebf594..93ed841f5dcea 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -40,7 +40,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+ 
+ 	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ 				prot);
+-	if (ret < rk_obj->base.size) {
++	if (ret < (ssize_t)rk_obj->base.size) {
+ 		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ 			  ret, rk_obj->base.size);
+ 		ret = -ENOMEM;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index bf34498c1b6d7..5b7b7b6deed9e 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1615,7 +1615,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ 	if (WARN_ON(!crtc->state))
+ 		return NULL;
+ 
+-	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
++	rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
++				 sizeof(*rockchip_state), GFP_KERNEL);
+ 	if (!rockchip_state)
+ 		return NULL;
+ 
+@@ -1640,7 +1641,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		vop_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++	if (crtc_state)
++		__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ #ifdef CONFIG_DRM_ANALOGIX_DP
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index ca73b8ccc29f4..5ba83121a1b90 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -2080,30 +2080,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
+ 	.atomic_disable = vop2_crtc_atomic_disable,
+ };
+ 
+-static void vop2_crtc_reset(struct drm_crtc *crtc)
+-{
+-	struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+-
+-	if (crtc->state) {
+-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+-		kfree(vcstate);
+-	}
+-
+-	vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
+-	if (!vcstate)
+-		return;
+-
+-	crtc->state = &vcstate->base;
+-	crtc->state->crtc = crtc;
+-}
+-
+ static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+-	struct rockchip_crtc_state *vcstate, *old_vcstate;
++	struct rockchip_crtc_state *vcstate;
+ 
+-	old_vcstate = to_rockchip_crtc_state(crtc->state);
++	if (WARN_ON(!crtc->state))
++		return NULL;
+ 
+-	vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
++	vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
++			  sizeof(*vcstate), GFP_KERNEL);
+ 	if (!vcstate)
+ 		return NULL;
+ 
+@@ -2121,6 +2106,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
+ 	kfree(vcstate);
+ }
+ 
++static void vop2_crtc_reset(struct drm_crtc *crtc)
++{
++	struct rockchip_crtc_state *vcstate =
++		kzalloc(sizeof(*vcstate), GFP_KERNEL);
++
++	if (crtc->state)
++		vop2_crtc_destroy_state(crtc, crtc->state);
++
++	if (vcstate)
++		__drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
++}
++
+ static const struct drm_crtc_funcs vop2_crtc_funcs = {
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
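Both Rockchip VOP hunks converge on the same reset/duplicate discipline: duplicate_state inherits the current state via kmemdup(), and reset survives allocation failure by handing __drm_atomic_helper_crtc_reset() a NULL state instead of leaving crtc->state dangling. A condensed sketch using only the helpers visible above:

static void example_crtc_reset(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (crtc->state)
		vop2_crtc_destroy_state(crtc, crtc->state);

	/* a NULL state is legal here; the helper copes with OOM */
	__drm_atomic_helper_crtc_reset(crtc, s ? &s->base : NULL);
}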
+diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
+index cb4404b3ce62c..477ea58557a2d 100644
+--- a/drivers/gpu/drm/stm/drv.c
++++ b/drivers/gpu/drm/stm/drv.c
+@@ -113,6 +113,7 @@ static void drv_unload(struct drm_device *ddev)
+ 	DRM_DEBUG("%s\n", __func__);
+ 
+ 	drm_kms_helper_poll_fini(ddev);
++	drm_atomic_helper_shutdown(ddev);
+ 	ltdc_unload(ddev);
+ }
+ 
+@@ -213,7 +214,7 @@ err_put:
+ 	return ret;
+ }
+ 
+-static int stm_drm_platform_remove(struct platform_device *pdev)
++static void stm_drm_platform_remove(struct platform_device *pdev)
+ {
+ 	struct drm_device *ddev = platform_get_drvdata(pdev);
+ 
+@@ -222,8 +223,11 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
+ 	drm_dev_unregister(ddev);
+ 	drv_unload(ddev);
+ 	drm_dev_put(ddev);
++}
+ 
+-	return 0;
++static void stm_drm_platform_shutdown(struct platform_device *pdev)
++{
++	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ }
+ 
+ static const struct of_device_id drv_dt_ids[] = {
+@@ -234,7 +238,8 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
+ 
+ static struct platform_driver stm_drm_platform_driver = {
+ 	.probe = stm_drm_platform_probe,
+-	.remove = stm_drm_platform_remove,
++	.remove_new = stm_drm_platform_remove,
++	.shutdown = stm_drm_platform_shutdown,
+ 	.driver = {
+ 		.name = "stm32-display",
+ 		.of_match_table = drv_dt_ids,
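stm is also converted to the .remove_new platform callback, which returns void: the platform bus has always ignored errors from .remove, so the new prototype simply stops pretending they can propagate. dw_mipi_dsi-stm below gets the same conversion. Minimal sketch of the convention:

#include <linux/platform_device.h>

static void example_remove(struct platform_device *pdev)
{
	/* release everything here; there is no error to return */
}

static struct platform_driver example_driver = {
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};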
+diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+index 1750b6a25e871..d5f8c923d7bc7 100644
+--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
++++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+@@ -535,15 +535,13 @@ err_clk_get:
+ 	return ret;
+ }
+ 
+-static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
++static void dw_mipi_dsi_stm_remove(struct platform_device *pdev)
+ {
+ 	struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
+ 
+ 	dw_mipi_dsi_remove(dsi->dsi);
+ 	clk_disable_unprepare(dsi->pllref_clk);
+ 	regulator_disable(dsi->vdd_supply);
+-
+-	return 0;
+ }
+ 
+ static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
+@@ -588,7 +586,7 @@ static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
+ 
+ static struct platform_driver dw_mipi_dsi_stm_driver = {
+ 	.probe		= dw_mipi_dsi_stm_probe,
+-	.remove		= dw_mipi_dsi_stm_remove,
++	.remove_new	= dw_mipi_dsi_stm_remove,
+ 	.driver		= {
+ 		.of_match_table = dw_mipi_dsi_stm_dt_ids,
+ 		.name	= "stm32-display-dsi",
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index fe56beea3e93f..8ebd7134ee21b 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev)
+ 		drm_dev_unregister(dev);
+ 
+ 	drm_kms_helper_poll_fini(dev);
++	drm_atomic_helper_shutdown(dev);
+ 	tilcdc_irq_uninstall(dev);
+ 	drm_mode_config_cleanup(dev);
+ 
+@@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
+ 
+ init_failed:
+ 	tilcdc_fini(ddev);
++	platform_set_drvdata(pdev, NULL);
+ 
+ 	return ret;
+ }
+@@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev)
+ 	if (!ddev->dev_private)
+ 		return;
+ 
+-	tilcdc_fini(dev_get_drvdata(dev));
++	tilcdc_fini(ddev);
++	dev_set_drvdata(dev, NULL);
+ }
+ 
+ static const struct component_master_ops tilcdc_comp_ops = {
+@@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void tilcdc_pdev_shutdown(struct platform_device *pdev)
++{
++	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id tilcdc_of_match[] = {
+ 		{ .compatible = "ti,am33xx-tilcdc", },
+ 		{ .compatible = "ti,da850-tilcdc", },
+@@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+ static struct platform_driver tilcdc_platform_driver = {
+ 	.probe      = tilcdc_pdev_probe,
+ 	.remove     = tilcdc_pdev_remove,
++	.shutdown   = tilcdc_pdev_shutdown,
+ 	.driver     = {
+ 		.name   = "tilcdc",
+ 		.pm     = pm_sleep_ptr(&tilcdc_pm_ops),
+diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
+index 40b1168ad671f..9fa6ea3ba9759 100644
+--- a/drivers/gpu/drm/tve200/tve200_drv.c
++++ b/drivers/gpu/drm/tve200/tve200_drv.c
+@@ -236,19 +236,23 @@ dev_unref:
+ 	return ret;
+ }
+ 
+-static int tve200_remove(struct platform_device *pdev)
++static void tve200_remove(struct platform_device *pdev)
+ {
+ 	struct drm_device *drm = platform_get_drvdata(pdev);
+ 	struct tve200_drm_dev_private *priv = drm->dev_private;
+ 
+ 	drm_dev_unregister(drm);
++	drm_atomic_helper_shutdown(drm);
+ 	if (priv->panel)
+ 		drm_panel_bridge_remove(priv->bridge);
+ 	drm_mode_config_cleanup(drm);
+ 	clk_disable_unprepare(priv->pclk);
+ 	drm_dev_put(drm);
++}
+ 
+-	return 0;
++static void tve200_shutdown(struct platform_device *pdev)
++{
++	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ }
+ 
+ static const struct of_device_id tve200_of_match[] = {
+@@ -264,7 +268,8 @@ static struct platform_driver tve200_driver = {
+ 		.of_match_table = of_match_ptr(tve200_of_match),
+ 	},
+ 	.probe = tve200_probe,
+-	.remove = tve200_remove,
++	.remove_new = tve200_remove,
++	.shutdown = tve200_shutdown,
+ };
+ drm_module_platform_driver(tve200_driver);
+ 
+diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+index 4fee15c97c341..047b958123341 100644
+--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+@@ -12,6 +12,7 @@
+ #include <linux/vt_kern.h>
+ 
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev)
+ 	struct vbox_private *vbox = pci_get_drvdata(pdev);
+ 
+ 	drm_dev_unregister(&vbox->ddev);
++	drm_atomic_helper_shutdown(&vbox->ddev);
+ 	vbox_irq_fini(vbox);
+ 	vbox_mode_fini(vbox);
+ 	vbox_hw_fini(vbox);
+ }
+ 
++static void vbox_pci_shutdown(struct pci_dev *pdev)
++{
++	struct vbox_private *vbox = pci_get_drvdata(pdev);
++
++	drm_atomic_helper_shutdown(&vbox->ddev);
++}
++
+ static int vbox_pm_suspend(struct device *dev)
+ {
+ 	struct vbox_private *vbox = dev_get_drvdata(dev);
+@@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = {
+ 	.id_table = pciidlist,
+ 	.probe = vbox_pci_probe,
+ 	.remove = vbox_pci_remove,
++	.shutdown = vbox_pci_shutdown,
+ 	.driver.pm = pm_sleep_ptr(&vbox_pm_ops),
+ };
+ 
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+index 5d12d7beef0eb..ade3309ae042f 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+@@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ 	struct vc4_crtc *vc4_crtc;
+ 	int ret;
+ 
+-	dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
++	dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL);
+ 	KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
+ 
+ 	vc4_crtc = &dummy_crtc->crtc;
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+index 6e11fcc9ef45e..e70d7c3076acf 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+@@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ 	struct drm_encoder *enc;
+ 	int ret;
+ 
+-	dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
++	dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ 	dummy_output->encoder.type = vc4_encoder_type;
+ 
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index 9ad89d22c0ca7..824085278732b 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 	if (err < 0)
+ 		return 0;
+ 
+-	cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
++	cdl->len = err / 4;
++	cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
+ 	if (!cdl->devs)
+ 		return -ENOMEM;
+-	cdl->len = err / 4;
+ 
+ 	for (i = 0; i < cdl->len; i++) {
+ 		ctx = &cdl->devs[i];
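The host1x fix derives the element count first and then feeds the same value to both kcalloc() and the loop bound; previously kcalloc() got the raw cell count while the loop used count/4, so allocation and iteration disagreed. Standalone shape of the fix, assuming the 4-cells-per-context ratio from the hunk:

#include <stdlib.h>

struct ctx_list {
	size_t len;
	void **devs;
};

int ctx_list_init(struct ctx_list *cdl, size_t raw_count)
{
	cdl->len = raw_count / 4;	/* derive the count once */
	cdl->devs = calloc(cdl->len, sizeof(*cdl->devs));

	return cdl->devs ? 0 : -1;	/* allocation and loop now agree */
}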
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 27cadadda7c9d..2770d964133d5 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -163,7 +163,6 @@ struct cp2112_device {
+ 	atomic_t read_avail;
+ 	atomic_t xfer_avail;
+ 	struct gpio_chip gc;
+-	struct irq_chip irq;
+ 	u8 *in_out_buffer;
+ 	struct mutex lock;
+ 
+@@ -1080,16 +1079,20 @@ static void cp2112_gpio_irq_mask(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 
+-	__clear_bit(d->hwirq, &dev->irq_mask);
++	__clear_bit(hwirq, &dev->irq_mask);
++	gpiochip_disable_irq(gc, hwirq);
+ }
+ 
+ static void cp2112_gpio_irq_unmask(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 
+-	__set_bit(d->hwirq, &dev->irq_mask);
++	gpiochip_enable_irq(gc, hwirq);
++	__set_bit(hwirq, &dev->irq_mask);
+ }
+ 
+ static void cp2112_gpio_poll_callback(struct work_struct *work)
+@@ -1159,8 +1162,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
+ 
+-	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+-
+ 	if (!dev->gpio_poll) {
+ 		dev->gpio_poll = true;
+ 		schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1175,7 +1176,12 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
+ 
+-	cancel_delayed_work_sync(&dev->gpio_poll_worker);
++	cp2112_gpio_irq_mask(d);
++
++	if (!dev->irq_mask) {
++		dev->gpio_poll = false;
++		cancel_delayed_work_sync(&dev->gpio_poll_worker);
++	}
+ }
+ 
+ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+@@ -1228,6 +1234,18 @@ err_desc:
+ 	return ret;
+ }
+ 
++static const struct irq_chip cp2112_gpio_irqchip = {
++	.name = "cp2112-gpio",
++	.irq_startup = cp2112_gpio_irq_startup,
++	.irq_shutdown = cp2112_gpio_irq_shutdown,
++	.irq_ack = cp2112_gpio_irq_ack,
++	.irq_mask = cp2112_gpio_irq_mask,
++	.irq_unmask = cp2112_gpio_irq_unmask,
++	.irq_set_type = cp2112_gpio_irq_type,
++	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
++	GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ 	struct cp2112_device *dev;
+@@ -1337,17 +1355,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	dev->gc.can_sleep		= 1;
+ 	dev->gc.parent			= &hdev->dev;
+ 
+-	dev->irq.name = "cp2112-gpio";
+-	dev->irq.irq_startup = cp2112_gpio_irq_startup;
+-	dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+-	dev->irq.irq_ack = cp2112_gpio_irq_ack;
+-	dev->irq.irq_mask = cp2112_gpio_irq_mask;
+-	dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+-	dev->irq.irq_set_type = cp2112_gpio_irq_type;
+-	dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+-
+ 	girq = &dev->gc.irq;
+-	girq->chip = &dev->irq;
++	gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip);
+ 	/* The event comes from the outside so no parent handler */
+ 	girq->parent_handler = NULL;
+ 	girq->num_parents = 0;
+@@ -1356,6 +1365,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	girq->handler = handle_simple_irq;
+ 	girq->threaded = true;
+ 
++	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
++
+ 	ret = gpiochip_add_data(&dev->gc, dev);
+ 	if (ret < 0) {
+ 		hid_err(hdev, "error registering gpio chip\n");
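The cp2112 conversion follows the immutable-irqchip recipe: the irq_chip becomes a shared const object installed with gpio_irq_chip_set_chip(), and because gpiolib can no longer patch the callbacks itself, mask/unmask must bracket the hardware operation with gpiochip_disable_irq()/gpiochip_enable_irq(). The ordering matters, as in this sketch (example_irq_mask_bits stands in for the driver's mask word):

static unsigned long example_irq_mask_bits;

static void example_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* mask in hardware first ... */
	__clear_bit(hwirq, &example_irq_mask_bits);
	/* ... then tell gpiolib the line is no longer used as an IRQ */
	gpiochip_disable_irq(gc, hwirq);
}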
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 08b68f8476dbb..b040959db0f05 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1835,15 +1835,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
+ /* -------------------------------------------------------------------------- */
+ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS			0x1d4b
+ 
+-static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
++static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
+ {
+ 	u8 feature_type;
+ 	int ret;
+ 
+ 	ret = hidpp_root_get_feature(hidpp,
+ 				     HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+-				     &hidpp->wireless_feature_index,
+-				     &feature_type);
++				     feature_index, &feature_type);
+ 
+ 	return ret;
+ }
+@@ -4249,6 +4248,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ 		}
+ 	}
+ 
++	if (hidpp->protocol_major >= 2) {
++		u8 feature_index;
++
++		if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
++			hidpp->wireless_feature_index = feature_index;
++	}
++
+ 	if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
+ 		name = hidpp_get_device_name(hidpp);
+ 		if (name) {
+@@ -4394,7 +4400,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	bool connected;
+ 	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ 	struct hidpp_ff_private_data data;
+-	bool will_restart = false;
+ 
+ 	/* report_fixup needs drvdata to be set before we call hid_parse */
+ 	hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4445,10 +4450,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			return ret;
+ 	}
+ 
+-	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+-	    hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+-		will_restart = true;
+-
+ 	INIT_WORK(&hidpp->work, delayed_work_cb);
+ 	mutex_init(&hidpp->send_mutex);
+ 	init_waitqueue_head(&hidpp->wait);
+@@ -4460,10 +4461,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			 hdev->name);
+ 
+ 	/*
+-	 * Plain USB connections need to actually call start and open
+-	 * on the transport driver to allow incoming data.
++	 * First call hid_hw_start(hdev, 0) to allow IO without connecting any
++	 * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
++	 * name and serial number and store these in hdev->name and hdev->uniq,
++	 * before the hid-input and hidraw drivers expose these to userspace.
+ 	 */
+-	ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
++	ret = hid_hw_start(hdev, 0);
+ 	if (ret) {
+ 		hid_err(hdev, "hw start failed\n");
+ 		goto hid_hw_start_fail;
+@@ -4496,15 +4499,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		hidpp_overwrite_name(hdev);
+ 	}
+ 
+-	if (connected && hidpp->protocol_major >= 2) {
+-		ret = hidpp_set_wireless_feature_index(hidpp);
+-		if (ret == -ENOENT)
+-			hidpp->wireless_feature_index = 0;
+-		else if (ret)
+-			goto hid_hw_init_fail;
+-		ret = 0;
+-	}
+-
+ 	if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ 		ret = wtp_get_config(hidpp);
+ 		if (ret)
+@@ -4518,21 +4512,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	schedule_work(&hidpp->work);
+ 	flush_work(&hidpp->work);
+ 
+-	if (will_restart) {
+-		/* Reset the HID node state */
+-		hid_device_io_stop(hdev);
+-		hid_hw_close(hdev);
+-		hid_hw_stop(hdev);
+-
+-		if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+-			connect_mask &= ~HID_CONNECT_HIDINPUT;
++	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
++		connect_mask &= ~HID_CONNECT_HIDINPUT;
+ 
+-		/* Now export the actual inputs and hidraw nodes to the world */
+-		ret = hid_hw_start(hdev, connect_mask);
+-		if (ret) {
+-			hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+-			goto hid_hw_start_fail;
+-		}
++	/* Now export the actual inputs and hidraw nodes to the world */
++	ret = hid_connect(hdev, connect_mask);
++	if (ret) {
++		hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
++		goto hid_hw_init_fail;
+ 	}
+ 
+ 	if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4543,6 +4530,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 				 ret);
+ 	}
+ 
++	/*
++	 * This relies on logi_dj_ll_close() being a no-op so that DJ connection
++	 * events will still be received.
++	 */
++	hid_hw_close(hdev);
+ 	return ret;
+ 
+ hid_hw_init_fail:
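The hid-logitech-hidpp rework replaces the stop/restart dance with a single start: hid_hw_start(hdev, 0) brings transport IO up with no subdrivers attached, the probe fills in hdev->name and hdev->uniq while nothing is visible to userspace, and hid_connect() then exposes hid-input/hidraw exactly once. A sketch of the flow, with the name/serial retrieval step elided:

static int example_probe_flow(struct hid_device *hdev, unsigned int mask)
{
	int ret;

	ret = hid_hw_start(hdev, 0);	/* IO up, no input/hidraw yet */
	if (ret)
		return ret;

	/* ... query device, fill in hdev->name / hdev->uniq ... */

	return hid_connect(hdev, mask);	/* export nodes, fully named */
}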
+diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c
+index 2bb916226a389..cb274cde3ad23 100644
+--- a/drivers/hid/hid-uclogic-core-test.c
++++ b/drivers/hid/hid-uclogic-core-test.c
+@@ -56,6 +56,11 @@ static struct uclogic_raw_event_hook_test test_events[] = {
+ 	},
+ };
+ 
++static void fake_work(struct work_struct *work)
++{
++
++}
++
+ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ {
+ 	struct uclogic_params p = {0, };
+@@ -77,6 +82,8 @@ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event);
+ 		memcpy(filter->event, &hook_events[n].event[0], filter->size);
+ 
++		INIT_WORK(&filter->work, fake_work);
++
+ 		list_add_tail(&filter->list, &p.event_hooks->list);
+ 	}
+ 
+diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
+index 678f50cbb160b..a30121419a292 100644
+--- a/drivers/hid/hid-uclogic-params-test.c
++++ b/drivers/hid/hid-uclogic-params-test.c
+@@ -174,12 +174,26 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
+ }
+ 
++struct fake_device {
++	unsigned long quirks;
++};
++
+ static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test)
+ {
+ 	int res, n;
++	struct hid_device *hdev;
++	struct fake_device *fake_dev;
+ 	struct uclogic_params p = {0, };
+ 
+-	res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p);
++	hdev = kunit_kzalloc(test, sizeof(struct hid_device), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev);
++
++	fake_dev = kunit_kzalloc(test, sizeof(struct fake_device), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_dev);
++
++	hid_set_drvdata(hdev, fake_dev);
++
++	res = uclogic_params_ugee_v2_init_event_hooks(hdev, &p);
+ 	KUNIT_ASSERT_EQ(test, res, 0);
+ 
+ 	/* Check that the function can be called repeatedly */
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index ba37a5efbf820..ab2edff018eb6 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -153,8 +153,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	cnt = of_hte_req_count(hte.pdev);
+-	if (cnt < 0)
++	if (cnt < 0) {
++		ret = cnt;
+ 		goto free_irq;
++	}
+ 
+ 	dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+ 
+diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
+index 5fd136baf1cd3..19b9bf3d75ef9 100644
+--- a/drivers/hwmon/axi-fan-control.c
++++ b/drivers/hwmon/axi-fan-control.c
+@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	ret = axi_fan_control_init(ctl, pdev->dev.of_node);
++	if (ret) {
++		dev_err(&pdev->dev, "Failed to initialize device\n");
++		return ret;
++	}
++
++	ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
++							 name,
++							 ctl,
++							 &axi_chip_info,
++							 axi_fan_control_groups);
++
++	if (IS_ERR(ctl->hdev))
++		return PTR_ERR(ctl->hdev);
++
+ 	ctl->irq = platform_get_irq(pdev, 0);
+ 	if (ctl->irq < 0)
+ 		return ctl->irq;
+@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+-	if (ret) {
+-		dev_err(&pdev->dev, "Failed to initialize device\n");
+-		return ret;
+-	}
+-
+-	ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+-							 name,
+-							 ctl,
+-							 &axi_chip_info,
+-							 axi_fan_control_groups);
+-
+-	return PTR_ERR_OR_ZERO(ctl->hdev);
++	return 0;
+ }
+ 
+ static struct platform_driver axi_fan_control_driver = {
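The axi-fan-control reorder follows the usual IRQ rule: devm_request_threaded_irq() may fire the handler immediately, so everything the handler dereferences (here the hwmon device) has to exist first. Generic shape, with init_hw_state/request_example_irq as hypothetical placeholders:

static int init_hw_state(struct device *dev);	/* hypothetical */
static int request_example_irq(struct device *dev);

static int example_probe(struct device *dev)
{
	int ret;

	ret = init_hw_state(dev);	/* everything the ISR touches */
	if (ret)
		return ret;

	return request_example_irq(dev); /* ISR may run from here on */
}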
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index eba94f68585a8..ba82d1e79c131 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -42,7 +42,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define PKG_SYSFS_ATTR_NO	1	/* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES		128	/* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
++#define CORETEMP_NAME_LENGTH	28	/* String Length of attrs */
+ #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
+ #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 08ce4984151de..33ca9aea848cc 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1581,17 +1581,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
+ 							  data->fan_div[i]);
+ 
+ 			if (data->has_fan_min & BIT(i)) {
+-				err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
++				u16 tmp;
++
++				err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
+ 				if (err)
+ 					goto out;
+-				data->fan_min[i] = reg;
++				data->fan_min[i] = tmp;
+ 			}
+ 
+ 			if (data->REG_FAN_PULSES[i]) {
+-				err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
++				u16 tmp;
++
++				err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
+ 				if (err)
+ 					goto out;
+-				data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
++				data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ 			}
+ 
+ 			err = nct6775_select_fan_div(dev, data, i, reg);
+diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
+index 1bbda3b05532e..bf408e35e2c32 100644
+--- a/drivers/hwmon/sch5627.c
++++ b/drivers/hwmon/sch5627.c
+@@ -6,6 +6,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bits.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+@@ -32,6 +33,10 @@
+ #define SCH5627_REG_PRIMARY_ID		0x3f
+ #define SCH5627_REG_CTRL		0x40
+ 
++#define SCH5627_CTRL_START		BIT(0)
++#define SCH5627_CTRL_LOCK		BIT(1)
++#define SCH5627_CTRL_VBAT		BIT(4)
++
+ #define SCH5627_NO_TEMPS		8
+ #define SCH5627_NO_FANS			4
+ #define SCH5627_NO_IN			5
+@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
+ 
+ 	/* Trigger a Vbat voltage measurement every 5 minutes */
+ 	if (time_after(jiffies, data->last_battery + 300 * HZ)) {
+-		sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
++		sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
++					  data->control | SCH5627_CTRL_VBAT);
+ 		data->last_battery = jiffies;
+ 	}
+ 
+@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
+ static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ 				  int channel)
+ {
++	const struct sch5627_data *data = drvdata;
++
++	/* Once the lock bit is set, the virtual registers become read-only
++	 * until the next power cycle.
++	 */
++	if (data->control & SCH5627_CTRL_LOCK)
++		return 0444;
++
+ 	if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ 		return 0644;
+ 
+@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
+ 		return val;
+ 
+ 	data->control = val;
+-	if (!(data->control & 0x01)) {
++	if (!(data->control & SCH5627_CTRL_START)) {
+ 		pr_err("hardware monitoring not enabled\n");
+ 		return -ENODEV;
+ 	}
+ 	/* Trigger a Vbat voltage measurement, so that we get a valid reading
+ 	   the first time we read Vbat */
+-	sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
+-				  data->control | 0x10);
++	sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
+ 	data->last_battery = jiffies;
+ 
+ 	/*
+diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
+index de3a0886c2f72..ac1f725807155 100644
+--- a/drivers/hwmon/sch56xx-common.c
++++ b/drivers/hwmon/sch56xx-common.c
+@@ -7,10 +7,8 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/module.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+-#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+@@ -21,10 +19,7 @@
+ #include <linux/slab.h>
+ #include "sch56xx-common.h"
+ 
+-static bool ignore_dmi;
+-module_param(ignore_dmi, bool, 0);
+-MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+-
++/* Insmod parameters */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
+ 	return PTR_ERR_OR_ZERO(sch56xx_pdev);
+ }
+ 
+-static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+-		},
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+-		},
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+-		},
+-	},
+-	{ }
+-};
+-
+-/* For autoloading only */
+-static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-		},
+-	},
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+-
+ static int __init sch56xx_init(void)
+ {
+-	const char *name = NULL;
+ 	int address;
++	const char *name = NULL;
+ 
+-	if (!ignore_dmi) {
+-		if (!dmi_check_system(sch56xx_dmi_table))
+-			return -ENODEV;
+-
+-		if (!dmi_check_system(sch56xx_dmi_override_table)) {
+-			/*
+-			 * Some machines like the Esprimo P720 and Esprimo C700 have
+-			 * onboard devices named " Antiope"/" Theseus" instead of
+-			 * "Antiope"/"Theseus", so we need to check for both.
+-			 */
+-			if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+-				return -ENODEV;
+-		}
+-	}
+-
+-	/*
+-	 * Some devices like the Esprimo C700 have both onboard devices,
+-	 * so we still have to check manually
+-	 */
+ 	address = sch56xx_find(0x4e, &name);
+ 	if (address < 0)
+ 		address = sch56xx_find(0x2e, &name);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 05c80680dff47..68438d4e5d733 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
+ 	iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+ 
+-static void bcm_iproc_i2c_check_slave_status(
+-	struct bcm_iproc_i2c_dev *iproc_i2c)
++static bool bcm_iproc_i2c_check_slave_status
++	(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
+ {
+ 	u32 val;
++	bool recover = false;
+ 
+-	val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+-	/* status is valid only when START_BUSY is cleared after it was set */
+-	if (val & BIT(S_CMD_START_BUSY_SHIFT))
+-		return;
++	/* check slave transmit status only if slave is transmitting */
++	if (!iproc_i2c->slave_rx_only) {
++		val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
++		/* status is valid only when START_BUSY is cleared */
++		if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
++			val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
++			if (val == S_CMD_STATUS_TIMEOUT ||
++			    val == S_CMD_STATUS_MASTER_ABORT) {
++				dev_warn(iproc_i2c->device,
++					 (val == S_CMD_STATUS_TIMEOUT) ?
++					 "slave random stretch time timeout\n" :
++					 "Master aborted read transaction\n");
++				recover = true;
++			}
++		}
++	}
++
++	/* RX_EVENT is not valid when START_BUSY is set */
++	if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
++	    (status & BIT(IS_S_START_BUSY_SHIFT))) {
++		dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
++		recover = true;
++	}
+ 
+-	val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+-	if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+-		dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+-			"slave random stretch time timeout\n" :
+-			"Master aborted read transaction\n");
++	if (recover) {
+ 		/* re-initialize i2c for recovery */
+ 		bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ 		bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ 		bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ 	}
++
++	return recover;
+ }
+ 
+ static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 	u32 val;
+ 	u8 value;
+ 
+-	/*
+-	 * Slave events in case of master-write, master-write-read and,
+-	 * master-read
+-	 *
+-	 * Master-write     : only IS_S_RX_EVENT_SHIFT event
+-	 * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+-	 *                    events
+-	 * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+-	 *                    events or only IS_S_RD_EVENT_SHIFT
+-	 *
+-	 * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+-	 * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+-	 * full. This can happen if Master issues write requests of more than
+-	 * 64 bytes.
+-	 */
+-	if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+-	    status & BIT(IS_S_RD_EVENT_SHIFT) ||
+-	    status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+-		/* disable slave interrupts */
+-		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+-		val &= ~iproc_i2c->slave_int_mask;
+-		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-
+-		if (status & BIT(IS_S_RD_EVENT_SHIFT))
+-			/* Master-write-read request */
+-			iproc_i2c->slave_rx_only = false;
+-		else
+-			/* Master-write request only */
+-			iproc_i2c->slave_rx_only = true;
+-
+-		/* schedule tasklet to read data later */
+-		tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+-
+-		/*
+-		 * clear only IS_S_RX_EVENT_SHIFT and
+-		 * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+-		 */
+-		val = BIT(IS_S_RX_EVENT_SHIFT);
+-		if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+-			val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+-		iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+-	}
+ 
+ 	if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ 		iproc_i2c->tx_underrun++;
+@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 		 * less than PKT_LENGTH bytes were output on the SMBUS
+ 		 */
+ 		iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+-		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+-				 iproc_i2c->slave_int_mask);
++		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++		val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ 
+ 		/* End of SMBUS for Master Read */
+ 		val = BIT(S_TX_WR_STATUS_SHIFT);
+@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 				 BIT(IS_S_START_BUSY_SHIFT));
+ 	}
+ 
+-	/* check slave transmit status only if slave is transmitting */
+-	if (!iproc_i2c->slave_rx_only)
+-		bcm_iproc_i2c_check_slave_status(iproc_i2c);
++	/* if the controller has been reset, immediately return from the ISR */
++	if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
++		return true;
++
++	/*
++	 * Slave events in case of master-write, master-write-read and,
++	 * master-read
++	 *
++	 * Master-write     : only IS_S_RX_EVENT_SHIFT event
++	 * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events
++	 * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events or only IS_S_RD_EVENT_SHIFT
++	 *
++	 * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
++	 * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
++	 * full. This can happen if Master issues write requests of more than
++	 * 64 bytes.
++	 */
++	if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++	    status & BIT(IS_S_RD_EVENT_SHIFT) ||
++	    status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++		/* disable slave interrupts */
++		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++		val &= ~iproc_i2c->slave_int_mask;
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++		if (status & BIT(IS_S_RD_EVENT_SHIFT))
++			/* Master-write-read request */
++			iproc_i2c->slave_rx_only = false;
++		else
++			/* Master-write request only */
++			iproc_i2c->slave_rx_only = true;
++
++		/* schedule tasklet to read data later */
++		tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++		/* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
++		if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++			val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
++			iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
++		}
++	}
+ 
+ 	return true;
+ }
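The bcm-iproc ISR rework moves the slave-status check to the top of event handling and makes it report whether the controller was re-initialized; if it was, every remaining status bit is stale, so the handler returns instead of scheduling the RX tasklet against a freshly reset controller. Generic shape, with check_and_recover/queue_rx_work as hypothetical placeholders:

#include <linux/interrupt.h>

static bool check_and_recover(void *ctx);	/* hypothetical */
static void queue_rx_work(void *ctx);

static irqreturn_t example_isr(int irq, void *ctx)
{
	if (check_and_recover(ctx))
		return IRQ_HANDLED;	/* status is stale after re-init */

	queue_rx_work(ctx);		/* normal event path */
	return IRQ_HANDLED;
}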
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 08aeb69a78003..19071ff9eb753 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1521,9 +1521,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ 			desc->dev->dev.of_node = desc->boardinfo->of_node;
+ 
+ 		ret = device_register(&desc->dev->dev);
+-		if (ret)
++		if (ret) {
+ 			dev_err(&master->dev,
+ 				"Failed to add I3C device (err = %d)\n", ret);
++			put_device(&desc->dev->dev);
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 85e289700c3c5..4abf80f75ef5d 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -33,7 +33,6 @@ enum {
+ 
+ struct adf4350_state {
+ 	struct spi_device		*spi;
+-	struct regulator		*reg;
+ 	struct gpio_desc		*lock_detect_gpiod;
+ 	struct adf4350_platform_data	*pdata;
+ 	struct clk			*clk;
+@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+ 	return pdata;
+ }
+ 
++static void adf4350_power_down(void *data)
++{
++	struct iio_dev *indio_dev = data;
++	struct adf4350_state *st = iio_priv(indio_dev);
++
++	st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
++	adf4350_sync_config(st);
++}
++
+ static int adf4350_probe(struct spi_device *spi)
+ {
+ 	struct adf4350_platform_data *pdata;
+@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
+ 	}
+ 
+ 	if (!pdata->clkin) {
+-		clk = devm_clk_get(&spi->dev, "clkin");
++		clk = devm_clk_get_enabled(&spi->dev, "clkin");
+ 		if (IS_ERR(clk))
+-			return -EPROBE_DEFER;
+-
+-		ret = clk_prepare_enable(clk);
+-		if (ret < 0)
+-			return ret;
++			return PTR_ERR(clk);
+ 	}
+ 
+ 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+-	if (indio_dev == NULL) {
+-		ret =  -ENOMEM;
+-		goto error_disable_clk;
+-	}
++	if (indio_dev == NULL)
++		return -ENOMEM;
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vcc");
+-	if (!IS_ERR(st->reg)) {
+-		ret = regulator_enable(st->reg);
+-		if (ret)
+-			goto error_disable_clk;
+-	}
++	ret = devm_regulator_get_enable(&spi->dev, "vcc");
++	if (ret)
++		return ret;
+ 
+-	spi_set_drvdata(spi, indio_dev);
+ 	st->spi = spi;
+ 	st->pdata = pdata;
+ 
+@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
+ 
+ 	st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ 							GPIOD_IN);
+-	if (IS_ERR(st->lock_detect_gpiod)) {
+-		ret = PTR_ERR(st->lock_detect_gpiod);
+-		goto error_disable_reg;
+-	}
++	if (IS_ERR(st->lock_detect_gpiod))
++		return PTR_ERR(st->lock_detect_gpiod);
+ 
+ 	if (pdata->power_up_frequency) {
+ 		ret = adf4350_set_freq(st, pdata->power_up_frequency);
+ 		if (ret)
+-			goto error_disable_reg;
++			return ret;
+ 	}
+ 
+-	ret = iio_device_register(indio_dev);
++	ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
+ 	if (ret)
+-		goto error_disable_reg;
+-
+-	return 0;
+-
+-error_disable_reg:
+-	if (!IS_ERR(st->reg))
+-		regulator_disable(st->reg);
+-error_disable_clk:
+-	clk_disable_unprepare(clk);
+-
+-	return ret;
+-}
+-
+-static void adf4350_remove(struct spi_device *spi)
+-{
+-	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+-	struct adf4350_state *st = iio_priv(indio_dev);
+-	struct regulator *reg = st->reg;
+-
+-	st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+-	adf4350_sync_config(st);
+-
+-	iio_device_unregister(indio_dev);
+-
+-	clk_disable_unprepare(st->clk);
++		return dev_err_probe(&spi->dev, ret,
++				     "Failed to add action to managed power down\n");
+ 
+-	if (!IS_ERR(reg))
+-		regulator_disable(reg);
++	return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+ 
+ static const struct of_device_id adf4350_of_match[] = {
+@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
+ 		.of_match_table = adf4350_of_match,
+ 	},
+ 	.probe		= adf4350_probe,
+-	.remove		= adf4350_remove,
+ 	.id_table	= adf4350_id,
+ };
+ module_spi_driver(adf4350_driver);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index a666847bd7143..010718738d04c 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
+ 	 * empty slots at the beginning.
+ 	 */
+ 	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+-					rdma_end_port(device) + 1),
++					size_add(rdma_end_port(device), 1)),
+ 			    GFP_KERNEL);
+ 	if (!pdata_rcu)
+ 		return -ENOMEM;
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 59179cfc20ef9..8175dde60b0a8 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device)
+ 	s = rdma_start_port(device);
+ 	e = rdma_end_port(device);
+ 
+-	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
++	sa_dev = kzalloc(struct_size(sa_dev, port,
++				     size_add(size_sub(e, s), 1)),
++			 GFP_KERNEL);
+ 	if (!sa_dev)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index ee59d73915689..ec5efdc166601 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
+	 * Two extra attribute elements here, one for the lifespan entry and
+ 	 * one to NULL terminate the list for the sysfs core code
+ 	 */
+-	data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++	data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ 		       GFP_KERNEL);
+ 	if (!data)
+ 		goto err_free_stats;
+@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+	 * Two extra attribute elements here, one for the lifespan entry and
+ 	 * one to NULL terminate the list for the sysfs core code
+ 	 */
+-	data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++	data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ 		       GFP_KERNEL);
+ 	if (!data)
+ 		goto err_free_stats;
+@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
+ 	int ret;
+ 
+ 	gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+-					     attr->gid_tbl_len * 2),
++					     size_mul(attr->gid_tbl_len, 2)),
+ 				 GFP_KERNEL);
+ 	if (!gid_attr_group)
+ 		return -ENOMEM;
+@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ 	int ret;
+ 
+ 	p = kvzalloc(struct_size(p, attrs_list,
+-				attr->gid_tbl_len + attr->pkey_tbl_len),
+-		    GFP_KERNEL);
++				size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
++		     GFP_KERNEL);
+ 	if (!p)
+ 		return ERR_PTR(-ENOMEM);
+ 	p->ibdev = device;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 7e5c33aad1619..f5feca7fa9b9c 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1378,7 +1378,9 @@ static int ib_umad_add_one(struct ib_device *device)
+ 	s = rdma_start_port(device);
+ 	e = rdma_end_port(device);
+ 
+-	umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
++	umad_dev = kzalloc(struct_size(umad_dev, ports,
++				       size_add(size_sub(e, s), 1)),
++			   GFP_KERNEL);
+ 	if (!umad_dev)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
+index 7741a1d69097c..2b5d264f41e51 100644
+--- a/drivers/infiniband/hw/hfi1/efivar.c
++++ b/drivers/infiniband/hw/hfi1/efivar.c
+@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ 		      unsigned long *size, void **return_data)
+ {
+ 	char prefix_name[64];
+-	char name[64];
++	char name[128];
+ 	int result;
+ 
+ 	/* create a common prefix */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index e77fcc74f15c4..3df032ddda189 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,9 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_device.h"
++#include "hns_roce_hw_v2.h"
+ 
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+@@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ 	struct hns_roce_ah *ah = to_hr_ah(ibah);
+ 	int ret = 0;
++	u32 max_sl;
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
+ 		return -EOPNOTSUPP;
+@@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 	ah->av.hop_limit = grh->hop_limit;
+ 	ah->av.flowlabel = grh->flow_label;
+ 	ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+-	ah->av.sl = rdma_ah_get_sl(ah_attr);
+ 	ah->av.tclass = get_tclass(grh);
+ 
++	ah->av.sl = rdma_ah_get_sl(ah_attr);
++	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++	if (unlikely(ah->av.sl > max_sl)) {
++		ibdev_err_ratelimited(&hr_dev->ib_dev,
++				      "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
++				      ah->av.sl, max_sl);
++		return -EINVAL;
++	}
++
+ 	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ 	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 1d998298e28fc..d371442a78b59 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ 	int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+ 
+-	if (len > qp->max_inline_data || len > mtu) {
++	if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ 		ibdev_err(&hr_dev->ib_dev,
+ 			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ 			  len, qp->max_inline_data, mtu);
+@@ -4740,6 +4740,9 @@ static int check_cong_type(struct ib_qp *ibqp,
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 
++	if (ibqp->qp_type == IB_QPT_UD)
++		hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++
+ 	/* different congestion types match different configurations */
+ 	switch (hr_dev->caps.cong_type) {
+ 	case CONG_TYPE_DCQCN:
+@@ -4836,22 +4839,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	const struct ib_gid_attr *gid_attr = NULL;
++	u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ 	int is_roce_protocol;
+ 	u16 vlan_id = 0xffff;
+ 	bool is_udp = false;
++	u32 max_sl;
+ 	u8 ib_port;
+ 	u8 hr_port;
+ 	int ret;
+ 
++	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++	if (unlikely(sl > max_sl)) {
++		ibdev_err_ratelimited(ibdev,
++				      "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
++				      sl, max_sl);
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * If free_mr_en of qp is set, it means that this qp comes from
+ 	 * free mr. This qp will perform the loopback operation.
+ 	 * In the loopback scenario, only sl needs to be set.
+ 	 */
+ 	if (hr_qp->free_mr_en) {
+-		hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
++		hr_reg_write(context, QPC_SL, sl);
+ 		hr_reg_clear(qpc_mask, QPC_SL);
+-		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++		hr_qp->sl = sl;
+ 		return 0;
+ 	}
+ 
+@@ -4918,14 +4931,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+ 
+-	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+-	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+-		ibdev_err(ibdev,
+-			  "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
+-			  hr_qp->sl, MAX_SERVICE_LEVEL);
+-		return -EINVAL;
+-	}
+-
++	hr_qp->sl = sl;
+ 	hr_reg_write(context, QPC_SL, hr_qp->sl);
+ 	hr_reg_clear(qpc_mask, QPC_SL);
+ 
+@@ -5819,7 +5825,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 	case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+-		ibdev_warn(ibdev, "send queue drained.\n");
++		ibdev_dbg(ibdev, "send queue drained.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ 		ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+@@ -5834,10 +5840,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 			  irq_work->queue_num, irq_work->sub_type);
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+-		ibdev_warn(ibdev, "SRQ limit reach.\n");
++		ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+-		ibdev_warn(ibdev, "SRQ last wqe reach.\n");
++		ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ 		ibdev_err(ibdev, "SRQ catas error.\n");
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index cdc1c6de43a17..828b58534aa97 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1064,7 +1064,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ 	struct hns_roce_ib_create_qp_resp resp = {};
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+-	struct hns_roce_ib_create_qp ucmd;
++	struct hns_roce_ib_create_qp ucmd = {};
+ 	int ret;
+ 
+ 	mutex_init(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 666e737371b76..61d892bf6d38b 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4052,10 +4052,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+ 		return ret;
+ 
+ 	ret = mlx5_mkey_cache_init(dev);
+-	if (ret) {
++	if (ret)
+ 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+-		mlx5r_umr_resource_cleanup(dev);
+-	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 78b96bfb4e6ac..2340baaba8e67 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4045,6 +4045,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
+ 	return tx_affinity;
+ }
+ 
++static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
++					   struct mlx5_core_dev *mdev)
++{
++	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
++	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
++	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
++	void *rqc;
++
++	if (!qp->rq.wqe_cnt)
++		return 0;
++
++	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
++	MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
++
++	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
++	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
++
++	MLX5_SET64(modify_rq_in, in, modify_bitmask,
++		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
++	MLX5_SET(rqc, rqc, counter_set_id, set_id);
++
++	return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
++}
++
+ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ 				    struct rdma_counter *counter)
+ {
+@@ -4060,6 +4084,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ 	else
+ 		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+ 
++	if (mqp->type == IB_QPT_RAW_PACKET)
++		return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
++
+ 	base = &mqp->trans_qp.base;
+ 	MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ 	MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
+index f2e093b0b9982..1b45b1d3077de 100644
+--- a/drivers/input/rmi4/rmi_bus.c
++++ b/drivers/input/rmi4/rmi_bus.c
+@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
+ 
+ 	device_del(&fn->dev);
+ 	of_node_put(fn->dev.of_node);
+-	put_device(&fn->dev);
+ 
+ 	for (i = 0; i < fn->num_of_irqs; i++)
+ 		irq_dispose_mapping(fn->irq[i]);
+ 
++	put_device(&fn->dev);
+ }
+ 
+ /**
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 6acc7686ed386..f45d48db15440 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -491,7 +491,7 @@ regmap_done:
+ 
+ 	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
+ 	if (ret)
+-		return ret;
++		goto err_disable_unprepare_clk;
+ 
+ 	provider = &qp->provider;
+ 	provider->dev = dev;
+@@ -506,13 +506,15 @@ regmap_done:
+ 	/* If this fails, bus accesses will crash the platform! */
+ 	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
+ 	if (ret)
+-		return ret;
++		goto err_disable_unprepare_clk;
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+ 
+ 		node = icc_node_create(qnodes[i]->id);
+ 		if (IS_ERR(node)) {
++			clk_bulk_disable_unprepare(qp->num_intf_clks,
++						   qp->intf_clks);
+ 			ret = PTR_ERR(node);
+ 			goto err_remove_nodes;
+ 		}
+@@ -528,8 +530,11 @@ regmap_done:
+ 		if (qnodes[i]->qos.ap_owned &&
+ 		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
+ 			ret = qcom_icc_qos_set(node);
+-			if (ret)
+-				return ret;
++			if (ret) {
++				clk_bulk_disable_unprepare(qp->num_intf_clks,
++							   qp->intf_clks);
++				goto err_remove_nodes;
++			}
+ 		}
+ 
+ 		data->nodes[i] = node;
+@@ -557,6 +562,7 @@ err_deregister_provider:
+ 	icc_provider_deregister(provider);
+ err_remove_nodes:
+ 	icc_nodes_remove(provider);
++err_disable_unprepare_clk:
+ 	clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);
+ 
+ 	return ret;
+diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
+index a4cf559de2b0b..4725f5f5c6e19 100644
+--- a/drivers/interconnect/qcom/qdu1000.c
++++ b/drivers/interconnect/qcom/qdu1000.c
+@@ -768,6 +768,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index ef4e13fb49831..bf8bd7a6691f9 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -153,30 +153,238 @@ DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 48,
++	.nodes = { &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy0,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_boot_rom,
++		   &qhs_camera_cfg,
++		   &qhs_camera_nrt_throttle_cfg,
++		   &qhs_camera_rt_throttle_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_display_rt_throttle_cfg,
++		   &qhs_display_throttle_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_mss_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_npu_dma_throttle_cfg,
++		   &qhs_npu_dsp_throttle_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qm_cfg,
++		   &qhs_qm_mpu_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_security,
++		   &qhs_snoc_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_1,
++		   &qhs_tlmm_2,
++		   &qhs_tlmm_3,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3,
++		   &qhs_venus_cfg,
++		   &qhs_venus_throttle_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 8,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qhm_mnoc_cfg,
++		   &qxm_mdp0,
++		   &qxm_rot,
++		   &qxm_venus0,
++		   &qxm_venus_arm9
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qup_core_master_1, &qup_core_master_2 },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps0 },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 8,
++	.nodes = { &qhm_qspi,
++		   &xm_sdc2,
++		   &xm_emmc,
++		   &qhs_ahb2phy2,
++		   &qhs_emmc_cfg,
++		   &qhs_pdm,
++		   &qhs_qspi,
++		   &qhs_sdc2
++	},
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_pimem, &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++	.name = "CO2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_cn1,
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 971f538bc98ad..3c39edd21b6ca 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -1284,6 +1284,7 @@ static struct qcom_icc_node srvc_snoc = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index c76e3a6a98cdd..024930f24f1f7 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1344,6 +1344,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &slv_ebi }
+ };
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index e56df893ec3e7..77b31ab2547ab 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -1711,6 +1711,7 @@ static struct qcom_icc_node srvc_snoc = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
+index bda955035518c..d77ad3d11ba97 100644
+--- a/drivers/interconnect/qcom/sdm670.c
++++ b/drivers/interconnect/qcom/sdm670.c
+@@ -131,30 +131,222 @@ DEFINE_QNODE(srvc_snoc, SDM670_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SDM670_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SDM670_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emmc_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre1_noc, &srvc_aggre1_noc);
+-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_aggre2_noc, &srvc_aggre2_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gladiator_sodv, &xm_gic);
+-DEFINE_QBCM(bcm_sn13, "SN13", false, &qnm_memnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_sh1 = {
++	.name = "SH1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_apps_io },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = true,
++	.num_nodes = 7,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf0,
++		   &qxm_camnoc_hf1,
++		   &qxm_mdp0,
++		   &qxm_mdp1
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 5,
++	.nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++	.name = "SH5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 41,
++	.nodes = { &qhm_spdm,
++		   &qnm_snoc,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_dsp_cfg,
++		   &qhs_cpr_cx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_emmc_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_pdm,
++		   &qhs_phy_refgen_south,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qupv3_north,
++		   &qhs_qupv3_south,
++		   &qhs_sdc2,
++		   &qhs_sdc4,
++		   &qhs_snoc_cfg,
++		   &qhs_spdm,
++		   &qhs_tcsr,
++		   &qhs_tlmm_north,
++		   &qhs_tlmm_south,
++		   &qhs_tsif,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_cnoc_a2noc,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_pimem, &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qnm_aggre1_noc, &srvc_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn10 = {
++	.name = "SN10",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qnm_aggre2_noc, &srvc_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++	.name = "SN11",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qnm_gladiator_sodv, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn13 = {
++	.name = "SN13",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_memnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_qup0,
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index 954e7bd13fc41..02cf890684441 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -146,34 +146,256 @@ DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_sh1 = {
++	.name = "SH1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_apps_io },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = true,
++	.num_nodes = 7,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf0,
++		   &qxm_camnoc_hf1,
++		   &qxm_mdp0,
++		   &qxm_mdp1
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 5,
++	.nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++	.name = "SH5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = false,
++	.num_nodes = 47,
++	.nodes = { &qhm_spdm,
++		   &qhm_tic,
++		   &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_dsp_cfg,
++		   &qhs_cpr_cx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie_gen3_cfg,
++		   &qhs_pdm,
++		   &qhs_phy_refgen_south,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qupv3_north,
++		   &qhs_qupv3_south,
++		   &qhs_sdc2,
++		   &qhs_sdc4,
++		   &qhs_snoc_cfg,
++		   &qhs_spdm,
++		   &qhs_spss_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_north,
++		   &qhs_tlmm_south,
++		   &qhs_tsif,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_cnoc_a2noc,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pcie },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pcie_gen3 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &srvc_aggre1_noc, &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++	.name = "SN11",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &srvc_aggre2_noc, &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qnm_gladiator_sodv, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_pcie_anoc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++	.name = "SN15",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_memnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_sn9,
+diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
+index a3d46e59444e0..aae4b43b730c0 100644
+--- a/drivers/interconnect/qcom/sm6350.c
++++ b/drivers/interconnect/qcom/sm6350.c
+@@ -142,31 +142,233 @@ DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 41,
++	.nodes = { &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy0,
++		   &qhs_aoss,
++		   &qhs_boot_rom,
++		   &qhs_camera_cfg,
++		   &qhs_camera_nrt_thrott_cfg,
++		   &qhs_camera_rt_throttle_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_display_throttle_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_mss_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qm_cfg,
++		   &qhs_qm_mpu_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_security,
++		   &qhs_snoc_cfg,
++		   &qhs_tcsr,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_venus_cfg,
++		   &qhs_venus_throttle_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 6,
++	.nodes = { &xm_emmc,
++		   &xm_sdc2,
++		   &qhs_ahb2phy2,
++		   &qhs_emmc_cfg,
++		   &qhs_pdm,
++		   &qhs_sdc2
++	},
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++	.name = "CO2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = true,
++	.num_nodes = 5,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_icp_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf,
++		   &qxm_mdp0
++	},
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn10 = {
++	.name = "SN10",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_cn1,
+diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
+index c5ab29322164a..685f35bbf5a7c 100644
+--- a/drivers/interconnect/qcom/sm8150.c
++++ b/drivers/interconnect/qcom/sm8150.c
+@@ -154,34 +154,262 @@ DEFINE_QNODE(xs_pcie_1, SM8150_SLAVE_PCIE_1, 1, 8);
+ DEFINE_QNODE(xs_qdss_stm, SM8150_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM8150_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_gem_noc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qxm_camnoc_sf, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_gpu_tcu, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &xs_pcie_0, &xs_pcie_1);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 7,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf0,
++		   &qxm_camnoc_hf1,
++		   &qxm_mdp0,
++		   &qxm_mdp1
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gem_noc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_camnoc_sf, &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &acm_gpu_tcu, &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++	.name = "SH5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_co1 = {
++	.name = "CO1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 53,
++	.nodes = { &qhm_spdm,
++		   &qnm_snoc,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy_south,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_dsp,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mmcx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_emac_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie1_cfg,
++		   &qhs_phy_refgen_north,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qspi,
++		   &qhs_qupv3_east,
++		   &qhs_qupv3_north,
++		   &qhs_qupv3_south,
++		   &qhs_sdc2,
++		   &qhs_sdc4,
++		   &qhs_snoc_cfg,
++		   &qhs_spdm,
++		   &qhs_spss_cfg,
++		   &qhs_ssc_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_east,
++		   &qhs_tlmm_north,
++		   &qhs_tlmm_south,
++		   &qhs_tlmm_west,
++		   &qhs_tsif,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_cnoc_a2noc,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qhm_qup0, &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &xs_pcie_0, &xs_pcie_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++	.name = "SN11",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_pimem, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++	.name = "SN15",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_qup0,
+diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
+index 5398e7c8d826b..e6e2dcf4574d8 100644
+--- a/drivers/interconnect/qcom/sm8350.c
++++ b/drivers/interconnect/qcom/sm8350.c
+@@ -165,38 +165,283 @@ DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
+ DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
+ DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
+-DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
+-DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
+-DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
+-DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 2,
++	.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 47,
++	.nodes = { &xm_qdss_dap,
++		   &qhs_ahb2phy0,
++		   &qhs_ahb2phy1,
++		   &qhs_aoss,
++		   &qhs_apss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_cfg,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mmcx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_cx_rdpm,
++		   &qhs_dcc_cfg,
++		   &qhs_display_cfg,
++		   &qhs_gpuss_cfg,
++		   &qhs_hwkm,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_ipc_router,
++		   &qhs_mss_cfg,
++		   &qhs_mx_rdpm,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie1_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_pka_wrapper_cfg,
++		   &qhs_pmu_wrapper_cfg,
++		   &qhs_qdss_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_qup2,
++		   &qhs_security,
++		   &qhs_spss_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_a1_noc_cfg,
++		   &qns_a2_noc_cfg,
++		   &qns_ddrss_cfg,
++		   &qns_mnoc_cfg,
++		   &qns_snoc_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_cn2 = {
++	.name = "CN2",
++	.keepalive = false,
++	.num_nodes = 5,
++	.nodes = { &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4 },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_nsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_nsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
++};
++
++static struct qcom_icc_bcm bcm_mm4 = {
++	.name = "MM4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm5 = {
++	.name = "MM5",
++	.keepalive = false,
++	.num_nodes = 6,
++	.nodes = { &qnm_camnoc_icp,
++		   &qnm_camnoc_sf,
++		   &qnm_video0,
++		   &qnm_video1,
++		   &qnm_video_cvp,
++		   &qxm_rot
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &alm_gpu_tcu, &alm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &chm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xm_pcie3_0 },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xm_pcie3_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_acv_disp = {
++	.name = "ACV",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mc0_disp = {
++	.name = "MC0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm0_disp = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm1_disp = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm4_disp = {
++	.name = "MM4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm5_disp = {
++	.name = "MM5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_rot_disp },
++};
++
++static struct qcom_icc_bcm bcm_sh0_disp = {
++	.name = "SH0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc_disp },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ };
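
A note for readers comparing the two forms above: judging purely from the replaced lines, the retired DEFINE_QBCM() one-liners appear to expand to exactly the open-coded initializers that replace them, with num_nodes derived from the argument count. A sketch of such a macro, inferred from this patch rather than quoted from the driver header:

#define DEFINE_QBCM(_name, _bcmname, _keepalive, ...)		\
static struct qcom_icc_bcm _name = {				\
	.name = _bcmname,					\
	.keepalive = _keepalive,				\
	.num_nodes = COUNT_ARGS(__VA_ARGS__),			\
	.nodes = { __VA_ARGS__ },				\
}

The open-coded form buys one thing the macro could not express: extra per-BCM fields, such as bcm_acv's .enable_mask = BIT(3) above.
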
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 724c4c5742417..9b3935042459e 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -221,6 +221,18 @@ static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
+ 	return 0;
+ }
+ 
++static struct iopt_area *iopt_area_alloc(void)
++{
++	struct iopt_area *area;
++
++	area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++	if (!area)
++		return NULL;
++	RB_CLEAR_NODE(&area->node.rb);
++	RB_CLEAR_NODE(&area->pages_node.rb);
++	return area;
++}
++
+ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ 				 struct list_head *pages_list,
+ 				 unsigned long length, unsigned long *dst_iova,
+@@ -231,7 +243,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ 	int rc = 0;
+ 
+ 	list_for_each_entry(elm, pages_list, next) {
+-		elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT);
++		elm->area = iopt_area_alloc();
+ 		if (!elm->area)
+ 			return -ENOMEM;
+ 	}
+@@ -1005,11 +1017,11 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ 	    iopt_area_start_byte(area, new_start) & (alignment - 1))
+ 		return -EINVAL;
+ 
+-	lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++	lhs = iopt_area_alloc();
+ 	if (!lhs)
+ 		return -ENOMEM;
+ 
+-	rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++	rhs = iopt_area_alloc();
+ 	if (!rhs) {
+ 		rc = -ENOMEM;
+ 		goto err_free_lhs;
+diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
+index 8d9aa297c117e..528f356238b34 100644
+--- a/drivers/iommu/iommufd/pages.c
++++ b/drivers/iommu/iommufd/pages.c
+@@ -1507,6 +1507,8 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
+ 				area, domain, iopt_area_index(area),
+ 				iopt_area_last_index(area));
+ 
++	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
++		WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
+ 	interval_tree_remove(&area->pages_node, &pages->domains_itree);
+ 	iopt_area_unfill_domain(area, pages, area->storage_domain);
+ 	area->storage_domain = NULL;
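
The two iommufd hunks above work as a pair: every iopt_area is now created through one allocator that marks both rbtree nodes empty, which is what allows the unfill path to assert tree membership before interval_tree_remove(). The allocate-then-assert pattern in isolation, using a hypothetical struct and names:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct foo {
	struct rb_node node;
};

static struct foo *foo_alloc(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	/* Mark the node "not inserted" so membership can be tested. */
	RB_CLEAR_NODE(&foo->node);
	return foo;
}

static void foo_unlink(struct rb_root *root, struct foo *foo)
{
	/* Catch erasing a node that was never inserted. */
	if (WARN_ON(RB_EMPTY_NODE(&foo->node)))
		return;
	rb_erase(&foo->node, root);
	RB_CLEAR_NODE(&foo->node);
}
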
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index e1484905b7bdb..5b7bc4fd9517c 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -532,17 +532,18 @@ done:
+ 	}
+ 
+ 	/*
+-	 * We can have multiple PLIC instances so setup cpuhp state only
+-	 * when context handler for current/boot CPU is present.
++	 * We can have multiple PLIC instances, so set up the cpuhp state
++	 * and register the syscore operations only when the context
++	 * handler for the current/boot CPU is present.
+ 	 */
+ 	handler = this_cpu_ptr(&plic_handlers);
+ 	if (handler->present && !plic_cpuhp_setup_done) {
+ 		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ 				  "irqchip/sifive/plic:starting",
+ 				  plic_starting_cpu, plic_dying_cpu);
++		register_syscore_ops(&plic_irq_syscore_ops);
+ 		plic_cpuhp_setup_done = true;
+ 	}
+-	register_syscore_ops(&plic_irq_syscore_ops);
+ 
+ 	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+ 		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 87c199242f3c8..de695cbd89816 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ 		duty = led_dat->pwmstate.period - duty;
+ 
+ 	led_dat->pwmstate.duty_cycle = duty;
+-	led_dat->pwmstate.enabled = duty > 0;
++	led_dat->pwmstate.enabled = true;
+ 	return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+ }
+ 
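
The one-line leds-pwm change above is a policy switch: brightness zero now means "PWM enabled at 0% duty" rather than "PWM disabled", presumably because a disabled PWM's output level is hardware-dependent while an enabled one at zero duty is guaranteed inactive. A hedged sketch of the resulting off path (illustrative helper, not the driver's code):

#include <linux/pwm.h>

static int led_pwm_off(struct pwm_device *pwm, struct pwm_state *state)
{
	state->duty_cycle = 0;
	state->enabled = true;	/* keep the PWM running at 0% duty */
	return pwm_apply_state(pwm, state);
}
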
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index 64b2d7b6d3f31..fc8b206b69189 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -2,7 +2,7 @@
+ /*
+  * CZ.NIC's Turris Omnia LEDs driver
+  *
+- * 2020 by Marek Behún <kabel@kernel.org>
++ * 2020, 2023 by Marek Behún <kabel@kernel.org>
+  */
+ 
+ #include <linux/i2c.h>
+@@ -41,6 +41,37 @@ struct omnia_leds {
+ 	struct omnia_led leds[];
+ };
+ 
++static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
++{
++	u8 buf[2] = { cmd, val };
++
++	return i2c_master_send(client, buf, sizeof(buf));
++}
++
++static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
++{
++	struct i2c_msg msgs[2];
++	u8 reply;
++	int ret;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = 0;
++	msgs[0].len = 1;
++	msgs[0].buf = &cmd;
++	msgs[1].addr = client->addr;
++	msgs[1].flags = I2C_M_RD;
++	msgs[1].len = 1;
++	msgs[1].buf = &reply;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++	if (likely(ret == ARRAY_SIZE(msgs)))
++		return reply;
++	else if (ret < 0)
++		return ret;
++	else
++		return -EIO;
++}
++
+ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ 					     enum led_brightness brightness)
+ {
+@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ 	if (buf[2] || buf[3] || buf[4])
+ 		state |= CMD_LED_STATE_ON;
+ 
+-	ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
++	ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ 	if (ret >= 0 && (state & CMD_LED_STATE_ON))
+ 		ret = i2c_master_send(leds->client, buf, 5);
+ 
+@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ 	cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
+ 
+ 	/* put the LED into software mode */
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+-					CMD_LED_MODE_LED(led->reg) |
+-					CMD_LED_MODE_USER);
++	ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
++				 CMD_LED_MODE_LED(led->reg) |
++				 CMD_LED_MODE_USER);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ 			ret);
+@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ 	}
+ 
+ 	/* disable the LED */
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+-					CMD_LED_STATE_LED(led->reg));
++	ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
++				 CMD_LED_STATE_LED(led->reg));
+ 	if (ret < 0) {
+ 		dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
+ 		return ret;
+@@ -156,12 +187,9 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ 			       char *buf)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct omnia_leds *leds = i2c_get_clientdata(client);
+ 	int ret;
+ 
+-	mutex_lock(&leds->lock);
+-	ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
+-	mutex_unlock(&leds->lock);
++	ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+ 
+ 	if (ret < 0)
+ 		return ret;
+@@ -173,7 +201,6 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ 				const char *buf, size_t count)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct omnia_leds *leds = i2c_get_clientdata(client);
+ 	unsigned long brightness;
+ 	int ret;
+ 
+@@ -183,15 +210,9 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ 	if (brightness > 100)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&leds->lock);
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+-					(u8)brightness);
+-	mutex_unlock(&leds->lock);
+-
+-	if (ret < 0)
+-		return ret;
++	ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+ 
+-	return count;
++	return ret < 0 ? ret : count;
+ }
+ static DEVICE_ATTR_RW(brightness);
+ 
+@@ -246,8 +267,8 @@ static void omnia_leds_remove(struct i2c_client *client)
+ 	u8 buf[5];
+ 
+ 	/* put all LEDs into default (HW triggered) mode */
+-	i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+-				  CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
++	omnia_cmd_write_u8(client, CMD_LED_MODE,
++			   CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
+ 
+ 	/* set all LEDs color to [255, 255, 255] */
+ 	buf[0] = CMD_LED_COLOR;
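
A note on the new omnia_cmd_read_u8() above: i2c_transfer() returns the number of messages transferred, so the helper maps a complete two-message round trip to the byte that was read, a negative value to the bus error, and any partial transfer to -EIO. A hypothetical caller therefore only needs to test for a negative return:

/* Illustrative caller: read the global LED brightness, falling
 * back to a default when the bus transaction fails.
 */
static u8 omnia_brightness_or_default(const struct i2c_client *client,
				      u8 fallback)
{
	int ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);

	return ret < 0 ? fallback : ret;
}
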
+diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
+index 8af4f9bb9cde8..05848a2fecff6 100644
+--- a/drivers/leds/trigger/ledtrig-cpu.c
++++ b/drivers/leds/trigger/ledtrig-cpu.c
+@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
+ 
+ static int __init ledtrig_cpu_init(void)
+ {
+-	int cpu;
++	unsigned int cpu;
+ 	int ret;
+ 
+ 	/* Supports up to 9999 cpu cores */
+@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
+ 		if (cpu >= 8)
+ 			continue;
+ 
+-		snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
++		snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
+ 
+ 		led_trigger_register_simple(trig->name, &trig->_trig);
+ 	}
+diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
+index 26d2bc7783944..a51e98ab4958d 100644
+--- a/drivers/media/cec/platform/Makefile
++++ b/drivers/media/cec/platform/Makefile
+@@ -6,7 +6,7 @@
+ # Please keep it in alphabetic order
+ obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+ obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+-obj-$(CONFIG_CEC_MESON_AO) += meson/
++obj-y += meson/
+ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+ obj-$(CONFIG_CEC_SECO) += seco/
+ obj-$(CONFIG_CEC_STI) += sti/
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 88c58e0c49aab..e265e94ca3de4 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -1461,7 +1461,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ 
+ 		i2c_mux_mask |= BIT(id);
+ 	}
+-	of_node_put(node);
+ 	of_node_put(i2c_mux);
+ 
+ 	/* Parse the endpoints */
+@@ -1525,7 +1524,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ 		priv->source_mask |= BIT(ep.port);
+ 		priv->nsources++;
+ 	}
+-	of_node_put(node);
+ 
+ 	of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
+ 	switch (priv->bus_width) {
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 8b7ff2f3bdda7..8d6cdc7c345b0 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
++static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
++{
++	const struct ov5640_mode_info *mode = sensor->current_mode;
++
++	__v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
++				 OV5640_MAX_VTS - mode->height, 1, vblank);
++
++	__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++}
++
+ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ {
+ 	const struct ov5640_mode_info *mode = sensor->current_mode;
+ 	enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ 	struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+-	const struct ov5640_timings *timings;
++	const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
+ 	s32 exposure_val, exposure_max;
+ 	unsigned int hblank;
+ 	unsigned int i = 0;
+@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ 		__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ 					 ov5640_calc_pixel_rate(sensor));
+ 
++		__v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
++
+ 		return 0;
+ 	}
+ 
+@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ 	__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ 	__v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+ 
+-	timings = ov5640_timings(sensor, mode);
+ 	hblank = timings->htot - mode->width;
+ 	__v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ 				 hblank, hblank, 1, hblank);
+ 
+ 	vblank = timings->vblank_def;
+-	__v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+-				 OV5640_MAX_VTS - mode->height, 1, vblank);
+-	__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++	__v4l2_ctrl_vblank_update(sensor, vblank);
+ 
+ 	exposure_max = timings->crop.height + vblank - 4;
+ 	exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
+ 	ret = ov5640_sensor_resume(dev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to power on\n");
+-		goto entity_cleanup;
++		goto free_ctrls;
+ 	}
+ 
+ 	pm_runtime_set_active(dev);
+@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
+ err_pm_runtime:
+ 	pm_runtime_put_noidle(dev);
+ 	pm_runtime_disable(dev);
+-	v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ 	ov5640_sensor_suspend(dev);
++free_ctrls:
++	v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ entity_cleanup:
+ 	media_entity_cleanup(&sensor->sd.entity);
+ 	mutex_destroy(&sensor->lock);
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 734f02b91aa31..a50cae25b5463 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -3830,6 +3830,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
+ 
+ 	/* free resources */
+ 	free_irq(btv->c.pci->irq,btv);
++	del_timer_sync(&btv->timeout);
+ 	iounmap(btv->bt848_mmio);
+ 	release_mem_region(pci_resource_start(btv->c.pci,0),
+ 			   pci_resource_len(btv->c.pci,0));
+diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
+index 667637eedb5d4..7320852668d64 100644
+--- a/drivers/media/platform/amphion/vpu_defs.h
++++ b/drivers/media/platform/amphion/vpu_defs.h
+@@ -71,6 +71,7 @@ enum {
+ 	VPU_MSG_ID_TIMESTAMP_INFO,
+ 	VPU_MSG_ID_FIRMWARE_XCPT,
+ 	VPU_MSG_ID_PIC_SKIPPED,
++	VPU_MSG_ID_DBG_MSG,
+ };
+ 
+ enum VPU_ENC_MEMORY_RESOURSE {
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index af3b336e5dc32..d12310af9ebce 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
+ 	case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ 	case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ 	case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++	case VPU_MSG_ID_DBG_MSG: return "debug msg";
+ 	}
+ 	return "<unknown>";
+ }
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index c1d6606ad7e57..46713be69adbd 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -747,6 +747,7 @@ static struct vpu_pair malone_msgs[] = {
+ 	{VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ 	{VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+ 	{VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
++	{VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
+ };
+ 
+ static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d0ead051f7d18..b74a407a19f22 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -23,6 +23,7 @@
+ struct vpu_msg_handler {
+ 	u32 id;
+ 	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
++	u32 is_str;
+ };
+ 
+ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
+ {
+ 	char *str = (char *)pkt->data;
+ 
+-	if (strlen(str))
++	if (*str)
+ 		dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ 	else
+ 		dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
+@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
+ 	vpu_inst_unlock(inst);
+ }
+ 
++static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
++{
++	char *str = (char *)pkt->data;
++
++	if (*str)
++		dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
++}
++
++static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
++{
++	if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
++		pkt->hdr.num--;
++	pkt->data[pkt->hdr.num] = 0;
++}
++
+ static struct vpu_msg_handler handlers[] = {
+ 	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ 	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
+ 	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ 	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ 	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+-	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+-	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
++	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
++	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
+ 	{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
++	{VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
+ };
+ 
+ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ 		}
+ 	}
+ 
+-	if (handler && handler->done)
+-		handler->done(inst, msg);
++	if (handler) {
++		if (handler->is_str)
++			vpu_terminate_string_msg(msg);
++		if (handler->done)
++			handler->done(inst, msg);
++	}
+ 
+ 	vpu_response_cmd(inst, msg_id, 1);
+ 
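
The common thread in the vpu_msgs.c hunks above is treating firmware-supplied strings as untrusted: handlers flagged is_str get their payload force-terminated before anything parses it, and the empty-string test becomes a cheap *str dereference instead of strlen(). The idiom reduced to a generic form (names illustrative):

#include <linux/types.h>

/* Clamp-and-terminate: given `len` payload bytes inside a `cap`-byte
 * array, guarantee later string handling cannot run past the array,
 * even if the producer sent no terminator at all.
 */
static void terminate_untrusted_string(char *buf, size_t len, size_t cap)
{
	if (len >= cap)
		len = cap - 1;
	buf[len] = '\0';
}
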
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index 9755d1c8ceb9b..ee31c89eb2153 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -404,8 +404,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+ 	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ 					      struct v4l2_async_subdev);
+ 	of_node_put(ep);
+-	if (IS_ERR(asd))
++	if (IS_ERR(asd)) {
++		v4l2_async_nf_cleanup(&csi2rx->notifier);
+ 		return PTR_ERR(asd);
++	}
+ 
+ 	csi2rx->notifier.ops = &csi2rx_notifier_ops;
+ 
+@@ -467,6 +469,7 @@ static int csi2rx_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_cleanup:
++	v4l2_async_nf_unregister(&csi2rx->notifier);
+ 	v4l2_async_nf_cleanup(&csi2rx->notifier);
+ err_free_priv:
+ 	kfree(csi2rx);
+@@ -477,6 +480,8 @@ static void csi2rx_remove(struct platform_device *pdev)
+ {
+ 	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+ 
++	v4l2_async_nf_unregister(&csi2rx->notifier);
++	v4l2_async_nf_cleanup(&csi2rx->notifier);
+ 	v4l2_async_unregister_subdev(&csi2rx->subdev);
+ 	kfree(csi2rx);
+ }
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 244018365b6f1..03ee8f93bd467 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
+ 	u32 img_stride;
+ 	u32 mem_stride;
+ 	u32 i, enc_quality;
++	u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
+ 
+ 	value = width << 16 | height;
+ 	writel(value, base + JPEG_ENC_IMG_SIZE);
+@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
+ 	writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ 	writel(mem_stride, base + JPEG_ENC_STRIDE);
+ 
+-	enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+-	for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
++	enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
++	for (i = 0; i < nr_enc_quality; i++) {
+ 		if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ 			enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ 			break;
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+index 3177592490bee..6adac857a4779 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
+ 		const struct v4l2_rect *compose;
+ 		u32 out = 0;
+ 
++		ctx = &path->comps[index];
+ 		if (CFG_CHECK(MT8183, p_id))
+ 			out = CFG_COMP(MT8183, ctx->param, outputs[0]);
+ 
+ 		compose = path->composes[out];
+-		ctx = &path->comps[index];
+ 		ret = call_op(ctx, config_frame, cmd, compose);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 9512c0a619667..c376ce1cabac4 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
+ 	return false;
+ }
+ 
++static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
++{
++	struct vb2_v4l2_buffer *next_dst_buf;
++
++	next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
++	if (!next_dst_buf) {
++		ctx->fh.m2m_ctx->is_draining = true;
++		ctx->fh.m2m_ctx->next_buf_last = true;
++		return;
++	}
++
++	v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
++}
++
+ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ 				   struct mxc_jpeg_src_buf *jpeg_src_buf)
+ {
+@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ 	q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ 	if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
+ 		jpeg_src_buf->fmt = q_data_cap->fmt;
+-	if (q_data_cap->fmt != jpeg_src_buf->fmt ||
++	if (ctx->need_initial_source_change_evt ||
++	    q_data_cap->fmt != jpeg_src_buf->fmt ||
+ 	    q_data_cap->w != jpeg_src_buf->w ||
+ 	    q_data_cap->h != jpeg_src_buf->h) {
+ 		dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
+@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ 		mxc_jpeg_sizeimage(q_data_cap);
+ 		notify_src_chg(ctx);
+ 		ctx->source_change = 1;
++		ctx->need_initial_source_change_evt = false;
++		if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
++			mxc_jpeg_set_last_buffer(ctx);
+ 	}
+ 
+ 	return ctx->source_change ? true : false;
+@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
+ 	for (i = 0; i < *nplanes; i++)
+ 		sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
+ 
++	if (V4L2_TYPE_IS_OUTPUT(q->type))
++		ctx->need_initial_source_change_evt = true;
++
+ 	return 0;
+ }
+ 
+@@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
+ 		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ 	}
+ 
+-	if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
+-		v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++	v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++	/* if V4L2_DEC_CMD_STOP is sent before the source change is triggered,
++	 * restore the is_draining flag
++	 */
++	if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
++		ctx->fh.m2m_ctx->is_draining = true;
++
+ 	if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ 	    v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
+ 		notify_eos(ctx);
+@@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
+ 		return -EINVAL;
+ 	for (i = 0; i < q_data->fmt->mem_planes; i++) {
+ 		sizeimage = mxc_jpeg_get_plane_size(q_data, i);
+-		if (vb2_plane_size(vb, i) < sizeimage) {
++		if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
+ 			dev_err(dev, "plane %d too small (%lu < %lu)",
+ 				i, vb2_plane_size(vb, i), sizeimage);
+ 			return -EINVAL;
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+index d80e94cc9d992..dc4afeeff5b65 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
+ 	enum mxc_jpeg_enc_state		enc_state;
+ 	int				slot;
+ 	unsigned int			source_change;
++	bool				need_initial_source_change_evt;
+ 	bool				header_parsed;
+ 	struct v4l2_ctrl_handler	ctrl_handler;
+ 	u8				jpeg_quality;
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+index 76634d242b103..0f5b3845d7b94 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+ 
+ 	ret = vb2_queue_init(q);
+ 	if (ret)
+-		goto err_vd_rel;
++		return ret;
+ 
+ 	vp->pad.flags = MEDIA_PAD_FL_SINK;
+ 	ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ 	if (ret)
+-		goto err_vd_rel;
++		return ret;
+ 
+ 	video_set_drvdata(vfd, vp);
+ 
+@@ -1171,8 +1171,6 @@ err_ctrlh_free:
+ 	v4l2_ctrl_handler_free(&vp->ctrl_handler);
+ err_me_cleanup:
+ 	media_entity_cleanup(&vfd->entity);
+-err_vd_rel:
+-	video_device_release(vfd);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index c0a368bacf880..7299bcdd3bfa4 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
+ 	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ 	if (ctx) {
+ 		vpu_err("frame processing timed out!\n");
+-		ctx->codec_ops->reset(ctx);
++		if (ctx->codec_ops->reset)
++			ctx->codec_ops->reset(ctx);
+ 		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ 	}
+ }
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 0224ff68ab3fc..64d6fb852ae9b 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+ 
+ static int down_scale_factor(struct hantro_ctx *ctx)
+ {
+-	if (ctx->src_fmt.width == ctx->dst_fmt.width)
++	if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ 		return 0;
+ 
+ 	return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+index 816ffa905a4bb..f975276707835 100644
+--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
+ };
+ 
+ static const char * const rk3588_vpu981_vpu_clk_names[] = {
+-	"aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
++	"aclk", "hclk",
+ };
+ 
+ /* VDPU1/VEPU1 */
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+index b51e6a3b8cbeb..f99878eff7ace 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ 	m->priv = args->priv;
+ 	m->network_id = args->network_id;
+ 	m->network_name = kstrdup(args->network_name, GFP_KERNEL);
++	if (!m->network_name)
++		goto free_mux_buf;
++
+ 	m->timing.current_jiffies = get_jiffies_64();
+ 
+ 	if (args->channels)
+ 		m->channels = args->channels;
+ 	else
+ 		if (vidtv_channels_init(m) < 0)
+-			goto free_mux_buf;
++			goto free_mux_network_name;
+ 
+ 	/* will alloc data for pmt_sections after initializing pat */
+ 	if (vidtv_channel_si_init(m) < 0)
+@@ -527,6 +530,8 @@ free_channel_si:
+ 	vidtv_channel_si_destroy(m);
+ free_channels:
+ 	vidtv_channels_destroy(m);
++free_mux_network_name:
++	kfree(m->network_name);
+ free_mux_buf:
+ 	vfree(m->mux_buf);
+ free_mux:
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index ce0b7a6e92dc3..2a51c898c11eb 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
+ 
+ 	desc->service_name_len = service_name_len;
+ 
+-	if (service_name && service_name_len)
++	if (service_name && service_name_len) {
+ 		desc->service_name = kstrdup(service_name, GFP_KERNEL);
++		if (!desc->service_name)
++			goto free_desc;
++	}
+ 
+ 	desc->provider_name_len = provider_name_len;
+ 
+-	if (provider_name && provider_name_len)
++	if (provider_name && provider_name_len) {
+ 		desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
++		if (!desc->provider_name)
++			goto free_desc_service_name;
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
++
++free_desc_service_name:
++	if (service_name && service_name_len)
++		kfree(desc->service_name);
++free_desc:
++	kfree(desc);
++	return NULL;
+ }
+ 
+ struct vidtv_psi_desc_registration
+@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
+ 
+ 	desc->length = network_name_len;
+ 
+-	if (network_name && network_name_len)
++	if (network_name && network_name_len) {
+ 		desc->network_name = kstrdup(network_name, GFP_KERNEL);
++		if (!desc->network_name) {
++			kfree(desc);
++			return NULL;
++		}
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
+@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
+ 		iso_language_code = "eng";
+ 
+ 	desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
++	if (!desc->iso_language_code)
++		goto free_desc;
+ 
+-	if (event_name && event_name_len)
++	if (event_name && event_name_len) {
+ 		desc->event_name = kstrdup(event_name, GFP_KERNEL);
++		if (!desc->event_name)
++			goto free_desc_language_code;
++	}
+ 
+-	if (text && text_len)
++	if (text && text_len) {
+ 		desc->text = kstrdup(text, GFP_KERNEL);
++		if (!desc->text)
++			goto free_desc_event_name;
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
++
++free_desc_event_name:
++	if (event_name && event_name_len)
++		kfree(desc->event_name);
++free_desc_language_code:
++	kfree(desc->iso_language_code);
++free_desc:
++	kfree(desc);
++	return NULL;
+ }
+ 
+ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
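
All three vidtv hunks above fix the same omission: kstrdup() can fail, and a failure halfway through building a descriptor must unwind the allocations made so far. The goto-unwind idiom they adopt, in a self-contained form (names illustrative):

#include <linux/slab.h>
#include <linux/string.h>

struct desc {
	char *a;
	char *b;
};

static struct desc *desc_init(const char *a, const char *b)
{
	struct desc *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->a = kstrdup(a, GFP_KERNEL);
	if (!d->a)
		goto free_desc;
	d->b = kstrdup(b, GFP_KERNEL);
	if (!d->b)
		goto free_a;
	return d;

free_a:
	kfree(d->a);
free_desc:
	kfree(d);
	return NULL;
}
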
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 33a2aa8907e65..4eb7dd4599b7e 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
+-			if (msg[0].len < 3 || msg[1].len < 1)
+-				return -EOPNOTSUPP;
++			if (msg[0].len < 3 || msg[1].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto unlock;
++			}
+ 			/* demod access via firmware interface */
+ 			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
+-			if (msg[0].len < 3)
+-				return -EOPNOTSUPP;
++			if (msg[0].len < 3) {
++				ret = -EOPNOTSUPP;
++				goto unlock;
++			}
+ 			/* demod access via firmware interface */
+ 			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 		ret = -EOPNOTSUPP;
+ 	}
+ 
++unlock:
+ 	mutex_unlock(&d->i2c_mutex);
+ 
+ 	if (ret < 0)
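
Both af9035 hunks above fix the same bug class: an early return between mutex_lock() and mutex_unlock() would leave the I2C mutex held forever. The fix routes every exit through a single label; the shape of the pattern, reduced to its essentials (illustrative only):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(xfer_mutex);

static int xfer_locked(bool bad_request)
{
	int ret;

	mutex_lock(&xfer_mutex);
	if (bad_request) {
		ret = -EOPNOTSUPP;	/* was: return -EOPNOTSUPP, leaking the lock */
		goto unlock;
	}
	ret = 0;			/* the real transfer would run here */
unlock:
	mutex_unlock(&xfer_mutex);
	return ret;
}
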
+diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
+index 8fb83b39f5f5b..0952f1210b772 100644
+--- a/drivers/memory/tegra/tegra234.c
++++ b/drivers/memory/tegra/tegra234.c
+@@ -852,6 +852,10 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+ 	msg.rx.data = &bwmgr_resp;
+ 	msg.rx.size = sizeof(bwmgr_resp);
+ 
++	if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
++	    pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
++		msg.flags = TEGRA_BPMP_MESSAGE_RESET;
++
+ 	ret = tegra_bpmp_transfer(mc->bpmp, &msg);
+ 	if (ret < 0) {
+ 		dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 02cf4f3e91d76..de5d894ac04af 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
+ 	arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ 	arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+ 
++	/* Use left headphone speaker for HP vs line-out detection */
++	arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index c7510434380a4..fbbe82c6e75b5 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -826,7 +826,6 @@ out_stop_rx:
+ 	dln2_stop_rx_urbs(dln2);
+ 
+ out_free:
+-	usb_put_dev(dln2->usb_dev);
+ 	dln2_free(dln2);
+ 
+ 	return ret;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 0ed7c0d7784e1..2b85509a90fc2 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -146,6 +146,7 @@ static int mfd_add_device(struct device *parent, int id,
+ 	struct platform_device *pdev;
+ 	struct device_node *np = NULL;
+ 	struct mfd_of_node_entry *of_entry, *tmp;
++	bool disabled = false;
+ 	int ret = -ENOMEM;
+ 	int platform_id;
+ 	int r;
+@@ -183,11 +184,10 @@ static int mfd_add_device(struct device *parent, int id,
+ 	if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
+ 		for_each_child_of_node(parent->of_node, np) {
+ 			if (of_device_is_compatible(np, cell->of_compatible)) {
+-				/* Ignore 'disabled' devices error free */
++				/* Skip 'disabled' devices */
+ 				if (!of_device_is_available(np)) {
+-					of_node_put(np);
+-					ret = 0;
+-					goto fail_alias;
++					disabled = true;
++					continue;
+ 				}
+ 
+ 				ret = mfd_match_of_node_to_dev(pdev, np, cell);
+@@ -197,10 +197,17 @@ static int mfd_add_device(struct device *parent, int id,
+ 				if (ret)
+ 					goto fail_alias;
+ 
+-				break;
++				goto match;
+ 			}
+ 		}
+ 
++		if (disabled) {
++			/* Ignore 'disabled' devices error free */
++			ret = 0;
++			goto fail_alias;
++		}
++
++match:
+ 		if (!pdev->dev.of_node)
+ 			pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ 				cell->name, platform_id);
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index 01d2257deea46..1f2542dcba4de 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+ 
+ #include <linux/ti_wilink_st.h>
++#include <linux/netdevice.h>
+ 
+ extern void st_kim_recv(void *, const unsigned char *, long);
+ void st_int_recv(void *, const unsigned char *, long);
+@@ -435,7 +436,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ 	case ST_LL_AWAKE_TO_ASLEEP:
+ 		pr_err("ST LL is illegal state(%ld),"
+ 			   "purging received skb.", st_ll_getstate(st_gdata));
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	case ST_LL_ASLEEP:
+ 		skb_queue_tail(&st_gdata->tx_waitq, skb);
+@@ -444,7 +445,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ 	default:
+ 		pr_err("ST LL is illegal state(%ld),"
+ 			   "purging received skb.", st_ll_getstate(st_gdata));
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	}
+ 
+@@ -498,7 +499,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
+ 				spin_unlock_irqrestore(&st_data->lock, flags);
+ 				break;
+ 			}
+-			kfree_skb(skb);
++			dev_kfree_skb_irq(skb);
+ 			spin_unlock_irqrestore(&st_data->lock, flags);
+ 		}
+ 		/* if wake-up is set in another context- restart sending */
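
The ti-st hunks above swap kfree_skb() for dev_kfree_skb_irq() because these paths run under spin_lock_irqsave(), and the inline free is not meant to be called with hardware interrupts disabled; dev_kfree_skb_irq() instead queues the skb for the NET_TX softirq. A sketch of the rule (illustrative only):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drop_skb_locked(spinlock_t *lock, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* kfree_skb() is not safe with IRQs disabled; defer instead. */
	dev_kfree_skb_irq(skb);
	spin_unlock_irqrestore(lock, flags);
}
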
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 4a4bab9aa7263..89cd48fcec79f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ 	case 3: /* MMC v3.1 - v3.3 */
+ 	case 4: /* MMC v4 */
+ 		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
+-		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 8);
++		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
+ 		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
+ 		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
+ 		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
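
The one-character mmc change above widens the OEMID extraction from 8 to 16 bits: UNSTUFF_BITS(resp, start, size) reads `size` bits starting at bit `start`, so the field grows from CID bits [111:104] to [119:104]. With the 128-bit CID stored as four 32-bit words, resp[0] holding bits [127:96], the widened read amounts to the following (a standalone illustration, not the driver's macro):

#include <linux/types.h>

/* Bits [119:104] of the CID live in resp[0], which holds [127:96]. */
static unsigned int cid_oemid(const u32 resp[4])
{
	return (resp[0] >> 8) & 0xffff;
}
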
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index 7f9334a8af500..735d5de3caa0e 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
+ 	struct can_frame *cf;
+ 	int err;
+ 
+-	BUG_ON(netif_carrier_ok(dev));
++	if (netif_carrier_ok(dev))
++		netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+ 
+ 	/* No synchronization needed because the device is bus-off and
+ 	 * no messages can come in or go out.
+@@ -153,11 +154,12 @@ restart:
+ 	priv->can_stats.restarts++;
+ 
+ 	/* Now restart the device */
+-	err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+ 	netif_carrier_on(dev);
+-	if (err)
++	err = priv->do_set_mode(dev, CAN_MODE_START);
++	if (err) {
+ 		netdev_err(dev, "Error %d during restart", err);
++		netif_carrier_off(dev);
++	}
+ }
+ 
+ static void can_restart_work(struct work_struct *work)
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index f6d05b3ef59ab..3ebd4f779b9bd 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	BUG_ON(idx >= priv->echo_skb_max);
++	if (idx >= priv->echo_skb_max) {
++		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++			   __func__, idx, priv->echo_skb_max);
++		return -EINVAL;
++	}
+ 
+ 	/* check flag whether this packet has to be looped back */
+ 	if (!(dev->flags & IFF_ECHO) ||
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
+index 0c7f7505632cd..5e3a72b7c4691 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
+@@ -2230,6 +2230,7 @@ static int es58x_probe(struct usb_interface *intf,
+ 
+ 	for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
+ 		int ret = es58x_init_netdev(es58x_dev, ch_idx);
++
+ 		if (ret) {
+ 			es58x_free_netdevs(es58x_dev);
+ 			return ret;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
+index c1ba1a4e8857b..2e183bdeedd72 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
+@@ -378,13 +378,13 @@ struct es58x_sw_version {
+ 
+ /**
+  * struct es58x_hw_revision - Hardware revision number.
+- * @letter: Revision letter.
++ * @letter: Revision letter, an alphanumeric character.
+  * @major: Version major number, represented on three digits.
+  * @minor: Version minor number, represented on three digits.
+  *
+  * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+- * a letter and 'x' a digit. It can be retrieved from the product
+- * information string.
++ * an alphanumeric character and 'x' a digit. It can be retrieved from
++ * the product information string.
+  */
+ struct es58x_hw_revision {
+ 	char letter;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+index 9fba29e2f57c6..635edeb8f68cd 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+@@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+  * firmware version, the bootloader version and the hardware
+  * revision.
+  *
+- * If the function fails, simply emit a log message and continue
+- * because product information is not critical for the driver to
+- * operate.
++ * If the function fails, set the version or revision to an invalid
++ * value and emit an informational message. Continue probing because the
++ * product information is not critical for the driver to operate.
+  */
+ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ {
++	static const struct es58x_sw_version sw_version_not_set = {
++		.major = -1,
++		.minor = -1,
++		.revision = -1,
++	};
++	static const struct es58x_hw_revision hw_revision_not_set = {
++		.letter = '\0',
++		.major = -1,
++		.minor = -1,
++	};
+ 	char *prod_info;
+ 
++	es58x_dev->firmware_version = sw_version_not_set;
++	es58x_dev->bootloader_version = sw_version_not_set;
++	es58x_dev->hardware_revision = hw_revision_not_set;
++
+ 	prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
+ 	if (!prod_info) {
+ 		dev_warn(es58x_dev->dev,
+@@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ }
+ 
+ /**
+- * es58x_sw_version_is_set() - Check if the version is a valid number.
++ * es58x_sw_version_is_valid() - Check if the version is a valid number.
+  * @sw_ver: Version number of either the firmware or the bootloader.
+  *
+- * If &es58x_sw_version.major, &es58x_sw_version.minor and
+- * &es58x_sw_version.revision are all zero, the product string could
+- * not be parsed and the version number is invalid.
++ * If any of the software version sub-numbers do not fit on two
++ * digits, the version is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the software version is valid, @false otherwise.
+  */
+-static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver)
++static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver)
+ {
+-	return sw_ver->major || sw_ver->minor || sw_ver->revision;
++	return sw_ver->major < 100 && sw_ver->minor < 100 &&
++		sw_ver->revision < 100;
+ }
+ 
+ /**
+- * es58x_hw_revision_is_set() - Check if the revision is a valid number.
++ * es58x_hw_revision_is_valid() - Check if the revision is a valid number.
+  * @hw_rev: Revision number of the hardware.
+  *
+- * If &es58x_hw_revision.letter is the null character, the product
+- * string could not be parsed and the hardware revision number is
+- * invalid.
++ * If &es58x_hw_revision.letter is not an alphanumeric character or if
++ * any of the hardware revision sub-numbers do not fit on three
++ * digits, the revision is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the hardware revision is valid, @false otherwise.
+  */
+-static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev)
++static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev)
+ {
+-	return hw_rev->letter != '\0';
++	return isalnum(hw_rev->letter) && hw_rev->major < 1000 &&
++		hw_rev->minor < 1000;
+ }
+ 
+ /**
+@@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ 	char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ 	int ret = 0;
+ 
+-	if (es58x_sw_version_is_set(fw_ver)) {
++	if (es58x_sw_version_is_valid(fw_ver)) {
+ 		snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ 			 fw_ver->major, fw_ver->minor, fw_ver->revision);
+ 		ret = devlink_info_version_running_put(req,
+@@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ 			return ret;
+ 	}
+ 
+-	if (es58x_sw_version_is_set(bl_ver)) {
++	if (es58x_sw_version_is_valid(bl_ver)) {
+ 		snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ 			 bl_ver->major, bl_ver->minor, bl_ver->revision);
+ 		ret = devlink_info_version_running_put(req,
+@@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ 			return ret;
+ 	}
+ 
+-	if (es58x_hw_revision_is_set(hw_rev)) {
++	if (es58x_hw_revision_is_valid(hw_rev)) {
+ 		snprintf(buf, sizeof(buf), "%c%03u/%03u",
+ 			 hw_rev->letter, hw_rev->major, hw_rev->minor);
+ 		ret = devlink_info_version_fixed_put(req,
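
Why "-1" works as the not-set marker in es58x_parse_product_info() above: the version and revision fields are printed with %02u/%03u and so are unsigned; storing -1 wraps to the type's maximum, which can never satisfy the new two- and three-digit validity bounds. A worked check, assuming u8 fields for illustration:

#include <linux/types.h>

static bool sw_version_is_valid_ex(u8 major, u8 minor, u8 revision)
{
	return major < 100 && minor < 100 && revision < 100;
}

/* sw_version_is_valid_ex(255, 255, 255) -> false (the -1 sentinel)
 * sw_version_is_valid_ex(12, 34, 56)    -> true  (fits "%02u.%02u.%02u")
 */
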
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index cb2810f175ccd..9d1de202ac714 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18082,7 +18082,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ 	if (netif_running(dev))
+ 		dev_close(dev);
+ 
+-	tg3_power_down(tp);
++	if (system_state == SYSTEM_POWER_OFF)
++		tg3_power_down(tp);
+ 
+ 	rtnl_unlock();
+ 
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7750702900fa6..6f6525983130e 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (tp->snd_una != snd_una) {
+ 			tp->snd_una = snd_una;
+-			tp->rcv_tstamp = tcp_time_stamp(tp);
++			tp->rcv_tstamp = tcp_jiffies32;
+ 			if (tp->snd_una == tp->snd_nxt &&
+ 			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ 				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 35461165de0d2..b92e3aa7cd041 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ 	if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ 	    priv->num_tx_rings) {
+ 		NL_SET_ERR_MSG_FMT_MOD(extack,
+-				       "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
++				       "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ 				       num_xdp_tx_queues,
+ 				       priv->min_num_stack_tx_queues,
+ 				       priv->num_tx_rings);
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index e6f1711d9be04..465a6db5a40a8 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -191,7 +191,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
+ 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ 		       priv->rx_cfg.num_queues;
+ 	priv->stats_report_len = struct_size(priv->stats_report, stats,
+-					     tx_stats_num + rx_stats_num);
++					     size_add(tx_stats_num, rx_stats_num));
+ 	priv->stats_report =
+ 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ 				   &priv->stats_report_bus, GFP_KERNEL);
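
The gve change above is overflow hardening: with a plain `+`, a huge pair of stats counts could wrap and make struct_size() compute a too-small length, whereas size_add() from <linux/overflow.h> saturates at SIZE_MAX so the allocation simply fails. A minimal sketch with a hypothetical report struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct stats_report {
	u64 written;
	u64 stats[];		/* flexible array, sized at runtime */
};

static struct stats_report *alloc_report(size_t tx_num, size_t rx_num)
{
	struct stats_report *r;

	/* size_add() saturates instead of wrapping, so a poisoned count
	 * turns into a failing allocation, not an undersized buffer.
	 */
	r = kzalloc(struct_size(r, stats, size_add(tx_num, rx_num)),
		    GFP_KERNEL);
	return r;
}
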
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index a86bfa3bba74a..fc9ebef70bd9d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16324,11 +16324,15 @@ static void i40e_remove(struct pci_dev *pdev)
+ 			i40e_switch_branch_release(pf->veb[i]);
+ 	}
+ 
+-	/* Now we can shutdown the PF's VSI, just before we kill
++	/* Now we can shutdown the PF's VSIs, just before we kill
+ 	 * adminq and hmc.
+ 	 */
+-	if (pf->vsi[pf->lan_vsi])
+-		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
++	for (i = pf->num_alloc_vsi; i--;)
++		if (pf->vsi[i]) {
++			i40e_vsi_close(pf->vsi[i]);
++			i40e_vsi_release(pf->vsi[i]);
++			pf->vsi[i] = NULL;
++		}
+ 
+ 	i40e_cloud_filter_exit(pf);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 4d4508e04b1d2..7f6f1677199b9 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -298,8 +298,6 @@ struct iavf_adapter {
+ #define IAVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
+ #define IAVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(11)
+ #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(12)
+-#define IAVF_FLAG_PROMISC_ON			BIT(13)
+-#define IAVF_FLAG_ALLMULTI_ON			BIT(14)
+ #define IAVF_FLAG_LEGACY_RX			BIT(15)
+ #define IAVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
+ #define IAVF_FLAG_QUEUES_DISABLED		BIT(17)
+@@ -325,10 +323,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_AQ_SET_HENA			BIT_ULL(12)
+ #define IAVF_FLAG_AQ_SET_RSS_KEY		BIT_ULL(13)
+ #define IAVF_FLAG_AQ_SET_RSS_LUT		BIT_ULL(14)
+-#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT_ULL(15)
+-#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT_ULL(16)
+-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT_ULL(17)
+-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT_ULL(18)
++#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE	BIT_ULL(15)
+ #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT_ULL(19)
+ #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT_ULL(20)
+ #define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT_ULL(21)
+@@ -365,6 +360,12 @@ struct iavf_adapter {
+ 	(IAVF_EXTENDED_CAP_SEND_VLAN_V2 |		\
+ 	 IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ 
++	/* Lock to prevent possible clobbering of
++	 * current_netdev_promisc_flags
++	 */
++	spinlock_t current_netdev_promisc_flags_lock;
++	netdev_features_t current_netdev_promisc_flags;
++
+ 	/* OS defined structs */
+ 	struct net_device *netdev;
+ 	struct pci_dev *pdev;
+@@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
++void iavf_set_promiscuous(struct iavf_adapter *adapter);
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+ int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 13bfc9333a8c3..65ef588eb1aa9 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1186,6 +1186,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
+ 	return 0;
+ }
+ 
++/**
++ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
++ * @adapter: device specific adapter
++ */
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
++{
++	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
++		(IFF_PROMISC | IFF_ALLMULTI);
++}
++
+ /**
+  * iavf_set_rx_mode - NDO callback to set the netdev filters
+  * @netdev: network interface device structure
+@@ -1199,19 +1209,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
+ 	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
+-	if (netdev->flags & IFF_PROMISC &&
+-	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+-		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
+-	else if (!(netdev->flags & IFF_PROMISC) &&
+-		 adapter->flags & IAVF_FLAG_PROMISC_ON)
+-		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
+-
+-	if (netdev->flags & IFF_ALLMULTI &&
+-	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+-		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+-	else if (!(netdev->flags & IFF_ALLMULTI) &&
+-		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+-		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
++	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
++	if (iavf_promiscuous_mode_changed(adapter))
++		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ }
+ 
+ /**
+@@ -2162,19 +2163,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+ 		return 0;
+ 	}
+ 
+-	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+-		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+-				       FLAG_VF_MULTICAST_PROMISC);
+-		return 0;
+-	}
+-
+-	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+-		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+-		return 0;
+-	}
+-	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
+-	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+-		iavf_set_promiscuous(adapter, 0);
++	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
++		iavf_set_promiscuous(adapter);
+ 		return 0;
+ 	}
+ 
+@@ -4970,6 +4960,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	spin_lock_init(&adapter->cloud_filter_list_lock);
+ 	spin_lock_init(&adapter->fdir_fltr_lock);
+ 	spin_lock_init(&adapter->adv_rss_lock);
++	spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
+ 
+ 	INIT_LIST_HEAD(&adapter->mac_filter_list);
+ 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index be3c007ce90a9..e5b36d73e4d56 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -948,14 +948,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ /**
+  * iavf_set_promiscuous
+  * @adapter: adapter structure
+- * @flags: bitmask to control unicast/multicast promiscuous.
+  *
+  * Request that the PF enable promiscuous mode for our VSI.
+  **/
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
++void iavf_set_promiscuous(struct iavf_adapter *adapter)
+ {
++	struct net_device *netdev = adapter->netdev;
+ 	struct virtchnl_promisc_info vpi;
+-	int promisc_all;
++	unsigned int flags;
+ 
+ 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ 		/* bail because we already have a command pending */
+@@ -964,36 +964,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+ 		return;
+ 	}
+ 
+-	promisc_all = FLAG_VF_UNICAST_PROMISC |
+-		      FLAG_VF_MULTICAST_PROMISC;
+-	if ((flags & promisc_all) == promisc_all) {
+-		adapter->flags |= IAVF_FLAG_PROMISC_ON;
+-		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
+-		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+-	}
++	/* prevent changes to promiscuous flags */
++	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+ 
+-	if (flags & FLAG_VF_MULTICAST_PROMISC) {
+-		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+-		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+-		dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+-			 adapter->netdev->name);
++	/* sanity check to prevent duplicate AQ calls */
++	if (!iavf_promiscuous_mode_changed(adapter)) {
++		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
++		/* allow changes to promiscuous flags */
++		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++		return;
+ 	}
+ 
+-	if (!flags) {
+-		if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+-			adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+-			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+-			dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+-		}
++	/* the two flag bits give four combinations, but only three states */
++	if (!(netdev->flags & IFF_PROMISC) &&
++	    netdev->flags & IFF_ALLMULTI) {
++		/* State 1  - only multicast promiscuous mode enabled
++		 * - !IFF_PROMISC && IFF_ALLMULTI
++		 */
++		flags = FLAG_VF_MULTICAST_PROMISC;
++		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
++		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
++	} else if (!(netdev->flags & IFF_PROMISC) &&
++		   !(netdev->flags & IFF_ALLMULTI)) {
++		/* State 2 - unicast/multicast promiscuous mode disabled
++		 * - !IFF_PROMISC && !IFF_ALLMULTI
++		 */
++		flags = 0;
++		adapter->current_netdev_promisc_flags &=
++			~(IFF_PROMISC | IFF_ALLMULTI);
++		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
++	} else {
++		/* State 3 - unicast/multicast promiscuous mode enabled
++		 * - IFF_PROMISC && IFF_ALLMULTI
++		 * - IFF_PROMISC && !IFF_ALLMULTI
++		 */
++		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
++		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
++		if (netdev->flags & IFF_ALLMULTI)
++			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++		else
++			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
+ 
+-		if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+-			adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+-			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+-			dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+-				 adapter->netdev->name);
+-		}
++		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ 	}
+ 
++	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++
++	/* allow changes to promiscuous flags */
++	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++
+ 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ 	vpi.vsi_id = adapter->vsi_res->vsi_id;
+ 	vpi.flags = flags;
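
The iavf rework collapses four request/release AQ flags and two state flags into one cached copy of the promiscuous-related netdev bits plus a single CONFIGURE event, all serialized by the new spinlock. The change-detection idiom at its core is one XOR against the cache (names illustrative):

#include <linux/netdevice.h>

/* true if either promiscuous-related bit differs from the cached copy */
static bool promisc_mode_changed(unsigned int cached_flags,
				 const struct net_device *netdev)
{
	return (cached_flags ^ netdev->flags) &
	       (IFF_PROMISC | IFF_ALLMULTI);
}
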
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 379e1510b70c0..48bcd6f24fcde 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -816,7 +816,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ 	int qidx, sqe_tail, sqe_head;
+ 	struct otx2_snd_queue *sq;
+ 	u64 incr, *ptr, val;
+-	int timeout = 1000;
+ 
+ 	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+@@ -825,15 +824,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ 			continue;
+ 
+ 		incr = (u64)qidx << 32;
+-		while (timeout) {
+-			val = otx2_atomic64_add(incr, ptr);
+-			sqe_head = (val >> 20) & 0x3F;
+-			sqe_tail = (val >> 28) & 0x3F;
+-			if (sqe_head == sqe_tail)
+-				break;
+-			usleep_range(1, 3);
+-			timeout--;
+-		}
++		val = otx2_atomic64_add(incr, ptr);
++		sqe_head = (val >> 20) & 0x3F;
++		sqe_tail = (val >> 28) & 0x3F;
++		if (sqe_head != sqe_tail)
++			usleep_range(50, 60);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 0e81849db3538..5590e30f8e2d0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -972,6 +972,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+ void otx2_txschq_stop(struct otx2_nic *pfvf);
+ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
++void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ 		    dma_addr_t *dma);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 9ded98bb1c890..472d6982eabd2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1192,31 +1192,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ };
+ 
+ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
+-	"NIX_SND_STATUS_GOOD",
+-	"NIX_SND_STATUS_SQ_CTX_FAULT",
+-	"NIX_SND_STATUS_SQ_CTX_POISON",
+-	"NIX_SND_STATUS_SQB_FAULT",
+-	"NIX_SND_STATUS_SQB_POISON",
+-	"NIX_SND_STATUS_HDR_ERR",
+-	"NIX_SND_STATUS_EXT_ERR",
+-	"NIX_SND_STATUS_JUMP_FAULT",
+-	"NIX_SND_STATUS_JUMP_POISON",
+-	"NIX_SND_STATUS_CRC_ERR",
+-	"NIX_SND_STATUS_IMM_ERR",
+-	"NIX_SND_STATUS_SG_ERR",
+-	"NIX_SND_STATUS_MEM_ERR",
+-	"NIX_SND_STATUS_INVALID_SUBDC",
+-	"NIX_SND_STATUS_SUBDC_ORDER_ERR",
+-	"NIX_SND_STATUS_DATA_FAULT",
+-	"NIX_SND_STATUS_DATA_POISON",
+-	"NIX_SND_STATUS_NPC_DROP_ACTION",
+-	"NIX_SND_STATUS_LOCK_VIOL",
+-	"NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+-	"NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+-	"NIX_SND_STATUS_NPC_MCAST_ABORT",
+-	"NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+-	"NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+-	"NIX_SND_STATUS_SEND_STATS_ERR",
++	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
++	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
++	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
++	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
++	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
++	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
++	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
++	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
++	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
++	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
++	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
++	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
++	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
++	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
++	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
++	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
++	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
++	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
++	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
++	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
++	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
++	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
++	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
++	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
++	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
++	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+ };
+ 
+ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+@@ -1237,14 +1238,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			continue;
+ 
+ 		if (val & BIT_ULL(42)) {
+-			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++			netdev_err(pf->netdev,
++				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ 		} else {
+ 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ 					   qidx);
+ 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+-				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
++				netdev_err(pf->netdev,
++					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ 					   qidx);
+ 		}
+ 
+@@ -1271,7 +1274,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			     (val & NIX_SQINT_BITS));
+ 
+ 		if (val & BIT_ULL(42)) {
+-			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++			netdev_err(pf->netdev,
++				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ 			goto done;
+ 		}
+@@ -1281,8 +1285,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			goto chk_mnq_err_dbg;
+ 
+ 		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+-		netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx)  err=%s\n",
+-			   qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
++		netdev_err(pf->netdev,
++			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
++			   qidx, sq_op_err_dbg,
++			   nix_sqoperr_e_str[sq_op_err_code],
++			   sq_op_err_code);
+ 
+ 		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+ 
+@@ -1299,16 +1306,21 @@ chk_mnq_err_dbg:
+ 			goto chk_snd_err_dbg;
+ 
+ 		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+-		netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx)  err=%s\n",
+-			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code]);
++		netdev_err(pf->netdev,
++			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
++			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code],
++			   mnq_err_code);
+ 		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+ 
+ chk_snd_err_dbg:
+ 		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ 		if (snd_err_dbg & BIT(44)) {
+ 			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+-			netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+-				   qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
++			netdev_err(pf->netdev,
++				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
++				   qidx, snd_err_dbg,
++				   nix_snd_status_e_str[snd_err_code],
++				   snd_err_code);
+ 			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ 		}
+ 
+@@ -1588,6 +1600,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ 		else
+ 			otx2_cleanup_tx_cqes(pf, cq);
+ 	}
++	otx2_free_pending_sqe(pf);
+ 
+ 	otx2_free_sq_res(pf);
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+index fa37b9f312cae..4e5899d8fa2e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+@@ -318,23 +318,23 @@ enum nix_snd_status_e {
+ 	NIX_SND_STATUS_EXT_ERR = 0x6,
+ 	NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ 	NIX_SND_STATUS_JUMP_POISON = 0x8,
+-	NIX_SND_STATUS_CRC_ERR = 0x9,
+-	NIX_SND_STATUS_IMM_ERR = 0x10,
+-	NIX_SND_STATUS_SG_ERR = 0x11,
+-	NIX_SND_STATUS_MEM_ERR = 0x12,
+-	NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+-	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+-	NIX_SND_STATUS_DATA_FAULT = 0x15,
+-	NIX_SND_STATUS_DATA_POISON = 0x16,
+-	NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+-	NIX_SND_STATUS_LOCK_VIOL = 0x18,
+-	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+-	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+-	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+-	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+-	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+-	NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+-	NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
++	NIX_SND_STATUS_CRC_ERR = 0x10,
++	NIX_SND_STATUS_IMM_ERR = 0x11,
++	NIX_SND_STATUS_SG_ERR = 0x12,
++	NIX_SND_STATUS_MEM_ERR = 0x13,
++	NIX_SND_STATUS_INVALID_SUBDC = 0x14,
++	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
++	NIX_SND_STATUS_DATA_FAULT = 0x16,
++	NIX_SND_STATUS_DATA_POISON = 0x17,
++	NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
++	NIX_SND_STATUS_LOCK_VIOL = 0x21,
++	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
++	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
++	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
++	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
++	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
++	NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
++	NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ 	NIX_SND_STATUS_MAX,
+ };
+ 
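
The two otx2 hunks above belong together: the NIX send-status codes are sparse (note the jumps from 0x8 to 0x10 and from 0x17 to 0x20 in the corrected enum), so the old positional string table attributed wrong names to the raw codes. Designated initializers index each string by its enum value and therefore stay correct across gaps, while unnamed slots remain NULL. In miniature:

enum snd_status {
	SND_STATUS_GOOD		= 0x0,
	SND_STATUS_CRC_ERR	= 0x10,	/* gap: 0x1..0xf unused here */
	SND_STATUS_MAX,
};

static const char * const snd_status_str[SND_STATUS_MAX] = {
	[SND_STATUS_GOOD]	= "SND_STATUS_GOOD",
	[SND_STATUS_CRC_ERR]	= "SND_STATUS_CRC_ERR",
	/* unnamed slots stay NULL, so printers can spot unknown codes */
};
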
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 53b2a4ef52985..6ee15f3c25ede 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
+ 
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ {
++	int tx_pkts = 0, tx_bytes = 0;
+ 	struct sk_buff *skb = NULL;
+ 	struct otx2_snd_queue *sq;
+ 	struct nix_cqe_tx_s *cqe;
++	struct netdev_queue *txq;
+ 	int processed_cqe = 0;
+ 	struct sg_list *sg;
+ 	int qidx;
+@@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ 		sg = &sq->sg[cqe->comp.sqe_id];
+ 		skb = (struct sk_buff *)sg->skb;
+ 		if (skb) {
++			tx_bytes += skb->len;
++			tx_pkts++;
+ 			otx2_dma_unmap_skb_frags(pfvf, sg);
+ 			dev_kfree_skb_any(skb);
+ 			sg->skb = (u64)NULL;
+ 		}
+ 	}
+ 
++	if (likely(tx_pkts)) {
++		if (qidx >= pfvf->hw.tx_queues)
++			qidx -= pfvf->hw.xdp_queues;
++		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
++		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++	}
+ 	/* Free CQEs to HW */
+ 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ 		     ((u64)cq->cq_idx << 32) | processed_cqe);
+@@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+ 	return err;
+ }
+ 
++void otx2_free_pending_sqe(struct otx2_nic *pfvf)
++{
++	int tx_pkts = 0, tx_bytes = 0;
++	struct sk_buff *skb = NULL;
++	struct otx2_snd_queue *sq;
++	struct netdev_queue *txq;
++	struct sg_list *sg;
++	int sq_idx, sqe;
++
++	for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
++		sq = &pfvf->qset.sq[sq_idx];
++		for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
++			sg = &sq->sg[sqe];
++			skb = (struct sk_buff *)sg->skb;
++			if (skb) {
++				tx_bytes += skb->len;
++				tx_pkts++;
++				otx2_dma_unmap_skb_frags(pfvf, sg);
++				dev_kfree_skb_any(skb);
++				sg->skb = (u64)NULL;
++			}
++		}
++
++		if (!tx_pkts)
++			continue;
++		txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
++		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++		tx_pkts = 0;
++		tx_bytes = 0;
++	}
++}
++
+ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ 				int len, int *offset)
+ {
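
Both otx2_cleanup_tx_cqes() and the new otx2_free_pending_sqe() now report the skbs they free to byte queue limits via netdev_tx_completed_queue(); without that, BQL would keep counting those bytes as in flight and could leave the queue stopped after an interface reset. The reporting step, reduced to a sketch:

#include <linux/netdevice.h>

/* after freeing pkts skbs totalling bytes that will never complete on HW */
static void drain_report(struct net_device *ndev, int qidx,
			 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, qidx);

	netdev_tx_completed_queue(txq, pkts, bytes);
}
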
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index 0a50bb98c5ea4..20652c4e739a8 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID	BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH		BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH		BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR	BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR	BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT		BIT(18)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index e2aced7ab4547..95f63fcf4ba1f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+ 	 * is 2^ACL_MAX_BF_LOG
+ 	 */
+ 	bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+-	bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
++	bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ 		     GFP_KERNEL);
+ 	if (!bf)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 361b90007148b..0c76c162b8a9f 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2582,9 +2582,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ 
+ 	if (dev->flags & IFF_PROMISC) {
+ 		rx_mode |= AcceptAllPhys;
++	} else if (!(dev->flags & IFF_MULTICAST)) {
++		rx_mode &= ~AcceptMulticast;
+ 	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ 		   dev->flags & IFF_ALLMULTI ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
++		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
++		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
++		   tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ 		/* accept all multicasts */
+ 	} else if (netdev_mc_empty(dev)) {
+ 		rx_mode &= ~AcceptMulticast;
+@@ -4596,7 +4600,11 @@ static void r8169_phylink_handler(struct net_device *ndev)
+ 	if (netif_carrier_ok(ndev)) {
+ 		rtl_link_chg_patch(tp);
+ 		pm_request_resume(d);
++		netif_wake_queue(tp->dev);
+ 	} else {
++		/* In few cases rx is broken after link-down otherwise */
++		if (rtl_is_8125(tp))
++			rtl_reset_work(tp);
+ 		pm_runtime_idle(d);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 1913385df6856..880a75bf2eb1f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -222,7 +222,7 @@
+ 	((val) << XGMAC_PPS_MINIDX(x))
+ #define XGMAC_PPSCMD_START		0x2
+ #define XGMAC_PPSCMD_STOP		0x5
+-#define XGMAC_PPSEN0			BIT(4)
++#define XGMAC_PPSENx(x)			BIT(4 + (x) * 8)
+ #define XGMAC_PPSx_TARGET_TIME_SEC(x)	(0x00000d80 + (x) * 0x10)
+ #define XGMAC_PPSx_TARGET_TIME_NSEC(x)	(0x00000d84 + (x) * 0x10)
+ #define XGMAC_TRGTBUSY0			BIT(31)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index a0c2ef8bb0ac8..35f8c5933d3ad 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1138,7 +1138,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ 
+ 	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ 	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+-	val |= XGMAC_PPSEN0;
++
++	/* The XGMAC core has at most 4 PPS outputs.
++	 *
++	 * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is selectable
++	 * for PPS0 only, via PPSEN0. PPS{1,2,3} are in Flexible mode by
++	 * default and cannot be switched to Fixed mode, since PPSEN{1,2,3}
++	 * are read-only, reserved to 0. Setting PPSEN{1,2,3} anyway does not
++	 * make things worse ;-)
++	 *
++	 * From XGMAC Core 3.20 onward, PPSEN{0,1,2,3} are writable and must
++	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
++	 */
++	val |= XGMAC_PPSENx(index);
+ 
+ 	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+ 
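
For reference, with the per-output PPS control fields spaced 8 bits apart, the new macro expands so that each output's enable bit lands in its own field, and index 0 reproduces the old XGMAC_PPSEN0:

#include <linux/bits.h>

#define XGMAC_PPSENx(x)	BIT(4 + (x) * 8)

/* XGMAC_PPSENx(0) == BIT(4)	XGMAC_PPSENx(2) == BIT(20)
 * XGMAC_PPSENx(1) == BIT(12)	XGMAC_PPSENx(3) == BIT(28)
 */
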
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 50d7eacfec582..87e67121477cb 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
+ 	struct spider_net_card *card;
+ 
+ 	netdev = alloc_etherdev(struct_size(card, darray,
+-					    tx_descriptors + rx_descriptors));
++					    size_add(tx_descriptors, rx_descriptors)));
+ 	if (!netdev)
+ 		return NULL;
+ 
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 477b4d4f860bd..bace989591f75 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -629,7 +629,7 @@ static void __gtp_encap_destroy(struct sock *sk)
+ 			gtp->sk0 = NULL;
+ 		else
+ 			gtp->sk1u = NULL;
+-		udp_sk(sk)->encap_type = 0;
++		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ 		rcu_assign_sk_user_data(sk, NULL);
+ 		release_sock(sk);
+ 		sock_put(sk);
+@@ -681,7 +681,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+ 
+-	switch (udp_sk(sk)->encap_type) {
++	switch (READ_ONCE(udp_sk(sk)->encap_type)) {
+ 	case UDP_ENCAP_GTP0:
+ 		netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ 		ret = gtp0_udp_encap_recv(gtp, skb);
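
encap_type is written under the socket lock in __gtp_encap_destroy() but read from the UDP receive path with no lock held; the WRITE_ONCE()/READ_ONCE() pair marks that lockless access and keeps the compiler from tearing or re-loading the value. The general shape, with a plain int standing in for udp_sk(sk)->encap_type:

#include <linux/compiler.h>

static int encap_type;

static void encap_disable(void)		/* writer, under the socket lock */
{
	WRITE_ONCE(encap_type, 0);
}

static int encap_read(void)		/* lockless reader (RX path) */
{
	return READ_ONCE(encap_type);
}
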
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index c0c49f1813673..21e9cac731218 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -441,12 +441,12 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 
+ 	err = ip_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out;
+ err:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out:
+ 	return ret;
+@@ -482,12 +482,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 
+ 	err = ip6_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out;
+ err:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out:
+ 	return ret;
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 1b55928e89b8a..57c79f5f29916 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
+ 		s->rx_dropped = rx_errs;
+ 		s->tx_dropped = tx_drps;
+ 	}
++	s->tx_errors = DEV_STATS_READ(dev, tx_errors);
+ }
+ 
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
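
dev->stats.tx_errors can be bumped concurrently from several transmit contexts, so a plain ++ on the u64 field is a data race (and tears on 32-bit). DEV_STATS_INC()/DEV_STATS_READ() from <linux/netdevice.h> wrap atomic_long operations on the core net_device stats, the same pairing the macsec hunk below moves to. Sketch:

#include <linux/netdevice.h>

static void note_tx_error(struct net_device *dev)
{
	DEV_STATS_INC(dev, tx_errors);		/* atomic_long_inc() */
}

static u64 read_tx_errors(struct net_device *dev)
{
	return DEV_STATS_READ(dev, tx_errors);	/* atomic_long_read() */
}
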
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 1c60548c1ddde..27deb14d20225 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3668,9 +3668,9 @@ static void macsec_get_stats64(struct net_device *dev,
+ 
+ 	dev_fetch_sw_netstats(s, dev->tstats);
+ 
+-	s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+-	s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+-	s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
++	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
++	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ }
+ 
+ static int macsec_get_iflink(const struct net_device *dev)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8e9f4cfe941f6..178d6d0615276 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -80,24 +80,24 @@ struct virtnet_stat_desc {
+ 
+ struct virtnet_sq_stats {
+ 	struct u64_stats_sync syncp;
+-	u64 packets;
+-	u64 bytes;
+-	u64 xdp_tx;
+-	u64 xdp_tx_drops;
+-	u64 kicks;
+-	u64 tx_timeouts;
++	u64_stats_t packets;
++	u64_stats_t bytes;
++	u64_stats_t xdp_tx;
++	u64_stats_t xdp_tx_drops;
++	u64_stats_t kicks;
++	u64_stats_t tx_timeouts;
+ };
+ 
+ struct virtnet_rq_stats {
+ 	struct u64_stats_sync syncp;
+-	u64 packets;
+-	u64 bytes;
+-	u64 drops;
+-	u64 xdp_packets;
+-	u64 xdp_tx;
+-	u64 xdp_redirects;
+-	u64 xdp_drops;
+-	u64 kicks;
++	u64_stats_t packets;
++	u64_stats_t bytes;
++	u64_stats_t drops;
++	u64_stats_t xdp_packets;
++	u64_stats_t xdp_tx;
++	u64_stats_t xdp_redirects;
++	u64_stats_t xdp_drops;
++	u64_stats_t kicks;
+ };
+ 
+ #define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
+@@ -593,8 +593,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+ 		return;
+ 
+ 	u64_stats_update_begin(&sq->stats.syncp);
+-	sq->stats.bytes += bytes;
+-	sq->stats.packets += packets;
++	u64_stats_add(&sq->stats.bytes, bytes);
++	u64_stats_add(&sq->stats.packets, packets);
+ 	u64_stats_update_end(&sq->stats.syncp);
+ }
+ 
+@@ -793,11 +793,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ 	}
+ out:
+ 	u64_stats_update_begin(&sq->stats.syncp);
+-	sq->stats.bytes += bytes;
+-	sq->stats.packets += packets;
+-	sq->stats.xdp_tx += n;
+-	sq->stats.xdp_tx_drops += n - nxmit;
+-	sq->stats.kicks += kicks;
++	u64_stats_add(&sq->stats.bytes, bytes);
++	u64_stats_add(&sq->stats.packets, packets);
++	u64_stats_add(&sq->stats.xdp_tx, n);
++	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
++	u64_stats_add(&sq->stats.kicks, kicks);
+ 	u64_stats_update_end(&sq->stats.syncp);
+ 
+ 	virtnet_xdp_put_sq(vi, sq);
+@@ -829,14 +829,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ 	u32 act;
+ 
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
+-	stats->xdp_packets++;
++	u64_stats_inc(&stats->xdp_packets);
+ 
+ 	switch (act) {
+ 	case XDP_PASS:
+ 		return act;
+ 
+ 	case XDP_TX:
+-		stats->xdp_tx++;
++		u64_stats_inc(&stats->xdp_tx);
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
+ 		if (unlikely(!xdpf)) {
+ 			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+@@ -854,7 +854,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ 		return act;
+ 
+ 	case XDP_REDIRECT:
+-		stats->xdp_redirects++;
++		u64_stats_inc(&stats->xdp_redirects);
+ 		err = xdp_do_redirect(dev, xdp, xdp_prog);
+ 		if (err)
+ 			return XDP_DROP;
+@@ -1050,9 +1050,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ 	return skb;
+ 
+ err_xdp:
+-	stats->xdp_drops++;
++	u64_stats_inc(&stats->xdp_drops);
+ err:
+-	stats->drops++;
++	u64_stats_inc(&stats->drops);
+ 	put_page(page);
+ xdp_xmit:
+ 	return NULL;
+@@ -1071,7 +1071,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ 	struct sk_buff *skb;
+ 
+ 	len -= vi->hdr_len;
+-	stats->bytes += len;
++	u64_stats_add(&stats->bytes, len);
+ 
+ 	if (unlikely(len > GOOD_PACKET_LEN)) {
+ 		pr_debug("%s: rx error: len %u exceeds max size %d\n",
+@@ -1100,7 +1100,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ 		return skb;
+ 
+ err:
+-	stats->drops++;
++	u64_stats_inc(&stats->drops);
+ 	put_page(page);
+ 	return NULL;
+ }
+@@ -1116,14 +1116,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
+ 	struct sk_buff *skb =
+ 		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+ 
+-	stats->bytes += len - vi->hdr_len;
++	u64_stats_add(&stats->bytes, len - vi->hdr_len);
+ 	if (unlikely(!skb))
+ 		goto err;
+ 
+ 	return skb;
+ 
+ err:
+-	stats->drops++;
++	u64_stats_inc(&stats->drops);
+ 	give_pages(rq, page);
+ 	return NULL;
+ }
+@@ -1144,7 +1144,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
+ 			dev->stats.rx_length_errors++;
+ 			break;
+ 		}
+-		stats->bytes += len;
++		u64_stats_add(&stats->bytes, len);
+ 		page = virt_to_head_page(buf);
+ 		put_page(page);
+ 	}
+@@ -1254,7 +1254,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ 			goto err;
+ 		}
+ 
+-		stats->bytes += len;
++		u64_stats_add(&stats->bytes, len);
+ 		page = virt_to_head_page(buf);
+ 		offset = buf - page_address(page);
+ 
+@@ -1418,8 +1418,8 @@ err_xdp:
+ 	put_page(page);
+ 	mergeable_buf_free(rq, num_buf, dev, stats);
+ 
+-	stats->xdp_drops++;
+-	stats->drops++;
++	u64_stats_inc(&stats->xdp_drops);
++	u64_stats_inc(&stats->drops);
+ 	return NULL;
+ }
+ 
+@@ -1443,7 +1443,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
+ 
+ 	head_skb = NULL;
+-	stats->bytes += len - vi->hdr_len;
++	u64_stats_add(&stats->bytes, len - vi->hdr_len);
+ 
+ 	if (unlikely(len > truesize - room)) {
+ 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+@@ -1484,7 +1484,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 			goto err_buf;
+ 		}
+ 
+-		stats->bytes += len;
++		u64_stats_add(&stats->bytes, len);
+ 		page = virt_to_head_page(buf);
+ 
+ 		truesize = mergeable_ctx_to_truesize(ctx);
+@@ -1536,7 +1536,7 @@ err_skb:
+ 	mergeable_buf_free(rq, num_buf, dev, stats);
+ 
+ err_buf:
+-	stats->drops++;
++	u64_stats_inc(&stats->drops);
+ 	dev_kfree_skb(head_skb);
+ 	return NULL;
+ }
+@@ -1797,7 +1797,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+ 		unsigned long flags;
+ 
+ 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
+-		rq->stats.kicks++;
++		u64_stats_inc(&rq->stats.kicks);
+ 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
+ 	}
+ 
+@@ -1877,22 +1877,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ 	struct virtnet_info *vi = rq->vq->vdev->priv;
+ 	struct virtnet_rq_stats stats = {};
+ 	unsigned int len;
++	int packets = 0;
+ 	void *buf;
+ 	int i;
+ 
+ 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
+ 		void *ctx;
+ 
+-		while (stats.packets < budget &&
++		while (packets < budget &&
+ 		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+ 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+-			stats.packets++;
++			packets++;
+ 		}
+ 	} else {
+-		while (stats.packets < budget &&
++		while (packets < budget &&
+ 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+-			stats.packets++;
++			packets++;
+ 		}
+ 	}
+ 
+@@ -1905,17 +1906,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ 		}
+ 	}
+ 
++	u64_stats_set(&stats.packets, packets);
+ 	u64_stats_update_begin(&rq->stats.syncp);
+ 	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ 		size_t offset = virtnet_rq_stats_desc[i].offset;
+-		u64 *item;
++		u64_stats_t *item, *src;
+ 
+-		item = (u64 *)((u8 *)&rq->stats + offset);
+-		*item += *(u64 *)((u8 *)&stats + offset);
++		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
++		src = (u64_stats_t *)((u8 *)&stats + offset);
++		u64_stats_add(item, u64_stats_read(src));
+ 	}
+ 	u64_stats_update_end(&rq->stats.syncp);
+ 
+-	return stats.packets;
++	return packets;
+ }
+ 
+ static void virtnet_poll_cleantx(struct receive_queue *rq)
+@@ -1970,7 +1973,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 		sq = virtnet_xdp_get_sq(vi);
+ 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ 			u64_stats_update_begin(&sq->stats.syncp);
+-			sq->stats.kicks++;
++			u64_stats_inc(&sq->stats.kicks);
+ 			u64_stats_update_end(&sq->stats.syncp);
+ 		}
+ 		virtnet_xdp_put_sq(vi, sq);
+@@ -2182,7 +2185,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (kick || netif_xmit_stopped(txq)) {
+ 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ 			u64_stats_update_begin(&sq->stats.syncp);
+-			sq->stats.kicks++;
++			u64_stats_inc(&sq->stats.kicks);
+ 			u64_stats_update_end(&sq->stats.syncp);
+ 		}
+ 	}
+@@ -2365,16 +2368,16 @@ static void virtnet_stats(struct net_device *dev,
+ 
+ 		do {
+ 			start = u64_stats_fetch_begin(&sq->stats.syncp);
+-			tpackets = sq->stats.packets;
+-			tbytes   = sq->stats.bytes;
+-			terrors  = sq->stats.tx_timeouts;
++			tpackets = u64_stats_read(&sq->stats.packets);
++			tbytes   = u64_stats_read(&sq->stats.bytes);
++			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
+ 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+ 
+ 		do {
+ 			start = u64_stats_fetch_begin(&rq->stats.syncp);
+-			rpackets = rq->stats.packets;
+-			rbytes   = rq->stats.bytes;
+-			rdrops   = rq->stats.drops;
++			rpackets = u64_stats_read(&rq->stats.packets);
++			rbytes   = u64_stats_read(&rq->stats.bytes);
++			rdrops   = u64_stats_read(&rq->stats.drops);
+ 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+ 
+ 		tot->rx_packets += rpackets;
+@@ -2976,17 +2979,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ 	struct virtnet_info *vi = netdev_priv(dev);
+ 	unsigned int idx = 0, start, i, j;
+ 	const u8 *stats_base;
++	const u64_stats_t *p;
+ 	size_t offset;
+ 
+ 	for (i = 0; i < vi->curr_queue_pairs; i++) {
+ 		struct receive_queue *rq = &vi->rq[i];
+ 
+-		stats_base = (u8 *)&rq->stats;
++		stats_base = (const u8 *)&rq->stats;
+ 		do {
+ 			start = u64_stats_fetch_begin(&rq->stats.syncp);
+ 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ 				offset = virtnet_rq_stats_desc[j].offset;
+-				data[idx + j] = *(u64 *)(stats_base + offset);
++				p = (const u64_stats_t *)(stats_base + offset);
++				data[idx + j] = u64_stats_read(p);
+ 			}
+ 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+ 		idx += VIRTNET_RQ_STATS_LEN;
+@@ -2995,12 +3000,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ 	for (i = 0; i < vi->curr_queue_pairs; i++) {
+ 		struct send_queue *sq = &vi->sq[i];
+ 
+-		stats_base = (u8 *)&sq->stats;
++		stats_base = (const u8 *)&sq->stats;
+ 		do {
+ 			start = u64_stats_fetch_begin(&sq->stats.syncp);
+ 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ 				offset = virtnet_sq_stats_desc[j].offset;
+-				data[idx + j] = *(u64 *)(stats_base + offset);
++				p = (const u64_stats_t *)(stats_base + offset);
++				data[idx + j] = u64_stats_read(p);
+ 			}
+ 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+ 		idx += VIRTNET_SQ_STATS_LEN;
+@@ -3550,7 +3556,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
+ 
+ 	u64_stats_update_begin(&sq->stats.syncp);
+-	sq->stats.tx_timeouts++;
++	u64_stats_inc(&sq->stats.tx_timeouts);
+ 	u64_stats_update_end(&sq->stats.syncp);
+ 
+ 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 8c77ade49437f..1cc016c5ca44b 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -9042,6 +9042,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ 	if (ar->state != ATH11K_STATE_ON)
+ 		goto err_fallback;
+ 
++	/* Firmware doesn't provide Tx power during CAC, hence there is no
++	 * need to fetch the stats.
++	 */
++	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
++		mutex_unlock(&ar->conf_mutex);
++		return -EAGAIN;
++	}
++
+ 	req_param.pdev_id = ar->pdev->pdev_id;
+ 	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index a181563ec0851..c0f00343cee93 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -853,10 +853,16 @@ unsupported_wcn6855_soc:
+ 	if (ret)
+ 		goto err_pci_disable_msi;
+ 
++	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
++	if (ret) {
++		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
++		goto err_pci_disable_msi;
++	}
++
+ 	ret = ath11k_mhi_register(ab_pci);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to register mhi: %d\n", ret);
+-		goto err_pci_disable_msi;
++		goto err_irq_affinity_cleanup;
+ 	}
+ 
+ 	ret = ath11k_hal_srng_init(ab);
+@@ -877,12 +883,6 @@ unsupported_wcn6855_soc:
+ 		goto err_ce_free;
+ 	}
+ 
+-	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+-	if (ret) {
+-		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+-		goto err_free_irq;
+-	}
+-
+ 	/* kernel may allocate a dummy vector before request_irq and
+ 	 * then allocate a real vector when request_irq is called.
+ 	 * So get msi_data here again to avoid spurious interrupt
+@@ -891,19 +891,16 @@ unsupported_wcn6855_soc:
+ 	ret = ath11k_pci_config_msi_data(ab_pci);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+-		goto err_irq_affinity_cleanup;
++		goto err_free_irq;
+ 	}
+ 
+ 	ret = ath11k_core_init(ab);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to init core: %d\n", ret);
+-		goto err_irq_affinity_cleanup;
++		goto err_free_irq;
+ 	}
+ 	return 0;
+ 
+-err_irq_affinity_cleanup:
+-	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+-
+ err_free_irq:
+ 	ath11k_pcic_free_irq(ab);
+ 
+@@ -916,6 +913,9 @@ err_hal_srng_deinit:
+ err_mhi_unregister:
+ 	ath11k_mhi_unregister(ab_pci);
+ 
++err_irq_affinity_cleanup:
++	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
++
+ err_pci_disable_msi:
+ 	ath11k_pci_free_msi(ab_pci);
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index ffd9a2018610f..fcb91b8ef00e3 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -3228,7 +3228,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ 		goto out_unlock;
+ 	}
+ 
+-	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
++	if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
+ 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
+ 	else
+ 		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index d3c7c76d6b75e..5d1be6451d587 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -330,8 +330,11 @@ tcl_ring_sel:
+ 
+ fail_unmap_dma:
+ 	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+-	dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+-			 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
++
++	if (skb_cb->paddr_ext_desc)
++		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
++				 sizeof(struct hal_tx_msdu_ext_desc),
++				 DMA_TO_DEVICE);
+ 
+ fail_remove_tx_buf:
+ 	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index 27f4d74a41c80..2788a1b06c17c 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+ 
+ 	INIT_LIST_HEAD(&cd->head);
+ 	cd->freq = freq;
+-	cd->detectors = kmalloc_array(dpd->num_radar_types,
++	cd->detectors = kcalloc(dpd->num_radar_types,
+ 				      sizeof(*cd->detectors), GFP_ATOMIC);
+ 	if (cd->detectors == NULL)
+ 		goto fail;
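
The dfs_pattern_detector switch to kcalloc() matters because the error path walks and frees every slot of cd->detectors: a zeroed array leaves never-filled slots as NULL, which kfree() accepts, whereas kmalloc_array() left them as garbage pointers. Generically:

#include <linux/slab.h>

struct detector;

static struct detector **alloc_detectors(size_t n)
{
	/* zeroed and overflow-checked: unfilled slots stay NULL, so an
	 * error path can kfree() every slot unconditionally
	 */
	return kcalloc(n, sizeof(struct detector *), GFP_ATOMIC);
}
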
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index b9893b22e41da..42e765fe3cfe1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = {
+ 	.ht_params = &iwl_gl_a_ht_params
+ 
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+  * A-MPDU, with additional overhead to account for processing time.
+  */
+-#define IWL_NUM_RBDS_NON_HE		512
+-#define IWL_NUM_RBDS_BZ_HE		4096
++#define IWL_NUM_RBDS_BZ_EHT		(512 * 16)
+ 
+ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ 	.device_family = IWL_DEVICE_FAMILY_BZ,
+@@ -160,16 +158,16 @@ const struct iwl_cfg iwl_cfg_bz = {
+ 	.fw_name_mac = "bz",
+ 	.uhb_supported = true,
+ 	IWL_DEVICE_BZ,
+-	.features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+-	.num_rbds = IWL_NUM_RBDS_BZ_HE,
++	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++	.num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+ 
+ const struct iwl_cfg iwl_cfg_gl = {
+ 	.fw_name_mac = "gl",
+ 	.uhb_supported = true,
+ 	IWL_DEVICE_BZ,
+-	.features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+-	.num_rbds = IWL_NUM_RBDS_BZ_HE,
++	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++	.num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+ 
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+index ad283fd22e2a2..604e9cef6baac 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+@@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = {
+ 	.ht_params = &iwl_22000_ht_params
+ 
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+  * A-MPDU, with additional overhead to account for processing time.
+  */
+-#define IWL_NUM_RBDS_NON_HE		512
+-#define IWL_NUM_RBDS_SC_HE		4096
++#define IWL_NUM_RBDS_SC_EHT		(512 * 16)
+ 
+ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
+ 	.device_family = IWL_DEVICE_FAMILY_SC,
+@@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = {
+ 	.fw_name_mac = "sc",
+ 	.uhb_supported = true,
+ 	IWL_DEVICE_SC,
+-	.features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+-	.num_rbds = IWL_NUM_RBDS_SC_HE,
++	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++	.num_rbds = IWL_NUM_RBDS_SC_EHT,
+ };
+ 
+ MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 60a7b61d59aa3..ca1daec641c4f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -3,6 +3,7 @@
+  *
+  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+  * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2023 Intel Corporation
+  *****************************************************************************/
+ 
+ #include <linux/kernel.h>
+@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ 			iwlagn_check_ratid_empty(priv, sta_id, tid);
+ 		}
+ 
+-		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
++		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
+ 
+ 		freed = 0;
+ 
+@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ 	 * block-ack window (we assume that they've been successfully
+ 	 * transmitted ... if not, it's too late anyway). */
+ 	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+-			  &reclaimed_skbs);
++			  &reclaimed_skbs, false);
+ 
+ 	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ 			   "sta_id = %d\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index ba538d70985f4..39bee9c00e071 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -13,6 +13,7 @@
+ #define IWL_FW_INI_DOMAIN_ALWAYS_ON		0
+ #define IWL_FW_INI_REGION_ID_MASK		GENMASK(15, 0)
+ #define IWL_FW_INI_REGION_DUMP_POLICY_MASK	GENMASK(31, 16)
++#define IWL_FW_INI_PRESET_DISABLE		0xff
+ 
+ /**
+  * struct iwl_fw_ini_hcmd
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 241a9e3f2a1a7..f45f645ca6485 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -86,10 +86,7 @@ enum iwl_nvm_type {
+ #define IWL_DEFAULT_MAX_TX_POWER 22
+ #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+ 				 NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
+-				   IWL_TX_CSUM_NETIF_FLAGS_BZ | \
+-				   NETIF_F_RXCSUM)
++#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM)
+ 
+ /* Antenna presence definitions */
+ #define	ANT_NONE	0x0
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+index 128059ca77e60..06fb7d6653905 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+  */
+ #ifndef __iwl_dbg_tlv_h__
+ #define __iwl_dbg_tlv_h__
+@@ -10,7 +10,8 @@
+ #include <fw/file.h>
+ #include <fw/api/dbg-tlv.h>
+ 
+-#define IWL_DBG_TLV_MAX_PRESET 15
++#define IWL_DBG_TLV_MAX_PRESET	15
++#define ENABLE_INI		(IWL_DBG_TLV_MAX_PRESET + 1)
+ 
+ /**
+  * struct iwl_dbg_tlv_node - debug TLV node
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 3d87d26845e74..fb5e254757e71 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1795,6 +1795,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ #endif
+ 
+ 	drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
++	if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
++		/* We have a non-default value in the module parameter,
++		 * take its value
++		 */
++		drv->trans->dbg.domains_bitmap &= 0xffff;
++		if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
++			if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
++				IWL_ERR(trans,
++					"invalid enable_ini module parameter value: max = %d, using 0 instead\n",
++					ENABLE_INI);
++				iwlwifi_mod_params.enable_ini = 0;
++			}
++			drv->trans->dbg.domains_bitmap =
++				BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
++		}
++	}
+ 
+ 	ret = iwl_request_firmware(drv, true);
+ 	if (ret) {
+@@ -1843,8 +1859,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
+ 	kfree(drv);
+ }
+ 
+-#define ENABLE_INI	(IWL_DBG_TLV_MAX_PRESET + 1)
+-
+ /* shared module parameters */
+ struct iwl_mod_params iwlwifi_mod_params = {
+ 	.fw_restart = true,
+@@ -1964,38 +1978,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+ MODULE_PARM_DESC(uapsd_disable,
+ 		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+ 
+-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+-{
+-	int ret = 0;
+-	bool res;
+-	__u32 new_enable_ini;
+-
+-	/* in case the argument type is a number */
+-	ret = kstrtou32(arg, 0, &new_enable_ini);
+-	if (!ret) {
+-		if (new_enable_ini > ENABLE_INI) {
+-			pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+-			return -EINVAL;
+-		}
+-		goto out;
+-	}
+-
+-	/* in case the argument type is boolean */
+-	ret = kstrtobool(arg, &res);
+-	if (ret)
+-		return ret;
+-	new_enable_ini = (res ? ENABLE_INI : 0);
+-
+-out:
+-	iwlwifi_mod_params.enable_ini = new_enable_ini;
+-	return 0;
+-}
+-
+-static const struct kernel_param_ops enable_ini_ops = {
+-	.set = enable_ini_set
+-};
+-
+-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
++module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
+ MODULE_PARM_DESC(enable_ini,
+ 		 "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ 		 "Debug INI TLV FW debug infrastructure (default: 16)");
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+index 6dd381ff0f9e7..2a63968b0e55b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+@@ -348,8 +348,8 @@
+ #define RFIC_REG_RD			0xAD0470
+ #define WFPM_CTRL_REG			0xA03030
+ #define WFPM_OTP_CFG1_ADDR		0x00a03098
+-#define WFPM_OTP_CFG1_IS_JACKET_BIT	BIT(4)
+-#define WFPM_OTP_CFG1_IS_CDB_BIT	BIT(5)
++#define WFPM_OTP_CFG1_IS_JACKET_BIT	BIT(5)
++#define WFPM_OTP_CFG1_IS_CDB_BIT	BIT(4)
+ #define WFPM_OTP_BZ_BNJ_JACKET_BIT	5
+ #define WFPM_OTP_BZ_BNJ_CDB_BIT		4
+ #define WFPM_OTP_CFG1_IS_JACKET(_val)   (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index d02943d0ea625..b1a4be0069a7e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -56,6 +56,10 @@
+  *	6) Eventually, the free function will be called.
+  */
+ 
++/* default preset 0 (starts at bit 16) */
++#define IWL_FW_DBG_DOMAIN_POS	16
++#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)
++
+ #define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON
+ 
+ #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
+@@ -584,7 +588,7 @@ struct iwl_trans_ops {
+ 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ 		  struct iwl_device_tx_cmd *dev_cmd, int queue);
+ 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+-			struct sk_buff_head *skbs);
++			struct sk_buff_head *skbs, bool is_flush);
+ 
+ 	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+ 
+@@ -1266,14 +1270,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ }
+ 
+ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+-				     int ssn, struct sk_buff_head *skbs)
++				     int ssn, struct sk_buff_head *skbs,
++				     bool is_flush)
+ {
+ 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ 		return;
+ 	}
+ 
+-	trans->ops->reclaim(trans, queue, ssn, skbs);
++	trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
+ }
+ 
+ static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index f6488b4bbe68b..be2602d8c5bfa 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2012,6 +2012,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
+ 	if (IS_ERR(key_config))
+ 		return false;
+ 	ieee80211_set_key_rx_seq(key_config, 0, &seq);
++
++	if (key_config->keyidx == 4 || key_config->keyidx == 5) {
++		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++		int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
++		struct iwl_mvm_vif_link_info *mvm_link =
++			mvmvif->link[link_id];
++
++		mvm_link->igtk = key_config;
++	}
++
+ 	return true;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+index b49781d1a07a7..10b9219b3bfd3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2015-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+  */
+ #include <net/cfg80211.h>
+ #include <linux/etherdevice.h>
+@@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ 				      struct iwl_mvm_pasn_sta *sta)
+ {
+ 	list_del(&sta->list);
+-	iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
++	if (iwl_mvm_has_mld_api(mvm->fw))
++		iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
++	else
++		iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
+ 	iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ 	kfree(sta);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+index ace82e2c5bd91..6e1ad65527d12 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+@@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	unsigned int link_id = link_conf->link_id;
+ 	struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ 	struct iwl_link_config_cmd cmd = {};
+-	struct iwl_mvm_phy_ctxt *phyctxt;
+ 
+ 	if (WARN_ON_ONCE(!link_info))
+ 		return -EINVAL;
+@@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+ 	cmd.mac_id = cpu_to_le32(mvmvif->id);
+ 	cmd.spec_link_id = link_conf->link_id;
+-	/* P2P-Device already has a valid PHY context during add */
+-	phyctxt = link_info->phy_ctxt;
+-	if (phyctxt)
+-		cmd.phy_id = cpu_to_le32(phyctxt->id);
+-	else
+-		cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
++	WARN_ON_ONCE(link_info->phy_ctxt);
++	cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+ 
+ 	memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
+ 
+@@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 		flags_mask |= LINK_FLG_MU_EDCA_CW;
+ 	}
+ 
+-	if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be)
+-		cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing);
+-	else
+-		/* This flag can be set only if the MAC has eht support */
+-		changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++	if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
++		if (iwlwifi_mod_params.disable_11be ||
++		    !link_conf->eht_support)
++			changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++		else
++			cmd.puncture_mask =
++				cpu_to_le16(link_conf->eht_puncturing);
++	}
+ 
+ 	cmd.bss_color = link_conf->he_bss_color.color;
+ 
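
The link.c rework above only evaluates the puncture mask when the caller
actually requested an EHT update, and drops LINK_CONTEXT_MODIFY_EHT_PARAMS
again when 11be is disabled or the link has no EHT support; previously the
flag could be cleared even for unrelated changes. A sketch of the gating
(flag value illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MODIFY_EHT_PARAMS	(1u << 3)	/* illustrative value */

static uint32_t apply_eht(uint32_t changes, bool disable_11be,
			  bool eht_support, uint16_t puncturing,
			  uint16_t *mask_out)
{
	if (changes & MODIFY_EHT_PARAMS) {
		if (disable_11be || !eht_support)
			changes &= ~MODIFY_EHT_PARAMS;	/* cannot apply */
		else
			*mask_out = puncturing;	/* apply puncture mask */
	}
	return changes;
}

int main(void)
{
	uint16_t mask = 0;
	uint32_t c = apply_eht(MODIFY_EHT_PARAMS, false, true, 0x00f0, &mask);

	printf("changes=%#x mask=%#x\n", c, mask);
	return 0;
}
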
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+index 7369a45f7f2bd..9c97691e60384 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+@@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ 	INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ 	mvmvif->time_event_data.id = TE_MAX;
+ 
++	mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
++	mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
++	mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
++
+ 	/* No need to allocate data queues to P2P Device MAC and NAN.*/
+ 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ 		return 0;
+@@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ 		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ 	}
+ 
+-	mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+-	mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+-	mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
+-
+ 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+ 		mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+ 
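
The mac-ctxt.c hunk moves the IWL_MVM_INVALID_STA assignments above the
early "return 0" taken for P2P Device (and NAN) interfaces; with the old
order those vifs kept zero-initialized station IDs, and 0 aliases a
perfectly valid station index. A sketch of the fixed shape (types and
sentinel simplified):

#include <stdio.h>

#define INVALID_STA 0xff	/* sentinel, stand-in for the driver's */

struct link_info { unsigned char bcast, mcast, ap_sta; };

static int ctxt_init(struct link_info *l, int is_p2p_device)
{
	/* set sentinels first: a zeroed struct would alias station id 0 */
	l->bcast = l->mcast = l->ap_sta = INVALID_STA;

	if (is_p2p_device)	/* the early exit no longer skips the init */
		return 0;

	/* ... queue setup for the other interface types ... */
	return 0;
}

int main(void)
{
	struct link_info l = { 0 };

	ctxt_init(&l, 1);
	printf("bcast=%#x\n", l.bcast);
	return 0;
}
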
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index ce7905faa08ff..e2288fd601a6a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1588,32 +1588,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ 	}
+ 
+-	/*
+-	 * P2P_DEVICE interface does not have a channel context assigned to it,
+-	 * so a dedicated PHY context is allocated to it and the corresponding
+-	 * MAC context is bound to it at this stage.
+-	 */
+-	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+-
+-		mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+-		if (!mvmvif->deflink.phy_ctxt) {
+-			ret = -ENOSPC;
+-			goto out_free_bf;
+-		}
+-
+-		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+-		ret = iwl_mvm_binding_add_vif(mvm, vif);
+-		if (ret)
+-			goto out_unref_phy;
+-
+-		ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+-		if (ret)
+-			goto out_unbind;
+-
+-		/* Save a pointer to p2p device vif, so it can later be used to
+-		 * update the p2p device MAC when a GO is started/stopped */
++	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ 		mvm->p2p_device_vif = vif;
+-	}
+ 
+ 	iwl_mvm_tcm_add_vif(mvm, vif);
+ 	INIT_DELAYED_WORK(&mvmvif->csa_work,
+@@ -1642,11 +1618,6 @@ out:
+ 
+ 	goto out_unlock;
+ 
+- out_unbind:
+-	iwl_mvm_binding_remove_vif(mvm, vif);
+- out_unref_phy:
+-	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- out_free_bf:
+ 	if (mvm->bf_allowed_vif == mvmvif) {
+ 		mvm->bf_allowed_vif = NULL;
+ 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+@@ -1743,12 +1714,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ 	if (iwl_mvm_mac_remove_interface_common(hw, vif))
+ 		goto out;
+ 
++	/* Before the interface removal, mac80211 would cancel the ROC, and the
++	 * ROC worker would be scheduled if needed. The worker would be flushed
++	 * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
++	 * binding etc. so nothing needs to be done here.
++	 */
+ 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++		if (mvmvif->deflink.phy_ctxt) {
++			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++			mvmvif->deflink.phy_ctxt = NULL;
++		}
+ 		mvm->p2p_device_vif = NULL;
+-		iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+-		iwl_mvm_binding_remove_vif(mvm, vif);
+-		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-		mvmvif->deflink.phy_ctxt = NULL;
+ 	}
+ 
+ 	iwl_mvm_mac_ctxt_remove(mvm, vif);
+@@ -3790,6 +3766,12 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+ 
+ 	iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+ 
++	/* MFP is set by default before the station is authorized.
++	 * Clear it here in case it's not used.
++	 */
++	if (!sta->mfp)
++		return callbacks->update_sta(mvm, vif, sta);
++
+ 	return 0;
+ }
+ 
+@@ -4530,30 +4512,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
+ 	return ret;
+ }
+ 
+-static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm,
+-				      struct ieee80211_vif *vif,
+-				      struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	int ret = 0;
++	int ret;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	/* Unbind the P2P_DEVICE from the current PHY context,
+-	 * and if the PHY context is not used remove it.
+-	 */
+-	ret = iwl_mvm_binding_remove_vif(mvm, vif);
+-	if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
++	ret = iwl_mvm_binding_add_vif(mvm, vif);
++	if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ 		return ret;
+ 
+-	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+-	/* Bind the P2P_DEVICE to the current PHY Context */
+-	mvmvif->deflink.phy_ctxt = new_phy_ctxt;
+-
+-	ret = iwl_mvm_binding_add_vif(mvm, vif);
+-	WARN(ret, "Failed binding P2P_DEVICE\n");
+-	return ret;
++	/* The station and queue allocation must be done only after the binding
++	 * is done, as otherwise the FW might incorrectly configure its state.
++	 */
++	return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+ }
+ 
+ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+@@ -4564,7 +4536,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ {
+ 	static const struct iwl_mvm_roc_ops ops = {
+ 		.add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
+-		.switch_phy_ctxt = iwl_mvm_roc_switch_binding,
++		.link = iwl_mvm_roc_link,
+ 	};
+ 
+ 	return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -4580,7 +4552,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 	struct cfg80211_chan_def chandef;
+ 	struct iwl_mvm_phy_ctxt *phy_ctxt;
+-	bool band_change_removal;
+ 	int ret, i;
+ 	u32 lmac_id;
+ 
+@@ -4609,82 +4580,61 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		/* handle below */
+ 		break;
+ 	default:
+-		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
++		IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
+ 		ret = -EINVAL;
+ 		goto out_unlock;
+ 	}
+ 
++	/* Try using a PHY context that is already in use */
+ 	for (i = 0; i < NUM_PHY_CTX; i++) {
+ 		phy_ctxt = &mvm->phy_ctxts[i];
+-		if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt)
++		if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
+ 			continue;
+ 
+-		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+-			ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+-			if (ret)
+-				goto out_unlock;
++		if (channel == phy_ctxt->channel) {
++			if (mvmvif->deflink.phy_ctxt)
++				iwl_mvm_phy_ctxt_unref(mvm,
++						       mvmvif->deflink.phy_ctxt);
+ 
++			mvmvif->deflink.phy_ctxt = phy_ctxt;
+ 			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+-			goto schedule_time_event;
++			goto link_and_start_p2p_roc;
+ 		}
+ 	}
+ 
+-	/* Need to update the PHY context only if the ROC channel changed */
+-	if (channel == mvmvif->deflink.phy_ctxt->channel)
+-		goto schedule_time_event;
+-
+-	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+-
+-	/*
+-	 * Check if the remain-on-channel is on a different band and that
+-	 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+-	 * so, we'll need to release and then re-configure here, since we
+-	 * must not remove a PHY context that's part of a binding.
++	/* If the currently used PHY context is configured with a matching
++	 * channel use it
+ 	 */
+-	band_change_removal =
+-		fw_has_capa(&mvm->fw->ucode_capa,
+-			    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+-		mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band;
+-
+-	if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) {
+-		/*
+-		 * Change the PHY context configuration as it is currently
+-		 * referenced only by the P2P Device MAC (and we can modify it)
+-		 */
+-		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt,
+-					       &chandef, 1, 1);
+-		if (ret)
+-			goto out_unlock;
++	if (mvmvif->deflink.phy_ctxt) {
++		if (channel == mvmvif->deflink.phy_ctxt->channel)
++			goto link_and_start_p2p_roc;
+ 	} else {
+-		/*
+-		 * The PHY context is shared with other MACs (or we're trying to
+-		 * switch bands), so remove the P2P Device from the binding,
+-		 * allocate an new PHY context and create a new binding.
+-		 */
+ 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ 		if (!phy_ctxt) {
+ 			ret = -ENOSPC;
+ 			goto out_unlock;
+ 		}
+ 
+-		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+-					       1, 1);
+-		if (ret) {
+-			IWL_ERR(mvm, "Failed to change PHY context\n");
+-			goto out_unlock;
+-		}
++		mvmvif->deflink.phy_ctxt = phy_ctxt;
++		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++	}
+ 
+-		ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+-		if (ret)
+-			goto out_unlock;
++	/* Configure the PHY context */
++	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+ 
+-		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
++				       1, 1);
++	if (ret) {
++		IWL_ERR(mvm, "Failed to change PHY context\n");
++		goto out_unlock;
+ 	}
+ 
+-schedule_time_event:
+-	/* Schedule the time events */
+-	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
++link_and_start_p2p_roc:
++	ret = ops->link(mvm, vif);
++	if (ret)
++		goto out_unlock;
+ 
++	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+ out_unlock:
+ 	mutex_unlock(&mvm->mutex);
+ 	IWL_DEBUG_MAC80211(mvm, "leave\n");
+@@ -5634,7 +5584,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls);
+ 
+ 		if (drop) {
+-			if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++			if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
++					      mvmsta->tfd_queue_msk))
+ 				IWL_ERR(mvm, "flush request fail\n");
+ 		} else {
+ 			if (iwl_mvm_has_new_tx_api(mvm))
+@@ -5656,22 +5607,21 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			   struct ieee80211_sta *sta)
+ {
++	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+-	int i;
++	struct iwl_mvm_link_sta *mvm_link_sta;
++	struct ieee80211_link_sta *link_sta;
++	int link_id;
+ 
+ 	mutex_lock(&mvm->mutex);
+-	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+-		struct iwl_mvm_sta *mvmsta;
+-		struct ieee80211_sta *tmp;
+-
+-		tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+-						lockdep_is_held(&mvm->mutex));
+-		if (tmp != sta)
++	for_each_sta_active_link(vif, sta, link_sta, link_id) {
++		mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
++							 lockdep_is_held(&mvm->mutex));
++		if (!mvm_link_sta)
+ 			continue;
+ 
+-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-
+-		if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++		if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
++				      mvmsta->tfd_queue_msk))
+ 			IWL_ERR(mvm, "flush request fail\n");
+ 	}
+ 	mutex_unlock(&mvm->mutex);
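
The core of this mac80211.c refactor is that a P2P Device no longer grabs
a dedicated PHY context and binding at add-interface time; all of that
moves to remain-on-channel. iwl_mvm_roc_common() now picks a context in
three steps: reuse any already-referenced context sitting on the requested
channel, otherwise keep (and retune) the interface's current one,
otherwise allocate a free slot, and only then runs binding and
broadcast-station setup through the new per-API "link" op. A userspace
sketch of the selection order (structures heavily simplified):

#include <stdio.h>

#define NUM_CTX 4

struct phy_ctxt { int ref; int channel; };

/* returns the context to use; caller retunes it to 'channel' if needed */
static struct phy_ctxt *roc_pick_ctxt(struct phy_ctxt ctx[NUM_CTX],
				      struct phy_ctxt **cur, int channel)
{
	int i;

	/* 1) reuse any busy context already sitting on this channel */
	for (i = 0; i < NUM_CTX; i++) {
		struct phy_ctxt *p = &ctx[i];

		if (!p->ref || p == *cur || p->channel != channel)
			continue;
		if (*cur)
			(*cur)->ref--;
		p->ref++;
		*cur = p;
		return p;
	}

	/* 2) otherwise keep the current context (retuned by the caller) */
	if (*cur)
		return *cur;

	/* 3) otherwise take a free slot (-ENOSPC when none is left) */
	for (i = 0; i < NUM_CTX; i++)
		if (!ctx[i].ref) {
			ctx[i].ref = 1;
			*cur = &ctx[i];
			return *cur;
		}
	return NULL;
}

int main(void)
{
	struct phy_ctxt ctx[NUM_CTX] = { { .ref = 1, .channel = 6 } };
	struct phy_ctxt *cur = NULL;
	struct phy_ctxt *p = roc_pick_ctxt(ctx, &cur, 6);

	if (p) {
		p->channel = 6;	/* the retune step */
		printf("picked slot on channel %d, ref=%d\n",
		       p->channel, p->ref);
	}
	return 0;
}
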
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+index 2c9f2f71b083a..ea3e9e9c6e26c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+@@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
+ 			return 0;
+ 	}
+ 
+-	/* AP group keys are per link and should be on the mcast STA */
++	/* AP group keys are per link and should be on the mcast/bcast STA */
+ 	if (vif->type == NL80211_IFTYPE_AP &&
+-	    !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++	    !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
++		/* IGTK/BIGTK to bcast STA */
++		if (keyconf->keyidx >= 4)
++			return BIT(link_info->bcast_sta.sta_id);
++		/* GTK for data to mcast STA */
+ 		return BIT(link_info->mcast_sta.sta_id);
++	}
+ 
+ 	/* for client mode use the AP STA also for group keys */
+ 	if (!sta && vif->type == NL80211_IFTYPE_STATION)
+@@ -91,7 +96,12 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ 	if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ 		sta = mvmvif->ap_sta;
+ 
+-	if (!IS_ERR_OR_NULL(sta) && sta->mfp)
++	/* Set the MFP flag also for an AP interface where the key is an IGTK
++	 * key as in such a case the station would always be NULL
++	 */
++	if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
++	    (vif->type == NL80211_IFTYPE_AP &&
++	     (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ 		flags |= IWL_SEC_KEY_FLAG_MFP;
+ 
+ 	return flags;
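
Two related fixes in mld-key.c: AP group keys with index 4 or 5
(IGTK/BIGTK) are now installed on the broadcast station instead of the
multicast one, and the MFP flag is set for AP-side IGTKs even though no
station pointer exists on that path. A sketch of the routing (enum and
helpers illustrative):

#include <stdio.h>
#include <stdbool.h>

enum sta_sel { STA_PEER, STA_MCAST, STA_BCAST };

static enum sta_sel route_key(bool pairwise, int keyidx)
{
	if (pairwise)
		return STA_PEER;
	if (keyidx >= 4)	/* IGTK (4/5) and BIGTK (6/7) */
		return STA_BCAST;
	return STA_MCAST;	/* GTK for data frames */
}

static bool key_needs_mfp(bool sta_mfp, bool is_ap, int keyidx)
{
	/* AP-side IGTK installs have no station, so key index decides */
	return sta_mfp || (is_ap && (keyidx == 4 || keyidx == 5));
}

int main(void)
{
	printf("keyidx 4 -> %d, mfp=%d\n",
	       route_key(false, 4), key_needs_mfp(false, true, 4));
	return 0;
}
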
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index b719843e94576..2ddb6f763a0b3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -56,43 +56,15 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ 	}
+ 
+-	/*
+-	 * P2P_DEVICE interface does not have a channel context assigned to it,
+-	 * so a dedicated PHY context is allocated to it and the corresponding
+-	 * MAC context is bound to it at this stage.
+-	 */
+-	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+-		mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+-		if (!mvmvif->deflink.phy_ctxt) {
+-			ret = -ENOSPC;
+-			goto out_free_bf;
+-		}
+-
+-		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+-		ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+-		if (ret)
+-			goto out_unref_phy;
+-
+-		ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+-					   LINK_CONTEXT_MODIFY_ACTIVE |
+-					   LINK_CONTEXT_MODIFY_RATES_INFO,
+-					   true);
+-		if (ret)
+-			goto out_remove_link;
+-
+-		ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+-		if (ret)
+-			goto out_remove_link;
++	ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
++	if (ret)
++		goto out_free_bf;
+ 
+-		/* Save a pointer to p2p device vif, so it can later be used to
+-		 * update the p2p device MAC when a GO is started/stopped
+-		 */
++	/* Save a pointer to p2p device vif, so it can later be used to
++	 * update the p2p device MAC when a GO is started/stopped
++	 */
++	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ 		mvm->p2p_device_vif = vif;
+-	} else {
+-		ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+-		if (ret)
+-			goto out_free_bf;
+-	}
+ 
+ 	ret = iwl_mvm_power_update_mac(mvm);
+ 	if (ret)
+@@ -119,10 +91,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 
+ 	goto out_unlock;
+ 
+- out_remove_link:
+-	iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+- out_unref_phy:
+-	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+  out_free_bf:
+ 	if (mvm->bf_allowed_vif == mvmvif) {
+ 		mvm->bf_allowed_vif = NULL;
+@@ -130,7 +98,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ 	}
+  out_remove_mac:
+-	mvmvif->deflink.phy_ctxt = NULL;
+ 	mvmvif->link[0] = NULL;
+ 	iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
+  out_unlock:
+@@ -185,14 +152,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
+ 
+ 	iwl_mvm_power_update_mac(mvm);
+ 
++	/* Before the interface removal, mac80211 would cancel the ROC, and the
++	 * ROC worker would be scheduled if needed. The worker would be flushed
++	 * in iwl_mvm_prepare_mac_removal() and thus at this point the link is
++	 * not active. So need only to remove the link.
++	 */
+ 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++		if (mvmvif->deflink.phy_ctxt) {
++			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++			mvmvif->deflink.phy_ctxt = NULL;
++		}
+ 		mvm->p2p_device_vif = NULL;
+-
+-		/* P2P device uses only one link */
+-		iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf);
+-		iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+-		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-		mvmvif->deflink.phy_ctxt = NULL;
++		iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
+ 	} else {
+ 		iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+ 	}
+@@ -653,7 +624,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
+ 	}
+ 
+ 	/* Update EHT Puncturing info */
+-	if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht)
++	if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ 		link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
+ 
+ 	if (link_changes) {
+@@ -968,36 +939,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
+ 	return 0;
+ }
+ 
+-static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm,
+-				       struct ieee80211_vif *vif,
+-				       struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	int ret = 0;
++	int ret;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	/* Inorder to change the phy_ctx of a link, the link needs to be
+-	 * inactive. Therefore, first deactivate the link, then change its
+-	 * phy_ctx, and then activate it again.
+-	 */
+-	ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+-				   LINK_CONTEXT_MODIFY_ACTIVE, false);
+-	if (WARN(ret, "Failed to deactivate link\n"))
++	/* The PHY context ID might have changed so need to set it */
++	ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
++	if (WARN(ret, "Failed to set PHY context ID\n"))
+ 		return ret;
+ 
+-	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+-	mvmvif->deflink.phy_ctxt = new_phy_ctxt;
++	ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++				   LINK_CONTEXT_MODIFY_ACTIVE |
++				   LINK_CONTEXT_MODIFY_RATES_INFO,
++				   true);
+ 
+-	ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
+-	if (WARN(ret, "Failed to deactivate link\n"))
++	if (WARN(ret, "Failed linking P2P_DEVICE\n"))
+ 		return ret;
+ 
+-	ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+-				   LINK_CONTEXT_MODIFY_ACTIVE, true);
+-	WARN(ret, "Failed binding P2P_DEVICE\n");
+-	return ret;
++	/* The station and queue allocation must be done only after the linking
++	 * is done, as otherwise the FW might incorrectly configure its state.
++	 */
++	return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+ }
+ 
+ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1006,7 +970,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ {
+ 	static const struct iwl_mvm_roc_ops ops = {
+ 		.add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
+-		.switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx,
++		.link = iwl_mvm_mld_roc_link,
+ 	};
+ 
+ 	return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -1089,9 +1053,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ 
+-	if (err)
+-		goto out_err;
+-
+ 	err = 0;
+ 	if (new_links == 0) {
+ 		mvmvif->link[0] = &mvmvif->deflink;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+index 524852cf5cd2d..56f51344c193c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+@@ -347,7 +347,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
+ 		return -EINVAL;
+ 
+ 	if (flush)
+-		iwl_mvm_flush_sta(mvm, int_sta, true);
++		iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
+ 
+ 	iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index b18c91c5dd5d1..218f3bc31104b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1658,7 +1658,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
+ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+ #endif
+ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
+ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
+ 
+ /* Utils to extract sta related data */
+@@ -1942,13 +1942,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
+  *
+  * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
+  *	for Hot Spot 2.0
+- * @switch_phy_ctxt: pointer to the function that switches a vif from one
+- *	phy_ctx to another
++ * @link: For a P2P Device interface, pointer to a function that links the
++ *      MAC/Link to the PHY context
+  */
+ struct iwl_mvm_roc_ops {
+ 	int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
+-	int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+-			       struct iwl_mvm_phy_ctxt *new_phy_ctxt);
++	int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ };
+ 
+ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 3b9a343d4f672..2c231f4623893 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2059,7 +2059,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 		*status = IWL_MVM_QUEUE_FREE;
+ 	}
+ 
+-	if (vif->type == NL80211_IFTYPE_STATION) {
++	if (vif->type == NL80211_IFTYPE_STATION &&
++	    mvm_link->ap_sta_id == sta_id) {
+ 		/* if associated - we can't remove the AP STA now */
+ 		if (vif->cfg.assoc)
+ 			return true;
+@@ -2097,7 +2098,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ 		return ret;
+ 
+ 	/* flush its queues here since we are freeing mvm_sta */
+-	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
++	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
++				mvm_sta->tfd_queue_msk);
+ 	if (ret)
+ 		return ret;
+ 	if (iwl_mvm_has_new_tx_api(mvm)) {
+@@ -2408,7 +2410,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
++	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++			  mvmvif->deflink.bcast_sta.tfd_queue_msk);
+ 
+ 	switch (vif->type) {
+ 	case NL80211_IFTYPE_AP:
+@@ -2664,7 +2667,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
++	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
++			  mvmvif->deflink.mcast_sta.tfd_queue_msk);
+ 
+ 	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
+ 			    &mvmvif->deflink.cab_queue, 0);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 5f0e7144a951c..158266719ffd7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -78,9 +78,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ 		 */
+ 
+ 		if (!WARN_ON(!mvm->p2p_device_vif)) {
+-			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+-			iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta,
+-					  true);
++			struct ieee80211_vif *vif = mvm->p2p_device_vif;
++
++			mvmvif = iwl_mvm_vif_from_mac80211(vif);
++			iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++					  mvmvif->deflink.bcast_sta.tfd_queue_msk);
++
++			if (mvm->mld_api_is_used) {
++				iwl_mvm_mld_rm_bcast_sta(mvm, vif,
++							 &vif->bss_conf);
++
++				iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++						     LINK_CONTEXT_MODIFY_ACTIVE,
++						     false);
++			} else {
++				iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
++				iwl_mvm_binding_remove_vif(mvm, vif);
++			}
++
++			/* Do not remove the PHY context as removing and adding
++			 * a PHY context has timing overheads. Leaving it
++			 * configured in FW would be useful in case the next ROC
++			 * is with the same channel.
++			 */
+ 		}
+ 	}
+ 
+@@ -93,7 +113,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ 	 */
+ 	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+ 		/* do the same in case of hot spot 2.0 */
+-		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
++		iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
++				  mvm->aux_sta.tfd_queue_msk);
+ 
+ 		if (mvm->mld_api_is_used) {
+ 			iwl_mvm_mld_rm_aux_sta(mvm);
+@@ -880,8 +901,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ 		/* End TE, notify mac80211 */
+ 		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+-		ieee80211_remain_on_channel_expired(mvm->hw);
+ 		iwl_mvm_p2p_roc_finished(mvm);
++		ieee80211_remain_on_channel_expired(mvm->hw);
+ 	} else if (le32_to_cpu(notif->start)) {
+ 		if (WARN_ON(mvmvif->time_event_data.id !=
+ 				le32_to_cpu(notif->conf_id)))
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 898dca3936435..2ede69132fee9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1599,7 +1599,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+ 
+ 	/* we can free until ssn % q.n_bd not inclusive */
+-	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
++	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+ 
+ 	while (!skb_queue_empty(&skbs)) {
+ 		struct sk_buff *skb = __skb_dequeue(&skbs);
+@@ -1951,7 +1951,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ 	 * block-ack window (we assume that they've been successfully
+ 	 * transmitted ... if not, it's too late anyway).
+ 	 */
+-	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
++	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+ 
+ 	skb_queue_walk(&reclaimed_skbs, skb) {
+ 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+@@ -2293,24 +2293,10 @@ free_rsp:
+ 	return ret;
+ }
+ 
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
+ {
+-	u32 sta_id, tfd_queue_msk;
+-
+-	if (internal) {
+-		struct iwl_mvm_int_sta *int_sta = sta;
+-
+-		sta_id = int_sta->sta_id;
+-		tfd_queue_msk = int_sta->tfd_queue_msk;
+-	} else {
+-		struct iwl_mvm_sta *mvm_sta = sta;
+-
+-		sta_id = mvm_sta->deflink.sta_id;
+-		tfd_queue_msk = mvm_sta->tfd_queue_msk;
+-	}
+-
+ 	if (iwl_mvm_has_new_tx_api(mvm))
+ 		return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
+ 
+-	return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);
++	return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
+ }
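
The old iwl_mvm_flush_sta() took a void pointer plus an "internal" flag
and cast to one of two station types; callers now pass the station ID and
queue mask explicitly, removing the type punning and giving every call
site (bcast, mcast, aux, regular STA) the same path. The resulting shape,
sketched with stub backends:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static int flush_tids(uint32_t sta_id, uint16_t tids)
{
	printf("flush sta %u tids %#x\n", sta_id, tids);
	return 0;
}

static int flush_tx_path(uint32_t queue_mask)
{
	printf("flush queues %#x\n", queue_mask);
	return 0;
}

/* new-style helper: no casts, callers name the exact station/queues */
static int flush_sta(bool new_tx_api, uint32_t sta_id, uint32_t queue_mask)
{
	if (new_tx_api)
		return flush_tids(sta_id, 0xffff);
	return flush_tx_path(queue_mask);
}

int main(void)
{
	flush_sta(true, 7, 0);
	flush_sta(false, 7, 0x0f);
	return 0;
}
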
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index fa46dad5fd680..2ecf6db95fb31 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
++		iwl_pcie_synchronize_irqs(trans);
+ 		iwl_pcie_rx_napi_sync(trans);
+ 		iwl_txq_gen2_tx_free(trans);
+ 		iwl_pcie_rx_stop(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 3e988da449738..d0209983587a2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1263,6 +1263,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
++		iwl_pcie_synchronize_irqs(trans);
+ 		iwl_pcie_rx_napi_sync(trans);
+ 		iwl_pcie_tx_stop(trans);
+ 		iwl_pcie_rx_stop(trans);
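
Both PCIe stop paths gain an iwl_pcie_synchronize_irqs() call ahead of
the NAPI sync and queue teardown, so no interrupt handler can still be
scheduling NAPI against rings that are about to be freed. The ordering,
sketched with stand-in stubs (not the driver's functions):

#include <stdio.h>

static void synchronize_irqs(void) { puts("1. wait out in-flight IRQs"); }
static void napi_sync(void)        { puts("2. sync NAPI polling"); }
static void tx_stop(void)          { puts("3. stop/free TX"); }
static void rx_stop(void)          { puts("4. stop RX"); }

int main(void)
{
	/* the fix inserts step 1 ahead of the existing teardown */
	synchronize_irqs();
	napi_sync();
	tx_stop();
	rx_stop();
	return 0;
}
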
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 5bb3cc3367c9f..64dedb1d11862 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -1561,7 +1561,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
+ 
+ /* Frees buffers until index _not_ inclusive */
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+-		     struct sk_buff_head *skbs)
++		     struct sk_buff_head *skbs, bool is_flush)
+ {
+ 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ 	int tfd_num, read_ptr, last_to_free;
+@@ -1636,9 +1636,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ 	if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ 	    test_bit(txq_id, trans->txqs.queue_stopped)) {
+ 		struct sk_buff_head overflow_skbs;
++		struct sk_buff *skb;
+ 
+ 		__skb_queue_head_init(&overflow_skbs);
+-		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
++		skb_queue_splice_init(&txq->overflow_q,
++				      is_flush ? skbs : &overflow_skbs);
+ 
+ 		/*
+ 		 * We are going to transmit from the overflow queue.
+@@ -1658,8 +1660,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ 		 */
+ 		spin_unlock_bh(&txq->lock);
+ 
+-		while (!skb_queue_empty(&overflow_skbs)) {
+-			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
++		while ((skb = __skb_dequeue(&overflow_skbs))) {
+ 			struct iwl_device_tx_cmd *dev_cmd_ptr;
+ 
+ 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
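
With the new is_flush flag, iwl_txq_reclaim() splices packets parked on
the overflow queue straight into the caller's reclaimed list instead of
resubmitting them for transmission, and the dequeue loop becomes
"while ((skb = __skb_dequeue(...)))". A userspace sketch of the
behavioural split, with a toy list standing in for sk_buff_head:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct pkt { int id; struct pkt *next; };

static struct pkt *pop(struct pkt **q)
{
	struct pkt *p = *q;

	if (p)
		*q = p->next;
	return p;
}

static void push(struct pkt **q, struct pkt *p)
{
	p->next = *q;
	*q = p;
}

static void reclaim(struct pkt **overflow, struct pkt **reclaimed,
		    bool is_flush)
{
	struct pkt *p;

	while ((p = pop(overflow))) {
		if (is_flush)
			push(reclaimed, p);	/* hand back to the caller */
		else
			printf("retransmit pkt %d\n", p->id);
	}
}

int main(void)
{
	struct pkt a = { .id = 1 }, b = { .id = 2 };
	struct pkt *overflow = NULL, *reclaimed = NULL;

	push(&overflow, &a);
	push(&overflow, &b);
	reclaim(&overflow, &reclaimed, true);
	for (struct pkt *p = reclaimed; p; p = p->next)
		printf("reclaimed pkt %d\n", p->id);
	return 0;
}
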
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index 1e4a24ab9bab2..b311426c84f05 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -173,7 +173,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ 				      struct iwl_txq *txq, u16 byte_cnt,
+ 				      int num_tbs);
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+-		     struct sk_buff_head *skbs);
++		     struct sk_buff_head *skbs, bool is_flush);
+ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
+ 				bool freeze);
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index f539913aadf86..e57ce25f3d816 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -330,9 +330,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ 	if (e->txwi == DMA_DUMMY_DATA)
+ 		e->txwi = NULL;
+ 
+-	if (e->skb == DMA_DUMMY_DATA)
+-		e->skb = NULL;
+-
+ 	*prev_e = *e;
+ 	memset(e, 0, sizeof(*e));
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index b65b0a88c1ded..808466b7de472 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -9,6 +9,23 @@ struct beacon_bc_data {
+ 	int count[MT7603_MAX_INTERFACES];
+ };
+ 
++static void
++mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
++{
++	if (dev->beacon_check % 5 != 4)
++		return;
++
++	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
++	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
++	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++
++	mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++	mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++	mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++	mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++}
++
+ static void
+ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ 	struct mt76_dev *mdev = &dev->mt76;
+ 	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ 	struct sk_buff *skb = NULL;
++	u32 om_idx = mvif->idx;
++	u32 val;
+ 
+ 	if (!(mdev->beacon_mask & BIT(mvif->idx)))
+ 		return;
+@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ 	if (!skb)
+ 		return;
+ 
+-	mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+-			  MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++	if (om_idx)
++		om_idx |= 0x10;
++	val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
++		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
++		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
+ 
+ 	spin_lock_bh(&dev->ps_lock);
+-	mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+-		FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+-		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+-			   dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
+-		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+-		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+ 
+-	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
++	mt76_wr(dev, MT_DMA_FQCR0, val |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
++	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
+ 		dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++		goto out;
++	}
++
++	mt76_wr(dev, MT_DMA_FQCR0, val |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
++	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
++		dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++		goto out;
++	}
+ 
++	mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
++			  MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++
++out:
+ 	spin_unlock_bh(&dev->ps_lock);
+ }
+ 
+@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 	data.dev = dev;
+ 	__skb_queue_head_init(&data.q);
+ 
++	/* Flush all previous CAB queue packets and beacons */
++	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
++
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
++
++	if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
++		dev->beacon_check++;
++	else
++		dev->beacon_check = 0;
++	mt7603_mac_stuck_beacon_recovery(dev);
++
+ 	q = dev->mphy.q_tx[MT_TXQ_BEACON];
+ 	spin_lock(&q->lock);
+ 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 	mt76_queue_kick(dev, q);
+ 	spin_unlock(&q->lock);
+ 
+-	/* Flush all previous CAB queue packets */
+-	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+-
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
+-
+ 	mt76_csa_check(mdev);
+ 	if (mdev->csa_complete)
+-		goto out;
++		return;
+ 
+ 	q = dev->mphy.q_tx[MT_TXQ_CAB];
+ 	do {
+@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 		 skb_queue_len(&data.q) < 8);
+ 
+ 	if (skb_queue_empty(&data.q))
+-		goto out;
++		return;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ 		if (!data.tail[i])
+@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 		MT_WF_ARB_CAB_START_BSSn(0) |
+ 		(MT_WF_ARB_CAB_START_BSS0n(1) *
+ 		 ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+-
+-out:
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+-	if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+-		dev->beacon_check++;
+ }
+ 
+ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
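
The new mt7603_mac_stuck_beacon_recovery() only kicks in on every fifth
consecutive pre-TBTT pass that still finds queued beacons, resetting the
TX scheduler instead of waiting for the full watchdog; the counter resets
whenever the beacon queue drains. The escalation policy, sketched:

#include <stdio.h>

static int beacon_check;

static void stuck_beacon_recovery(void)
{
	if (beacon_check % 5 != 4)	/* act on checks 4, 9, 14, ... */
		return;
	puts("reset TX scheduler / re-enable TX");
}

static void pre_tbtt(int beacons_still_queued)
{
	if (beacons_still_queued)
		beacon_check++;
	else
		beacon_check = 0;	/* queue drained: healthy again */
	stuck_beacon_recovery();
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		pre_tbtt(1);		/* simulate a stuck beacon queue */
	return 0;
}
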
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+index 60a996b63c0c0..915b8349146af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+ 	}
+ 
+ 	if (intr & MT_INT_RX_DONE(0)) {
++		dev->rx_pse_check = 0;
+ 		mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ 		napi_schedule(&dev->mt76.napi[0]);
+ 	}
+ 
+ 	if (intr & MT_INT_RX_DONE(1)) {
++		dev->rx_pse_check = 0;
+ 		mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ 		napi_schedule(&dev->mt76.napi[1]);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 12e0af52082a6..47aefb0efd8ee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1430,15 +1430,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+ 
+ 	mt7603_beacon_set_timer(dev, -1, 0);
+ 
+-	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+-	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+-	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+-	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+-		mt7603_pse_reset(dev);
+-
+-	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+-		goto skip_dma_reset;
+-
+ 	mt7603_mac_stop(dev);
+ 
+ 	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+@@ -1448,28 +1439,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+ 
+ 	mt7603_irq_disable(dev, mask);
+ 
+-	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+-
+ 	mt7603_pse_client_reset(dev);
+ 
+ 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++)
+ 		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 
++	mt7603_dma_sched_reset(dev);
++
++	mt76_tx_status_check(&dev->mt76, true);
++
+ 	mt76_for_each_q_rx(&dev->mt76, i) {
+ 		mt76_queue_rx_reset(dev, i);
+ 	}
+ 
+-	mt76_tx_status_check(&dev->mt76, true);
++	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
++	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
++		mt7603_pse_reset(dev);
+ 
+-	mt7603_dma_sched_reset(dev);
++	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
++		mt7603_mac_dma_start(dev);
+ 
+-	mt7603_mac_dma_start(dev);
++		mt7603_irq_enable(dev, mask);
+ 
+-	mt7603_irq_enable(dev, mask);
++		clear_bit(MT76_RESET, &dev->mphy.state);
++	}
+ 
+-skip_dma_reset:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 	mutex_unlock(&dev->mt76.mutex);
+ 
+ 	mt76_worker_enable(&dev->mt76.tx_worker);
+@@ -1559,20 +1554,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+ {
+ 	u32 addr, val;
+ 
+-	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+-		return true;
+-
+ 	if (mt7603_rx_fifo_busy(dev))
+-		return false;
++		goto out;
+ 
+ 	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ 	mt76_wr(dev, addr, 3);
+ 	val = mt76_rr(dev, addr) >> 16;
+ 
+-	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+-		return true;
++	if (!(val & BIT(0)))
++		return false;
+ 
+-	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
++	if (is_mt7628(dev))
++		val &= 0xa000;
++	else
++		val &= 0x8000;
++	if (!val)
++		return false;
++
++out:
++	if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
++	    (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
++		return false;
++
++	return true;
+ }
+ 
+ static bool
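
The reworked mt7603_rx_pse_busy() drops the old MCU debug-register
shortcut, masks the PSE client status per chip (0xa000 on MT7628, 0x8000
otherwise, and only when valid bit 0 is set), and, together with the
core.c change that clears rx_pse_check on every RX interrupt, lets a
pending RX-done interrupt veto the "busy" verdict. The decision logic,
sketched with plain booleans in place of register reads:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool rx_pse_busy(bool fifo_busy, uint32_t status, bool is_mt7628,
			bool rx_irq_pending)
{
	if (!fifo_busy) {
		if (!(status & 1))	/* status word not valid */
			return false;
		if (!(status & (is_mt7628 ? 0xa000 : 0x8000)))
			return false;	/* no stuck-client bits set */
	}
	/* RX interrupts pending means the datapath is still alive */
	return !rx_irq_pending;
}

int main(void)
{
	printf("%d\n", rx_pse_busy(false, 0x8001, false, false)); /* 1 */
	printf("%d\n", rx_pse_busy(false, 0x8001, false, true));  /* 0 */
	return 0;
}
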
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index 3b901090b29c6..9b84db233aceb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -462,6 +462,11 @@ enum {
+ #define MT_WF_SEC_BASE			0x21a00
+ #define MT_WF_SEC(ofs)			(MT_WF_SEC_BASE + (ofs))
+ 
++#define MT_WF_CFG_OFF_BASE		0x21e00
++#define MT_WF_CFG_OFF(ofs)		(MT_WF_CFG_OFF_BASE + (ofs))
++#define MT_WF_CFG_OFF_WOCCR		MT_WF_CFG_OFF(0x004)
++#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS	BIT(4)
++
+ #define MT_SEC_SCR			MT_WF_SEC(0x004)
+ #define MT_SEC_SCR_MASK_ORDER		GENMASK(1, 0)
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 8d745c9730c72..955974a82180f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+ 	};
+ 
+ 	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+-	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++	    phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ 		req.switch_reason = CH_SWITCH_NORMAL;
+ 	else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ 		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+index 0019890fdb784..fbb1181c58ff3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+@@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	else
+ 		mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+ 
+-	tx_info->skb = DMA_DUMMY_DATA;
++	tx_info->skb = NULL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+new file mode 100644
+index 0000000000000..a6d8e59a2b60e
+--- /dev/null
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+@@ -0,0 +1,327 @@
++/* SPDX-License-Identifier: ISC */
++/* Copyright (C) 2023 MediaTek Inc. */
++
++#ifndef __MT76_CONNAC3_MAC_H
++#define __MT76_CONNAC3_MAC_H
++
++#define MT_CT_PARSE_LEN			72
++#define MT_CT_DMA_BUF_NUM		2
++
++#define MT_RXD0_LENGTH			GENMASK(15, 0)
++#define MT_RXD0_PKT_FLAG                GENMASK(19, 16)
++#define MT_RXD0_PKT_TYPE		GENMASK(31, 27)
++
++#define MT_RXD0_MESH			BIT(18)
++#define MT_RXD0_MHCP			BIT(19)
++#define MT_RXD0_NORMAL_ETH_TYPE_OFS	GENMASK(22, 16)
++#define MT_RXD0_NORMAL_IP_SUM		BIT(23)
++#define MT_RXD0_NORMAL_UDP_TCP_SUM	BIT(24)
++
++#define MT_RXD0_SW_PKT_TYPE_MASK	GENMASK(31, 16)
++#define MT_RXD0_SW_PKT_TYPE_MAP		0x380F
++#define MT_RXD0_SW_PKT_TYPE_FRAME	0x3801
++
++/* RXD DW1 */
++#define MT_RXD1_NORMAL_WLAN_IDX		GENMASK(11, 0)
++#define MT_RXD1_NORMAL_GROUP_1		BIT(16)
++#define MT_RXD1_NORMAL_GROUP_2		BIT(17)
++#define MT_RXD1_NORMAL_GROUP_3		BIT(18)
++#define MT_RXD1_NORMAL_GROUP_4		BIT(19)
++#define MT_RXD1_NORMAL_GROUP_5		BIT(20)
++#define MT_RXD1_NORMAL_KEY_ID		GENMASK(22, 21)
++#define MT_RXD1_NORMAL_CM		BIT(23)
++#define MT_RXD1_NORMAL_CLM		BIT(24)
++#define MT_RXD1_NORMAL_ICV_ERR		BIT(25)
++#define MT_RXD1_NORMAL_TKIP_MIC_ERR	BIT(26)
++#define MT_RXD1_NORMAL_BAND_IDX		GENMASK(28, 27)
++#define MT_RXD1_NORMAL_SPP_EN		BIT(29)
++#define MT_RXD1_NORMAL_ADD_OM		BIT(30)
++#define MT_RXD1_NORMAL_SEC_DONE		BIT(31)
++
++/* RXD DW2 */
++#define MT_RXD2_NORMAL_BSSID		GENMASK(5, 0)
++#define MT_RXD2_NORMAL_MAC_HDR_LEN	GENMASK(12, 8)
++#define MT_RXD2_NORMAL_HDR_TRANS	BIT(7)
++#define MT_RXD2_NORMAL_HDR_OFFSET	GENMASK(15, 13)
++#define MT_RXD2_NORMAL_SEC_MODE		GENMASK(20, 16)
++#define MT_RXD2_NORMAL_MU_BAR		BIT(21)
++#define MT_RXD2_NORMAL_SW_BIT		BIT(22)
++#define MT_RXD2_NORMAL_AMSDU_ERR	BIT(23)
++#define MT_RXD2_NORMAL_MAX_LEN_ERROR	BIT(24)
++#define MT_RXD2_NORMAL_HDR_TRANS_ERROR	BIT(25)
++#define MT_RXD2_NORMAL_INT_FRAME	BIT(26)
++#define MT_RXD2_NORMAL_FRAG		BIT(27)
++#define MT_RXD2_NORMAL_NULL_FRAME	BIT(28)
++#define MT_RXD2_NORMAL_NDATA		BIT(29)
++#define MT_RXD2_NORMAL_NON_AMPDU	BIT(30)
++#define MT_RXD2_NORMAL_BF_REPORT	BIT(31)
++
++/* RXD DW3 */
++#define MT_RXD3_NORMAL_RXV_SEQ		GENMASK(7, 0)
++#define MT_RXD3_NORMAL_CH_FREQ		GENMASK(15, 8)
++#define MT_RXD3_NORMAL_ADDR_TYPE	GENMASK(17, 16)
++#define MT_RXD3_NORMAL_U2M		BIT(0)
++#define MT_RXD3_NORMAL_HTC_VLD		BIT(18)
++#define MT_RXD3_NORMAL_BEACON_MC	BIT(20)
++#define MT_RXD3_NORMAL_BEACON_UC	BIT(21)
++#define MT_RXD3_NORMAL_CO_ANT		BIT(22)
++#define MT_RXD3_NORMAL_FCS_ERR		BIT(24)
++#define MT_RXD3_NORMAL_VLAN2ETH		BIT(31)
++
++/* RXD DW4 */
++#define MT_RXD4_NORMAL_PAYLOAD_FORMAT	GENMASK(1, 0)
++#define MT_RXD4_FIRST_AMSDU_FRAME	GENMASK(1, 0)
++#define MT_RXD4_MID_AMSDU_FRAME		BIT(1)
++#define MT_RXD4_LAST_AMSDU_FRAME	BIT(0)
++
++#define MT_RXV_HDR_BAND_IDX		BIT(24)
++
++/* RXD GROUP4 */
++#define MT_RXD8_FRAME_CONTROL		GENMASK(15, 0)
++
++#define MT_RXD10_SEQ_CTRL		GENMASK(15, 0)
++#define MT_RXD10_QOS_CTL		GENMASK(31, 16)
++
++#define MT_RXD11_HT_CONTROL		GENMASK(31, 0)
++
++/* P-RXV */
++#define MT_PRXV_TX_RATE			GENMASK(6, 0)
++#define MT_PRXV_TX_DCM			BIT(4)
++#define MT_PRXV_TX_ER_SU_106T		BIT(5)
++#define MT_PRXV_NSTS			GENMASK(10, 7)
++#define MT_PRXV_TXBF			BIT(11)
++#define MT_PRXV_HT_AD_CODE		BIT(12)
++#define MT_PRXV_HE_RU_ALLOC		GENMASK(30, 22)
++#define MT_PRXV_RCPI3			GENMASK(31, 24)
++#define MT_PRXV_RCPI2			GENMASK(23, 16)
++#define MT_PRXV_RCPI1			GENMASK(15, 8)
++#define MT_PRXV_RCPI0			GENMASK(7, 0)
++#define MT_PRXV_HT_SHORT_GI		GENMASK(4, 3)
++#define MT_PRXV_HT_STBC			GENMASK(10, 9)
++#define MT_PRXV_TX_MODE			GENMASK(14, 11)
++#define MT_PRXV_FRAME_MODE		GENMASK(2, 0)
++#define MT_PRXV_DCM			BIT(5)
++
++/* C-RXV */
++#define MT_CRXV_HE_NUM_USER		GENMASK(26, 20)
++#define MT_CRXV_HE_LTF_SIZE		GENMASK(28, 27)
++#define MT_CRXV_HE_LDPC_EXT_SYM		BIT(30)
++
++#define MT_CRXV_HE_PE_DISAMBIG		BIT(1)
++#define MT_CRXV_HE_UPLINK		BIT(2)
++
++#define MT_CRXV_HE_MU_AID		GENMASK(27, 17)
++#define MT_CRXV_HE_BEAM_CHNG		BIT(29)
++
++#define MT_CRXV_HE_DOPPLER		BIT(0)
++#define MT_CRXV_HE_BSS_COLOR		GENMASK(15, 10)
++#define MT_CRXV_HE_TXOP_DUR		GENMASK(19, 17)
++
++#define MT_CRXV_HE_SR_MASK		GENMASK(11, 8)
++#define MT_CRXV_HE_SR1_MASK		GENMASK(16, 12)
++#define MT_CRXV_HE_SR2_MASK             GENMASK(20, 17)
++#define MT_CRXV_HE_SR3_MASK             GENMASK(24, 21)
++
++#define MT_CRXV_HE_RU0			GENMASK(8, 0)
++#define MT_CRXV_HE_RU1			GENMASK(17, 9)
++#define MT_CRXV_HE_RU2			GENMASK(26, 18)
++#define MT_CRXV_HE_RU3_L		GENMASK(31, 27)
++#define MT_CRXV_HE_RU3_H		GENMASK(3, 0)
++
++enum tx_header_format {
++	MT_HDR_FORMAT_802_3,
++	MT_HDR_FORMAT_CMD,
++	MT_HDR_FORMAT_802_11,
++	MT_HDR_FORMAT_802_11_EXT,
++};
++
++enum tx_pkt_type {
++	MT_TX_TYPE_CT,
++	MT_TX_TYPE_SF,
++	MT_TX_TYPE_CMD,
++	MT_TX_TYPE_FW,
++};
++
++enum tx_port_idx {
++	MT_TX_PORT_IDX_LMAC,
++	MT_TX_PORT_IDX_MCU
++};
++
++enum tx_mcu_port_q_idx {
++	MT_TX_MCU_PORT_RX_Q0 = 0x20,
++	MT_TX_MCU_PORT_RX_Q1,
++	MT_TX_MCU_PORT_RX_Q2,
++	MT_TX_MCU_PORT_RX_Q3,
++	MT_TX_MCU_PORT_RX_FWDL = 0x3e
++};
++
++enum tx_mgnt_type {
++	MT_TX_NORMAL,
++	MT_TX_TIMING,
++	MT_TX_ADDBA,
++};
++
++#define MT_CT_INFO_APPLY_TXD		BIT(0)
++#define MT_CT_INFO_COPY_HOST_TXD_ALL	BIT(1)
++#define MT_CT_INFO_MGMT_FRAME		BIT(2)
++#define MT_CT_INFO_NONE_CIPHER_FRAME	BIT(3)
++#define MT_CT_INFO_HSR2_TX		BIT(4)
++#define MT_CT_INFO_FROM_HOST		BIT(7)
++
++#define MT_TXD_SIZE			(8 * 4)
++
++#define MT_TXD0_Q_IDX			GENMASK(31, 25)
++#define MT_TXD0_PKT_FMT			GENMASK(24, 23)
++#define MT_TXD0_ETH_TYPE_OFFSET		GENMASK(22, 16)
++#define MT_TXD0_TX_BYTES		GENMASK(15, 0)
++
++#define MT_TXD1_FIXED_RATE		BIT(31)
++#define MT_TXD1_OWN_MAC			GENMASK(30, 25)
++#define MT_TXD1_TID			GENMASK(24, 21)
++#define MT_TXD1_BIP			BIT(24)
++#define MT_TXD1_ETH_802_3		BIT(20)
++#define MT_TXD1_HDR_INFO		GENMASK(20, 16)
++#define MT_TXD1_HDR_FORMAT		GENMASK(15, 14)
++#define MT_TXD1_TGID			GENMASK(13, 12)
++#define MT_TXD1_WLAN_IDX		GENMASK(11, 0)
++
++#define MT_TXD2_POWER_OFFSET		GENMASK(31, 26)
++#define MT_TXD2_MAX_TX_TIME		GENMASK(25, 16)
++#define MT_TXD2_FRAG			GENMASK(15, 14)
++#define MT_TXD2_HTC_VLD			BIT(13)
++#define MT_TXD2_DURATION		BIT(12)
++#define MT_TXD2_HDR_PAD			GENMASK(11, 10)
++#define MT_TXD2_RTS			BIT(9)
++#define MT_TXD2_OWN_MAC_MAP		BIT(8)
++#define MT_TXD2_BF_TYPE			GENMASK(6, 7)
++#define MT_TXD2_FRAME_TYPE		GENMASK(5, 4)
++#define MT_TXD2_SUB_TYPE		GENMASK(3, 0)
++
++#define MT_TXD3_SN_VALID		BIT(31)
++#define MT_TXD3_PN_VALID		BIT(30)
++#define MT_TXD3_SW_POWER_MGMT		BIT(29)
++#define MT_TXD3_BA_DISABLE		BIT(28)
++#define MT_TXD3_SEQ			GENMASK(27, 16)
++#define MT_TXD3_REM_TX_COUNT		GENMASK(15, 11)
++#define MT_TXD3_TX_COUNT		GENMASK(10, 6)
++#define MT_TXD3_HW_AMSDU		BIT(5)
++#define MT_TXD3_BCM			BIT(4)
++#define MT_TXD3_EEOSP			BIT(3)
++#define MT_TXD3_EMRD			BIT(2)
++#define MT_TXD3_PROTECT_FRAME		BIT(1)
++#define MT_TXD3_NO_ACK			BIT(0)
++
++#define MT_TXD4_PN_LOW			GENMASK(31, 0)
++
++#define MT_TXD5_PN_HIGH			GENMASK(31, 16)
++#define MT_TXD5_FL			BIT(15)
++#define MT_TXD5_BYPASS_TBB		BIT(14)
++#define MT_TXD5_BYPASS_RBB		BIT(13)
++#define MT_TXD5_BSS_COLOR_ZERO		BIT(12)
++#define MT_TXD5_TX_STATUS_HOST		BIT(10)
++#define MT_TXD5_TX_STATUS_MCU		BIT(9)
++#define MT_TXD5_TX_STATUS_FMT		BIT(8)
++#define MT_TXD5_PID			GENMASK(7, 0)
++
++#define MT_TXD6_TX_SRC			GENMASK(31, 30)
++#define MT_TXD6_VTA			BIT(28)
++#define MT_TXD6_BW			GENMASK(25, 22)
++#define MT_TXD6_TX_RATE			GENMASK(21, 16)
++#define MT_TXD6_TIMESTAMP_OFS_EN	BIT(15)
++#define MT_TXD6_TIMESTAMP_OFS_IDX	GENMASK(14, 10)
++#define MT_TXD6_MSDU_CNT		GENMASK(9, 4)
++#define MT_TXD6_DIS_MAT			BIT(3)
++#define MT_TXD6_DAS			BIT(2)
++#define MT_TXD6_AMSDU_CAP		BIT(1)
++
++#define MT_TXD7_TXD_LEN			GENMASK(31, 30)
++#define MT_TXD7_IP_SUM			BIT(29)
++#define MT_TXD7_DROP_BY_SDO		BIT(28)
++#define MT_TXD7_MAC_TXD			BIT(27)
++#define MT_TXD7_CTXD			BIT(26)
++#define MT_TXD7_CTXD_CNT		GENMASK(25, 22)
++#define MT_TXD7_UDP_TCP_SUM		BIT(15)
++#define MT_TXD7_TX_TIME			GENMASK(9, 0)
++
++#define MT_TXD9_WLAN_IDX		GENMASK(23, 8)
++
++#define MT_TX_RATE_STBC			BIT(14)
++#define MT_TX_RATE_NSS			GENMASK(13, 10)
++#define MT_TX_RATE_MODE			GENMASK(9, 6)
++#define MT_TX_RATE_SU_EXT_TONE		BIT(5)
++#define MT_TX_RATE_DCM			BIT(4)
++/* VHT/HE only use bits 0-3 */
++#define MT_TX_RATE_IDX			GENMASK(5, 0)
++
++#define MT_TXFREE0_PKT_TYPE		GENMASK(31, 27)
++#define MT_TXFREE0_MSDU_CNT		GENMASK(25, 16)
++#define MT_TXFREE0_RX_BYTE		GENMASK(15, 0)
++
++#define MT_TXFREE1_VER			GENMASK(18, 16)
++
++#define MT_TXFREE_INFO_PAIR		BIT(31)
++#define MT_TXFREE_INFO_HEADER		BIT(30)
++#define MT_TXFREE_INFO_WLAN_ID		GENMASK(23, 12)
++#define MT_TXFREE_INFO_MSDU_ID		GENMASK(14, 0)
++#define MT_TXFREE_INFO_COUNT		GENMASK(27, 24)
++#define MT_TXFREE_INFO_STAT		GENMASK(29, 28)
++
++#define MT_TXS0_BW			GENMASK(31, 29)
++#define MT_TXS0_TID			GENMASK(28, 26)
++#define MT_TXS0_AMPDU			BIT(25)
++#define MT_TXS0_TXS_FORMAT		GENMASK(24, 23)
++#define MT_TXS0_BA_ERROR		BIT(22)
++#define MT_TXS0_PS_FLAG			BIT(21)
++#define MT_TXS0_TXOP_TIMEOUT		BIT(20)
++#define MT_TXS0_BIP_ERROR		BIT(19)
++
++#define MT_TXS0_QUEUE_TIMEOUT		BIT(18)
++#define MT_TXS0_RTS_TIMEOUT		BIT(17)
++#define MT_TXS0_ACK_TIMEOUT		BIT(16)
++#define MT_TXS0_ACK_ERROR_MASK		GENMASK(18, 16)
++
++#define MT_TXS0_TX_STATUS_HOST		BIT(15)
++#define MT_TXS0_TX_STATUS_MCU		BIT(14)
++#define MT_TXS0_TX_RATE			GENMASK(13, 0)
++
++#define MT_TXS1_SEQNO			GENMASK(31, 20)
++#define MT_TXS1_RESP_RATE		GENMASK(19, 16)
++#define MT_TXS1_RXV_SEQNO		GENMASK(15, 8)
++#define MT_TXS1_TX_POWER_DBM		GENMASK(7, 0)
++
++#define MT_TXS2_BF_STATUS		GENMASK(31, 30)
++#define MT_TXS2_BAND			GENMASK(29, 28)
++#define MT_TXS2_WCID			GENMASK(27, 16)
++#define MT_TXS2_TX_DELAY		GENMASK(15, 0)
++
++#define MT_TXS3_PID			GENMASK(31, 24)
++#define MT_TXS3_RATE_STBC		BIT(7)
++#define MT_TXS3_FIXED_RATE		BIT(6)
++#define MT_TXS3_SRC			GENMASK(5, 4)
++#define MT_TXS3_SHARED_ANTENNA		BIT(3)
++#define MT_TXS3_LAST_TX_RATE		GENMASK(2, 0)
++
++#define MT_TXS4_TIMESTAMP		GENMASK(31, 0)
++
++#define MT_TXS5_F0_FINAL_MPDU		BIT(31)
++#define MT_TXS5_F0_QOS			BIT(30)
++#define MT_TXS5_F0_TX_COUNT		GENMASK(29, 25)
++#define MT_TXS5_F0_FRONT_TIME		GENMASK(24, 0)
++#define MT_TXS5_F1_MPDU_TX_COUNT	GENMASK(31, 24)
++#define MT_TXS5_F1_MPDU_TX_BYTES	GENMASK(23, 0)
++
++#define MT_TXS6_F0_NOISE_3		GENMASK(31, 24)
++#define MT_TXS6_F0_NOISE_2		GENMASK(23, 16)
++#define MT_TXS6_F0_NOISE_1		GENMASK(15, 8)
++#define MT_TXS6_F0_NOISE_0		GENMASK(7, 0)
++#define MT_TXS6_F1_MPDU_FAIL_COUNT	GENMASK(31, 24)
++#define MT_TXS6_F1_MPDU_FAIL_BYTES	GENMASK(23, 0)
++
++#define MT_TXS7_F0_RCPI_3		GENMASK(31, 24)
++#define MT_TXS7_F0_RCPI_2		GENMASK(23, 16)
++#define MT_TXS7_F0_RCPI_1		GENMASK(15, 8)
++#define MT_TXS7_F0_RCPI_0		GENMASK(7, 0)
++#define MT_TXS7_F1_MPDU_RETRY_COUNT	GENMASK(31, 24)
++#define MT_TXS7_F1_MPDU_RETRY_BYTES	GENMASK(23, 0)
++
++#endif /* __MT76_CONNAC3_MAC_H */
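
The new mt76_connac3_mac.h is purely declarative: descriptor fields
expressed as BIT()/GENMASK() masks that drivers then access via
FIELD_PREP()/FIELD_GET(). A self-contained sketch of that idiom with
simplified userspace stand-ins for the kernel macros (the real ones live
in linux/bits.h and linux/bitfield.h):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)         (1u << (n))
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

/* stand-ins for FIELD_GET/FIELD_PREP on a constant mask */
#define FIELD_GET(mask, val)  (((val) & (mask)) / ((mask) & -(mask)))
#define FIELD_PREP(mask, val) (((val) * ((mask) & -(mask))) & (mask))

#define RXD0_LENGTH    GENMASK(15, 0)	/* mirrors MT_RXD0_LENGTH */
#define RXD0_PKT_TYPE  GENMASK(31, 27)	/* mirrors MT_RXD0_PKT_TYPE */

int main(void)
{
	uint32_t rxd0 = FIELD_PREP(RXD0_PKT_TYPE, 0x10) |
			FIELD_PREP(RXD0_LENGTH, 1500);

	printf("type=%#x len=%u\n",
	       (unsigned)FIELD_GET(RXD0_PKT_TYPE, rxd0),
	       (unsigned)FIELD_GET(RXD0_LENGTH, rxd0));
	return 0;
}
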
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index e415ac5e321f1..a800c071537f8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ 		return;
+ 	}
+ 
+-	/* error path */
+-	if (e->skb == DMA_DUMMY_DATA) {
+-		struct mt76_connac_txp_common *txp;
+-		struct mt76_txwi_cache *t;
+-		u16 token;
+-
+-		txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
+-		if (is_mt76_fw_txp(mdev))
+-			token = le16_to_cpu(txp->fw.token);
+-		else
+-			token = le16_to_cpu(txp->hw.msdu_id[0]) &
+-				~MT_MSDU_ID_VALID;
+-
+-		t = mt76_token_put(mdev, token);
+-		e->skb = t ? t->skb : NULL;
+-	}
+-
+ 	if (e->skb)
+ 		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index 7df8d95fc3fbc..13071df3f6c21 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -808,7 +808,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ 	else
+ 		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
+-	tx_info->skb = DMA_DUMMY_DATA;
++	tx_info->skb = NULL;
+ 
+ 	/* pass partial skb header to fw */
+ 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 42a983e40ade9..b39ccfc641579 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -646,11 +646,13 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 		mt7915_update_bss_color(hw, vif, &info->he_bss_color);
+ 
+ 	if (changed & (BSS_CHANGED_BEACON |
+-		       BSS_CHANGED_BEACON_ENABLED |
+-		       BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+-		       BSS_CHANGED_FILS_DISCOVERY))
++		       BSS_CHANGED_BEACON_ENABLED))
+ 		mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
+ 
++	if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++		       BSS_CHANGED_FILS_DISCOVERY))
++		mt7915_mcu_add_inband_discov(dev, vif, changed);
++
+ 	if (set_bss_info == 0)
+ 		mt7915_mcu_add_bss_info(phy, vif, false);
+ 	if (set_sta == 0)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 1a8611c6b684d..9a8b8356254b5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -1012,13 +1012,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 			struct ieee80211_sta *sta, bool bfee)
+ {
+ 	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+-	int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++	int sts = hweight16(phy->mt76->chainmask);
+ 
+ 	if (vif->type != NL80211_IFTYPE_STATION &&
+ 	    vif->type != NL80211_IFTYPE_AP)
+ 		return false;
+ 
+-	if (!bfee && tx_ant < 2)
++	if (!bfee && sts < 2)
+ 		return false;
+ 
+ 	if (sta->deflink.he_cap.has_he) {
+@@ -1879,10 +1879,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 	memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ }
+ 
+-static void
+-mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+-				struct sk_buff *rskb, struct bss_info_bcn *bcn,
+-				u32 changed)
++int
++mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++			     u32 changed)
+ {
+ #define OFFLOAD_TX_MODE_SU	BIT(0)
+ #define OFFLOAD_TX_MODE_MU	BIT(1)
+@@ -1892,14 +1891,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ 	struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ 	enum nl80211_band band = chandef->chan->band;
+ 	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
++	struct bss_info_bcn *bcn;
+ 	struct bss_info_inband_discovery *discov;
+ 	struct ieee80211_tx_info *info;
+-	struct sk_buff *skb = NULL;
+-	struct tlv *tlv;
++	struct sk_buff *rskb, *skb = NULL;
++	struct tlv *tlv, *sub_tlv;
+ 	bool ext_phy = phy != &dev->phy;
+ 	u8 *buf, interval;
+ 	int len;
+ 
++	if (vif->bss_conf.nontransmitted)
++		return 0;
++
++	rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
++					       MT7915_MAX_BSS_OFFLOAD_SIZE);
++	if (IS_ERR(rskb))
++		return PTR_ERR(rskb);
++
++	tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
++	bcn = (struct bss_info_bcn *)tlv;
++	bcn->enable = true;
++
+ 	if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ 	    vif->bss_conf.fils_discovery.max_interval) {
+ 		interval = vif->bss_conf.fils_discovery.max_interval;
+@@ -1910,27 +1922,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ 		skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ 	}
+ 
+-	if (!skb)
+-		return;
++	if (!skb) {
++		dev_kfree_skb(rskb);
++		return -EINVAL;
++	}
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+ 	info->control.vif = vif;
+ 	info->band = band;
+-
+ 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+ 
+ 	len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
+ 	len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+ 
+-	if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
++	if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ 		dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++		dev_kfree_skb(rskb);
+ 		dev_kfree_skb(skb);
+-		return;
++		return -EINVAL;
+ 	}
+ 
+-	tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+-					   len, &bcn->sub_ntlv, &bcn->len);
+-	discov = (struct bss_info_inband_discovery *)tlv;
++	sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
++					       len, &bcn->sub_ntlv, &bcn->len);
++	discov = (struct bss_info_inband_discovery *)sub_tlv;
+ 	discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ 	/* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ 	discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+@@ -1938,13 +1952,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ 	discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ 	discov->enable = true;
+ 
+-	buf = (u8 *)tlv + sizeof(*discov);
++	buf = (u8 *)sub_tlv + sizeof(*discov);
+ 
+ 	mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 			      0, changed);
+ 	memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ 
+ 	dev_kfree_skb(skb);
++
++	return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
++				     MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+ }
+ 
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1977,11 +1994,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		goto out;
+ 
+ 	skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+-	if (!skb)
++	if (!skb) {
++		dev_kfree_skb(rskb);
+ 		return -EINVAL;
++	}
+ 
+-	if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
++	if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ 		dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++		dev_kfree_skb(rskb);
+ 		dev_kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -1994,11 +2014,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ 	dev_kfree_skb(skb);
+ 
+-	if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+-	    changed & BSS_CHANGED_FILS_DISCOVERY)
+-		mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
+-						bcn, changed);
+-
+ out:
+ 	return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ 				     MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+@@ -2687,10 +2702,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+ 	if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
+ 		req.tx_path_num = fls(phy->mt76->antenna_mask);
+ 
+-	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+-	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++	if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ 		req.switch_reason = CH_SWITCH_NORMAL;
+-	else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++	else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++		 phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ 		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ 	else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ 					  NL80211_IFTYPE_AP))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+index b9ea297f382c3..1592b5d6751a0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+@@ -495,10 +495,14 @@ enum {
+ 	SER_RECOVER
+ };
+ 
+-#define MT7915_MAX_BEACON_SIZE		512
+-#define MT7915_MAX_INBAND_FRAME_SIZE	256
+-#define MT7915_MAX_BSS_OFFLOAD_SIZE	(MT7915_MAX_BEACON_SIZE +	  \
+-					 MT7915_MAX_INBAND_FRAME_SIZE +	  \
++#define MT7915_MAX_BEACON_SIZE		1308
++#define MT7915_BEACON_UPDATE_SIZE	(sizeof(struct sta_req_hdr) +	\
++					 sizeof(struct bss_info_bcn) +	\
++					 sizeof(struct bss_info_bcn_cntdwn) +	\
++					 sizeof(struct bss_info_bcn_mbss) +	\
++					 MT_TXD_SIZE +	\
++					 sizeof(struct bss_info_bcn_cont))
++#define MT7915_MAX_BSS_OFFLOAD_SIZE	(MT7915_MAX_BEACON_SIZE +	\
+ 					 MT7915_BEACON_UPDATE_SIZE)
+ 
+ #define MT7915_BSS_UPDATE_MAX_SIZE	(sizeof(struct sta_req_hdr) +	\
+@@ -511,12 +515,6 @@ enum {
+ 					 sizeof(struct bss_info_bmc_rate) +\
+ 					 sizeof(struct bss_info_ext_bss))
+ 
+-#define MT7915_BEACON_UPDATE_SIZE	(sizeof(struct sta_req_hdr) +	\
+-					 sizeof(struct bss_info_bcn_cntdwn) + \
+-					 sizeof(struct bss_info_bcn_mbss) + \
+-					 sizeof(struct bss_info_bcn_cont) + \
+-					 sizeof(struct bss_info_inband_discovery))
+-
+ static inline s8
+ mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
+ {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 0f76733c9c1ac..ed92605059b0c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -505,6 +505,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ 			 bool add);
+ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 				struct cfg80211_he_bss_color *he_bss_color);
++int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++				 u32 changed);
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			  int enable, u32 changed);
+ int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+index 6053a2556c20c..46f1360fbc59a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+@@ -48,7 +48,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
+ 	mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+ 
+-	tx_info->skb = DMA_DUMMY_DATA;
++	tx_info->skb = NULL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index f1b48cdda58f3..f7344d4c7383a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -729,16 +729,17 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 		IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ 		IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+ 
++	val = max_t(u8, sts - 1, 3);
+ 	eht_cap_elem->phy_cap_info[0] |=
+-		u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
++		u8_encode_bits(u8_get_bits(val, BIT(0)),
+ 			       IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[1] =
+-		u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
++		u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
+ 			       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+-		u8_encode_bits(sts - 1,
++		u8_encode_bits(val,
+ 			       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+-		u8_encode_bits(sts - 1,
++		u8_encode_bits(val,
+ 			       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[2] =
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 25c5deb15d213..37104e84db886 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -611,7 +611,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
+ 	case IEEE80211_STA_RX_BW_160:
+ 		status->bw = RATE_INFO_BW_160;
+ 		break;
++	/* rxv reports bw 320-1 and 320-2 separately */
+ 	case IEEE80211_STA_RX_BW_320:
++	case IEEE80211_STA_RX_BW_320 + 1:
+ 		status->bw = RATE_INFO_BW_320;
+ 		break;
+ 	default:
+@@ -1168,11 +1170,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	}
+ 
+ 	txp->fw.token = cpu_to_le16(id);
+-	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+-		txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
+-	else
+-		txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
+-	tx_info->skb = DMA_DUMMY_DATA;
++	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
++
++	tx_info->skb = NULL;
+ 
+ 	/* pass partial skb header to fw */
+ 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+@@ -1228,7 +1228,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
+ 		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ 			mt7996_tx_check_aggr(sta, txwi);
+ 	} else {
+-		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
++		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
+ 	}
+ 
+ 	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+index bc4e6c55373eb..e629324a5617e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+@@ -6,320 +6,7 @@
+ #ifndef __MT7996_MAC_H
+ #define __MT7996_MAC_H
+ 
+-#define MT_CT_PARSE_LEN			72
+-#define MT_CT_DMA_BUF_NUM		2
+-
+-#define MT_RXD0_LENGTH			GENMASK(15, 0)
+-#define MT_RXD0_PKT_TYPE		GENMASK(31, 27)
+-
+-#define MT_RXD0_MESH			BIT(18)
+-#define MT_RXD0_MHCP			BIT(19)
+-#define MT_RXD0_NORMAL_ETH_TYPE_OFS	GENMASK(22, 16)
+-#define MT_RXD0_NORMAL_IP_SUM		BIT(23)
+-#define MT_RXD0_NORMAL_UDP_TCP_SUM	BIT(24)
+-
+-#define MT_RXD0_SW_PKT_TYPE_MASK	GENMASK(31, 16)
+-#define MT_RXD0_SW_PKT_TYPE_MAP		0x380F
+-#define MT_RXD0_SW_PKT_TYPE_FRAME	0x3801
+-
+-/* RXD DW1 */
+-#define MT_RXD1_NORMAL_WLAN_IDX		GENMASK(11, 0)
+-#define MT_RXD1_NORMAL_GROUP_1		BIT(16)
+-#define MT_RXD1_NORMAL_GROUP_2		BIT(17)
+-#define MT_RXD1_NORMAL_GROUP_3		BIT(18)
+-#define MT_RXD1_NORMAL_GROUP_4		BIT(19)
+-#define MT_RXD1_NORMAL_GROUP_5		BIT(20)
+-#define MT_RXD1_NORMAL_KEY_ID		GENMASK(22, 21)
+-#define MT_RXD1_NORMAL_CM		BIT(23)
+-#define MT_RXD1_NORMAL_CLM		BIT(24)
+-#define MT_RXD1_NORMAL_ICV_ERR		BIT(25)
+-#define MT_RXD1_NORMAL_TKIP_MIC_ERR	BIT(26)
+-#define MT_RXD1_NORMAL_BAND_IDX		GENMASK(28, 27)
+-#define MT_RXD1_NORMAL_SPP_EN		BIT(29)
+-#define MT_RXD1_NORMAL_ADD_OM		BIT(30)
+-#define MT_RXD1_NORMAL_SEC_DONE		BIT(31)
+-
+-/* RXD DW2 */
+-#define MT_RXD2_NORMAL_BSSID		GENMASK(5, 0)
+-#define MT_RXD2_NORMAL_MAC_HDR_LEN	GENMASK(12, 8)
+-#define MT_RXD2_NORMAL_HDR_TRANS	BIT(7)
+-#define MT_RXD2_NORMAL_HDR_OFFSET	GENMASK(15, 13)
+-#define MT_RXD2_NORMAL_SEC_MODE		GENMASK(20, 16)
+-#define MT_RXD2_NORMAL_MU_BAR		BIT(21)
+-#define MT_RXD2_NORMAL_SW_BIT		BIT(22)
+-#define MT_RXD2_NORMAL_AMSDU_ERR	BIT(23)
+-#define MT_RXD2_NORMAL_MAX_LEN_ERROR	BIT(24)
+-#define MT_RXD2_NORMAL_HDR_TRANS_ERROR	BIT(25)
+-#define MT_RXD2_NORMAL_INT_FRAME	BIT(26)
+-#define MT_RXD2_NORMAL_FRAG		BIT(27)
+-#define MT_RXD2_NORMAL_NULL_FRAME	BIT(28)
+-#define MT_RXD2_NORMAL_NDATA		BIT(29)
+-#define MT_RXD2_NORMAL_NON_AMPDU	BIT(30)
+-#define MT_RXD2_NORMAL_BF_REPORT	BIT(31)
+-
+-/* RXD DW3 */
+-#define MT_RXD3_NORMAL_RXV_SEQ		GENMASK(7, 0)
+-#define MT_RXD3_NORMAL_CH_FREQ		GENMASK(15, 8)
+-#define MT_RXD3_NORMAL_ADDR_TYPE	GENMASK(17, 16)
+-#define MT_RXD3_NORMAL_U2M		BIT(0)
+-#define MT_RXD3_NORMAL_HTC_VLD		BIT(18)
+-#define MT_RXD3_NORMAL_BEACON_MC	BIT(20)
+-#define MT_RXD3_NORMAL_BEACON_UC	BIT(21)
+-#define MT_RXD3_NORMAL_CO_ANT		BIT(22)
+-#define MT_RXD3_NORMAL_FCS_ERR		BIT(24)
+-#define MT_RXD3_NORMAL_VLAN2ETH		BIT(31)
+-
+-/* RXD DW4 */
+-#define MT_RXD4_NORMAL_PAYLOAD_FORMAT	GENMASK(1, 0)
+-#define MT_RXD4_FIRST_AMSDU_FRAME	GENMASK(1, 0)
+-#define MT_RXD4_MID_AMSDU_FRAME		BIT(1)
+-#define MT_RXD4_LAST_AMSDU_FRAME	BIT(0)
+-
+-#define MT_RXV_HDR_BAND_IDX		BIT(24)
+-
+-/* RXD GROUP4 */
+-#define MT_RXD8_FRAME_CONTROL		GENMASK(15, 0)
+-
+-#define MT_RXD10_SEQ_CTRL		GENMASK(15, 0)
+-#define MT_RXD10_QOS_CTL		GENMASK(31, 16)
+-
+-#define MT_RXD11_HT_CONTROL		GENMASK(31, 0)
+-
+-/* P-RXV */
+-#define MT_PRXV_TX_RATE			GENMASK(6, 0)
+-#define MT_PRXV_TX_DCM			BIT(4)
+-#define MT_PRXV_TX_ER_SU_106T		BIT(5)
+-#define MT_PRXV_NSTS			GENMASK(10, 7)
+-#define MT_PRXV_TXBF			BIT(11)
+-#define MT_PRXV_HT_AD_CODE		BIT(12)
+-#define MT_PRXV_HE_RU_ALLOC		GENMASK(30, 22)
+-#define MT_PRXV_RCPI3			GENMASK(31, 24)
+-#define MT_PRXV_RCPI2			GENMASK(23, 16)
+-#define MT_PRXV_RCPI1			GENMASK(15, 8)
+-#define MT_PRXV_RCPI0			GENMASK(7, 0)
+-#define MT_PRXV_HT_SHORT_GI		GENMASK(4, 3)
+-#define MT_PRXV_HT_STBC			GENMASK(10, 9)
+-#define MT_PRXV_TX_MODE			GENMASK(14, 11)
+-#define MT_PRXV_FRAME_MODE		GENMASK(2, 0)
+-#define MT_PRXV_DCM			BIT(5)
+-
+-/* C-RXV */
+-#define MT_CRXV_HE_NUM_USER		GENMASK(26, 20)
+-#define MT_CRXV_HE_LTF_SIZE		GENMASK(28, 27)
+-#define MT_CRXV_HE_LDPC_EXT_SYM		BIT(30)
+-
+-#define MT_CRXV_HE_PE_DISAMBIG		BIT(1)
+-#define MT_CRXV_HE_UPLINK		BIT(2)
+-
+-#define MT_CRXV_HE_MU_AID		GENMASK(27, 17)
+-#define MT_CRXV_HE_BEAM_CHNG		BIT(29)
+-
+-#define MT_CRXV_HE_DOPPLER		BIT(0)
+-#define MT_CRXV_HE_BSS_COLOR		GENMASK(15, 10)
+-#define MT_CRXV_HE_TXOP_DUR		GENMASK(19, 17)
+-
+-#define MT_CRXV_HE_SR_MASK		GENMASK(11, 8)
+-#define MT_CRXV_HE_SR1_MASK		GENMASK(16, 12)
+-#define MT_CRXV_HE_SR2_MASK             GENMASK(20, 17)
+-#define MT_CRXV_HE_SR3_MASK             GENMASK(24, 21)
+-
+-#define MT_CRXV_HE_RU0			GENMASK(8, 0)
+-#define MT_CRXV_HE_RU1			GENMASK(17, 9)
+-#define MT_CRXV_HE_RU2			GENMASK(26, 18)
+-#define MT_CRXV_HE_RU3_L		GENMASK(31, 27)
+-#define MT_CRXV_HE_RU3_H		GENMASK(3, 0)
+-
+-enum tx_header_format {
+-	MT_HDR_FORMAT_802_3,
+-	MT_HDR_FORMAT_CMD,
+-	MT_HDR_FORMAT_802_11,
+-	MT_HDR_FORMAT_802_11_EXT,
+-};
+-
+-enum tx_pkt_type {
+-	MT_TX_TYPE_CT,
+-	MT_TX_TYPE_SF,
+-	MT_TX_TYPE_CMD,
+-	MT_TX_TYPE_FW,
+-};
+-
+-enum tx_port_idx {
+-	MT_TX_PORT_IDX_LMAC,
+-	MT_TX_PORT_IDX_MCU
+-};
+-
+-enum tx_mcu_port_q_idx {
+-	MT_TX_MCU_PORT_RX_Q0 = 0x20,
+-	MT_TX_MCU_PORT_RX_Q1,
+-	MT_TX_MCU_PORT_RX_Q2,
+-	MT_TX_MCU_PORT_RX_Q3,
+-	MT_TX_MCU_PORT_RX_FWDL = 0x3e
+-};
+-
+-enum tx_mgnt_type {
+-	MT_TX_NORMAL,
+-	MT_TX_TIMING,
+-	MT_TX_ADDBA,
+-};
+-
+-#define MT_CT_INFO_APPLY_TXD		BIT(0)
+-#define MT_CT_INFO_COPY_HOST_TXD_ALL	BIT(1)
+-#define MT_CT_INFO_MGMT_FRAME		BIT(2)
+-#define MT_CT_INFO_NONE_CIPHER_FRAME	BIT(3)
+-#define MT_CT_INFO_HSR2_TX		BIT(4)
+-#define MT_CT_INFO_FROM_HOST		BIT(7)
+-
+-#define MT_TXD_SIZE			(8 * 4)
+-
+-#define MT_TXD0_Q_IDX			GENMASK(31, 25)
+-#define MT_TXD0_PKT_FMT			GENMASK(24, 23)
+-#define MT_TXD0_ETH_TYPE_OFFSET		GENMASK(22, 16)
+-#define MT_TXD0_TX_BYTES		GENMASK(15, 0)
+-
+-#define MT_TXD1_FIXED_RATE		BIT(31)
+-#define MT_TXD1_OWN_MAC			GENMASK(30, 25)
+-#define MT_TXD1_TID			GENMASK(24, 21)
+-#define MT_TXD1_BIP			BIT(24)
+-#define MT_TXD1_ETH_802_3		BIT(20)
+-#define MT_TXD1_HDR_INFO		GENMASK(20, 16)
+-#define MT_TXD1_HDR_FORMAT		GENMASK(15, 14)
+-#define MT_TXD1_TGID			GENMASK(13, 12)
+-#define MT_TXD1_WLAN_IDX		GENMASK(11, 0)
+-
+-#define MT_TXD2_POWER_OFFSET		GENMASK(31, 26)
+-#define MT_TXD2_MAX_TX_TIME		GENMASK(25, 16)
+-#define MT_TXD2_FRAG			GENMASK(15, 14)
+-#define MT_TXD2_HTC_VLD			BIT(13)
+-#define MT_TXD2_DURATION		BIT(12)
+-#define MT_TXD2_HDR_PAD			GENMASK(11, 10)
+-#define MT_TXD2_RTS			BIT(9)
+-#define MT_TXD2_OWN_MAC_MAP		BIT(8)
+-#define MT_TXD2_BF_TYPE			GENMASK(6, 7)
+-#define MT_TXD2_FRAME_TYPE		GENMASK(5, 4)
+-#define MT_TXD2_SUB_TYPE		GENMASK(3, 0)
+-
+-#define MT_TXD3_SN_VALID		BIT(31)
+-#define MT_TXD3_PN_VALID		BIT(30)
+-#define MT_TXD3_SW_POWER_MGMT		BIT(29)
+-#define MT_TXD3_BA_DISABLE		BIT(28)
+-#define MT_TXD3_SEQ			GENMASK(27, 16)
+-#define MT_TXD3_REM_TX_COUNT		GENMASK(15, 11)
+-#define MT_TXD3_TX_COUNT		GENMASK(10, 6)
+-#define MT_TXD3_HW_AMSDU		BIT(5)
+-#define MT_TXD3_BCM			BIT(4)
+-#define MT_TXD3_EEOSP			BIT(3)
+-#define MT_TXD3_EMRD			BIT(2)
+-#define MT_TXD3_PROTECT_FRAME		BIT(1)
+-#define MT_TXD3_NO_ACK			BIT(0)
+-
+-#define MT_TXD4_PN_LOW			GENMASK(31, 0)
+-
+-#define MT_TXD5_PN_HIGH			GENMASK(31, 16)
+-#define MT_TXD5_FL			BIT(15)
+-#define MT_TXD5_BYPASS_TBB		BIT(14)
+-#define MT_TXD5_BYPASS_RBB		BIT(13)
+-#define MT_TXD5_BSS_COLOR_ZERO		BIT(12)
+-#define MT_TXD5_TX_STATUS_HOST		BIT(10)
+-#define MT_TXD5_TX_STATUS_MCU		BIT(9)
+-#define MT_TXD5_TX_STATUS_FMT		BIT(8)
+-#define MT_TXD5_PID			GENMASK(7, 0)
+-
+-#define MT_TXD6_TX_SRC			GENMASK(31, 30)
+-#define MT_TXD6_VTA			BIT(28)
+-#define MT_TXD6_BW			GENMASK(25, 22)
+-#define MT_TXD6_TX_RATE			GENMASK(21, 16)
+-#define MT_TXD6_TIMESTAMP_OFS_EN	BIT(15)
+-#define MT_TXD6_TIMESTAMP_OFS_IDX	GENMASK(14, 10)
+-#define MT_TXD6_MSDU_CNT		GENMASK(9, 4)
+-#define MT_TXD6_DIS_MAT			BIT(3)
+-#define MT_TXD6_DAS			BIT(2)
+-#define MT_TXD6_AMSDU_CAP		BIT(1)
+-
+-#define MT_TXD7_TXD_LEN			GENMASK(31, 30)
+-#define MT_TXD7_IP_SUM			BIT(29)
+-#define MT_TXD7_DROP_BY_SDO		BIT(28)
+-#define MT_TXD7_MAC_TXD			BIT(27)
+-#define MT_TXD7_CTXD			BIT(26)
+-#define MT_TXD7_CTXD_CNT		GENMASK(25, 22)
+-#define MT_TXD7_UDP_TCP_SUM		BIT(15)
+-#define MT_TXD7_TX_TIME			GENMASK(9, 0)
+-
+-#define MT_TX_RATE_STBC			BIT(14)
+-#define MT_TX_RATE_NSS			GENMASK(13, 10)
+-#define MT_TX_RATE_MODE			GENMASK(9, 6)
+-#define MT_TX_RATE_SU_EXT_TONE		BIT(5)
+-#define MT_TX_RATE_DCM			BIT(4)
+-/* VHT/HE only use bits 0-3 */
+-#define MT_TX_RATE_IDX			GENMASK(5, 0)
+-
+-#define MT_TXFREE0_PKT_TYPE		GENMASK(31, 27)
+-#define MT_TXFREE0_MSDU_CNT		GENMASK(25, 16)
+-#define MT_TXFREE0_RX_BYTE		GENMASK(15, 0)
+-
+-#define MT_TXFREE1_VER			GENMASK(18, 16)
+-
+-#define MT_TXFREE_INFO_PAIR		BIT(31)
+-#define MT_TXFREE_INFO_HEADER		BIT(30)
+-#define MT_TXFREE_INFO_WLAN_ID		GENMASK(23, 12)
+-#define MT_TXFREE_INFO_MSDU_ID		GENMASK(14, 0)
+-
+-#define MT_TXS0_BW			GENMASK(31, 29)
+-#define MT_TXS0_TID			GENMASK(28, 26)
+-#define MT_TXS0_AMPDU			BIT(25)
+-#define MT_TXS0_TXS_FORMAT		GENMASK(24, 23)
+-#define MT_TXS0_BA_ERROR		BIT(22)
+-#define MT_TXS0_PS_FLAG			BIT(21)
+-#define MT_TXS0_TXOP_TIMEOUT		BIT(20)
+-#define MT_TXS0_BIP_ERROR		BIT(19)
+-
+-#define MT_TXS0_QUEUE_TIMEOUT		BIT(18)
+-#define MT_TXS0_RTS_TIMEOUT		BIT(17)
+-#define MT_TXS0_ACK_TIMEOUT		BIT(16)
+-#define MT_TXS0_ACK_ERROR_MASK		GENMASK(18, 16)
+-
+-#define MT_TXS0_TX_STATUS_HOST		BIT(15)
+-#define MT_TXS0_TX_STATUS_MCU		BIT(14)
+-#define MT_TXS0_TX_RATE			GENMASK(13, 0)
+-
+-#define MT_TXS1_SEQNO			GENMASK(31, 20)
+-#define MT_TXS1_RESP_RATE		GENMASK(19, 16)
+-#define MT_TXS1_RXV_SEQNO		GENMASK(15, 8)
+-#define MT_TXS1_TX_POWER_DBM		GENMASK(7, 0)
+-
+-#define MT_TXS2_BF_STATUS		GENMASK(31, 30)
+-#define MT_TXS2_BAND			GENMASK(29, 28)
+-#define MT_TXS2_WCID			GENMASK(27, 16)
+-#define MT_TXS2_TX_DELAY		GENMASK(15, 0)
+-
+-#define MT_TXS3_PID			GENMASK(31, 24)
+-#define MT_TXS3_RATE_STBC		BIT(7)
+-#define MT_TXS3_FIXED_RATE		BIT(6)
+-#define MT_TXS3_SRC			GENMASK(5, 4)
+-#define MT_TXS3_SHARED_ANTENNA		BIT(3)
+-#define MT_TXS3_LAST_TX_RATE		GENMASK(2, 0)
+-
+-#define MT_TXS4_TIMESTAMP		GENMASK(31, 0)
+-
+-#define MT_TXS5_F0_FINAL_MPDU		BIT(31)
+-#define MT_TXS5_F0_QOS			BIT(30)
+-#define MT_TXS5_F0_TX_COUNT		GENMASK(29, 25)
+-#define MT_TXS5_F0_FRONT_TIME		GENMASK(24, 0)
+-#define MT_TXS5_F1_MPDU_TX_COUNT	GENMASK(31, 24)
+-#define MT_TXS5_F1_MPDU_TX_BYTES	GENMASK(23, 0)
+-
+-#define MT_TXS6_F0_NOISE_3		GENMASK(31, 24)
+-#define MT_TXS6_F0_NOISE_2		GENMASK(23, 16)
+-#define MT_TXS6_F0_NOISE_1		GENMASK(15, 8)
+-#define MT_TXS6_F0_NOISE_0		GENMASK(7, 0)
+-#define MT_TXS6_F1_MPDU_FAIL_COUNT	GENMASK(31, 24)
+-#define MT_TXS6_F1_MPDU_FAIL_BYTES	GENMASK(23, 0)
+-
+-#define MT_TXS7_F0_RCPI_3		GENMASK(31, 24)
+-#define MT_TXS7_F0_RCPI_2		GENMASK(23, 16)
+-#define MT_TXS7_F0_RCPI_1		GENMASK(15, 8)
+-#define MT_TXS7_F0_RCPI_0		GENMASK(7, 0)
+-#define MT_TXS7_F1_MPDU_RETRY_COUNT	GENMASK(31, 24)
+-#define MT_TXS7_F1_MPDU_RETRY_BYTES	GENMASK(23, 0)
++#include "../mt76_connac3_mac.h"
+ 
+ struct mt7996_dfs_pulse {
+ 	u32 max_width;		/* us */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index f306e9c50ea3b..623a240938dff 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -184,7 +184,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
+ 	mvif->mt76.omac_idx = idx;
+ 	mvif->phy = phy;
+ 	mvif->mt76.band_idx = band_idx;
+-	mvif->mt76.wmm_idx = band_idx;
++	mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ 
+ 	ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ 	if (ret)
+@@ -414,10 +414,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	       const struct ieee80211_tx_queue_params *params)
+ {
+ 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
++	const u8 mq_to_aci[] = {
++		[IEEE80211_AC_VO] = 3,
++		[IEEE80211_AC_VI] = 2,
++		[IEEE80211_AC_BE] = 0,
++		[IEEE80211_AC_BK] = 1,
++	};
+ 
++	/* firmware uses access class index */
++	mvif->queue_params[mq_to_aci[queue]] = *params;
+ 	/* no need to update right away, we'll get BSS_CHANGED_QOS */
+-	queue = mt76_connac_lmac_mapping(queue);
+-	mvif->queue_params[queue] = *params;
+ 
+ 	return 0;
+ }
+@@ -598,8 +604,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
+ 		mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+ 	}
+ 
+-	if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+-	    changed & BSS_CHANGED_FILS_DISCOVERY)
++	if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++		       BSS_CHANGED_FILS_DISCOVERY))
+ 		mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+ 
+ 	mutex_unlock(&dev->mt76.mutex);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 62a02b03d83ba..c45a8afc7c18a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -1916,7 +1916,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ 			bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
+ 	}
+ 
+-	buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
++	buf = (u8 *)bcn + sizeof(*bcn);
+ 	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0,
+ 			      BSS_CHANGED_BEACON);
+ 
+@@ -1934,26 +1934,22 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ 	struct sk_buff *skb, *rskb;
+ 	struct tlv *tlv;
+ 	struct bss_bcn_content_tlv *bcn;
++	int len;
+ 
+ 	rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+-					  MT7996_BEACON_UPDATE_SIZE);
++					  MT7996_MAX_BSS_OFFLOAD_SIZE);
+ 	if (IS_ERR(rskb))
+ 		return PTR_ERR(rskb);
+ 
+-	tlv = mt7996_mcu_add_uni_tlv(rskb,
+-				     UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn));
+-	bcn = (struct bss_bcn_content_tlv *)tlv;
+-	bcn->enable = en;
+-
+-	if (!en)
+-		goto out;
+-
+ 	skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+-	if (!skb)
++	if (!skb) {
++		dev_kfree_skb(rskb);
+ 		return -EINVAL;
++	}
+ 
+-	if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
++	if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ 		dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++		dev_kfree_skb(rskb);
+ 		dev_kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -1961,11 +1957,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ 	info = IEEE80211_SKB_CB(skb);
+ 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+ 
++	len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
++	tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
++	bcn = (struct bss_bcn_content_tlv *)tlv;
++	bcn->enable = en;
++	if (!en)
++		goto out;
++
+ 	mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ 	/* TODO: subtag - 11v MBSSID */
+ 	mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+-	dev_kfree_skb(skb);
+ out:
++	dev_kfree_skb(skb);
+ 	return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ 				     MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+@@ -1986,9 +1989,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ 	struct sk_buff *rskb, *skb = NULL;
+ 	struct tlv *tlv;
+ 	u8 *buf, interval;
++	int len;
++
++	if (vif->bss_conf.nontransmitted)
++		return 0;
+ 
+ 	rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+-					  MT7996_INBAND_FRAME_SIZE);
++					  MT7996_MAX_BSS_OFFLOAD_SIZE);
+ 	if (IS_ERR(rskb))
+ 		return PTR_ERR(rskb);
+ 
+@@ -2002,11 +2009,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ 		skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ 	}
+ 
+-	if (!skb)
++	if (!skb) {
++		dev_kfree_skb(rskb);
+ 		return -EINVAL;
++	}
+ 
+-	if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) {
++	if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ 		dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++		dev_kfree_skb(rskb);
+ 		dev_kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -2016,7 +2026,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ 	info->band = band;
+ 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+ 
+-	tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov));
++	len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
++
++	tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
+ 
+ 	discov = (struct bss_inband_discovery_tlv *)tlv;
+ 	discov->tx_mode = OFFLOAD_TX_MODE_SU;
+@@ -2027,7 +2039,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ 	discov->enable = true;
+ 	discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED);
+ 
+-	buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
++	buf = (u8 *)tlv + sizeof(*discov);
+ 
+ 	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed);
+ 
+@@ -2580,7 +2592,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+ 
+ 		e = (struct edca *)tlv;
+ 		e->set = WMM_PARAM_SET;
+-		e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS;
++		e->queue = ac;
+ 		e->aifs = q->aifs;
+ 		e->txop = cpu_to_le16(q->txop);
+ 
+@@ -2861,10 +2873,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
+ 		.channel_band = ch_band[chandef->chan->band],
+ 	};
+ 
+-	if (tag == UNI_CHANNEL_RX_PATH ||
+-	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++	if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ 		req.switch_reason = CH_SWITCH_NORMAL;
+-	else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++	else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++		 phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ 		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ 	else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ 					  NL80211_IFTYPE_AP))
+@@ -3208,8 +3220,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
+ 
+ 		tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
+ 		req_mod_en = (struct bf_mod_en_ctrl *)tlv;
+-		req_mod_en->bf_num = 2;
+-		req_mod_en->bf_bitmap = GENMASK(0, 0);
++		req_mod_en->bf_num = 3;
++		req_mod_en->bf_bitmap = GENMASK(2, 0);
+ 		break;
+ 	}
+ 	default:
+@@ -3449,7 +3461,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ 			       int cmd)
+ {
+ 	struct {
+-		u8 _rsv[4];
++		/* fixed field */
++		u8 bss;
++		u8 _rsv[3];
+ 
+ 		__le16 tag;
+ 		__le16 len;
+@@ -3467,7 +3481,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ 		u8 exponent;
+ 		u8 is_ap;
+ 		u8 agrt_params;
+-		u8 __rsv2[135];
++		u8 __rsv2[23];
+ 	} __packed req = {
+ 		.tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE),
+ 		.len = cpu_to_le16(sizeof(req) - 4),
+@@ -3477,6 +3491,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ 		.flowid = flow->id,
+ 		.peer_id = cpu_to_le16(flow->wcid),
+ 		.duration = flow->duration,
++		.bss = mvif->mt76.idx,
+ 		.bss_idx = mvif->mt76.idx,
+ 		.start_tsf = cpu_to_le64(flow->tsf),
+ 		.mantissa = flow->mantissa,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+index d7075a4d0667c..2620eaeaece04 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+@@ -270,8 +270,6 @@ struct bss_inband_discovery_tlv {
+ 	u8 enable;
+ 	__le16 wcid;
+ 	__le16 prob_rsp_len;
+-#define MAX_INBAND_FRAME_SIZE 512
+-	u8 pkt[MAX_INBAND_FRAME_SIZE];
+ } __packed;
+ 
+ struct bss_bcn_content_tlv {
+@@ -283,8 +281,6 @@ struct bss_bcn_content_tlv {
+ 	u8 enable;
+ 	u8 type;
+ 	__le16 pkt_len;
+-#define MAX_BEACON_SIZE 512
+-	u8 pkt[MAX_BEACON_SIZE];
+ } __packed;
+ 
+ struct bss_bcn_cntdwn_tlv {
+@@ -574,13 +570,14 @@ enum {
+ 					 sizeof(struct sta_rec_hdr_trans) +	\
+ 					 sizeof(struct tlv))
+ 
++#define MT7996_MAX_BEACON_SIZE		1342
+ #define MT7996_BEACON_UPDATE_SIZE	(sizeof(struct bss_req_hdr) +		\
+ 					 sizeof(struct bss_bcn_content_tlv) +	\
++					 MT_TXD_SIZE +				\
+ 					 sizeof(struct bss_bcn_cntdwn_tlv) +	\
+ 					 sizeof(struct bss_bcn_mbss_tlv))
+-
+-#define MT7996_INBAND_FRAME_SIZE	(sizeof(struct bss_req_hdr) +		\
+-					 sizeof(struct bss_inband_discovery_tlv))
++#define MT7996_MAX_BSS_OFFLOAD_SIZE	(MT7996_MAX_BEACON_SIZE +		\
++					 MT7996_BEACON_UPDATE_SIZE)
+ 
+ enum {
+ 	UNI_BAND_CONFIG_RADIO_ENABLE,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+index 6f61d6a106272..5a34894a533be 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+index 0b6a15c2e5ccd..d92aad60edfe9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+index 8ada31380efa4..0ff8e355c23a4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index f8ba133baff06..35bc37a3c469d 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ #define rtw_debugfs_add_core(name, mode, fopname, parent)		\
+ 	do {								\
+ 		rtw_debug_priv_ ##name.rtwdev = rtwdev;			\
+-		if (!debugfs_create_file(#name, mode,			\
++		if (IS_ERR(debugfs_create_file(#name, mode,		\
+ 					 parent, &rtw_debug_priv_ ##name,\
+-					 &file_ops_ ##fopname))		\
++					 &file_ops_ ##fopname)))	\
+ 			pr_debug("Unable to initialize debugfs:%s\n",	\
+ 			       #name);					\
+ 	} while (0)
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index c279a500b4bdb..a34bc355fa13d 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -628,8 +628,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
+ 
+ 	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ 		rxcb = &rtwusb->rx_cb[i];
+-		if (rxcb->rx_urb)
+-			usb_kill_urb(rxcb->rx_urb);
++		usb_kill_urb(rxcb->rx_urb);
+ 	}
+ }
+ 
+@@ -640,10 +639,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
+ 
+ 	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ 		rxcb = &rtwusb->rx_cb[i];
+-		if (rxcb->rx_urb) {
+-			usb_kill_urb(rxcb->rx_urb);
+-			usb_free_urb(rxcb->rx_urb);
+-		}
++		usb_kill_urb(rxcb->rx_urb);
++		usb_free_urb(rxcb->rx_urb);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
+index 6a5e52a96d183..caa22226b01bc 100644
+--- a/drivers/net/wireless/silabs/wfx/data_tx.c
++++ b/drivers/net/wireless/silabs/wfx/data_tx.c
+@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ 
+ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+ {
+-	int i;
+-	bool finished;
++	bool has_rate0 = false;
++	int i, j;
+ 
+-	/* Firmware is not able to mix rates with different flags */
+-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+-		if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+-			rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+-		if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
++	for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
++		if (rates[j].idx == -1)
++			break;
++		/* The device use the rates in descending order, whatever the request from minstrel.
++		 * We have to trade off here. Most important is to respect the primary rate
++		 * requested by minstrel. So, we drops the entries with rate higher than the
++		 * previous.
++		 */
++		if (rates[j].idx >= rates[i - 1].idx) {
++			rates[i - 1].count += rates[j].count;
++			rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
++		} else {
++			memcpy(rates + i, rates + j, sizeof(rates[i]));
++			if (rates[i].idx == 0)
++				has_rate0 = true;
++			/* The device apply Short GI only on the first rate */
+ 			rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+-		if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+-			rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+-	}
+-
+-	/* Sort rates and remove duplicates */
+-	do {
+-		finished = true;
+-		for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+-			if (rates[i + 1].idx == rates[i].idx &&
+-			    rates[i].idx != -1) {
+-				rates[i].count += rates[i + 1].count;
+-				if (rates[i].count > 15)
+-					rates[i].count = 15;
+-				rates[i + 1].idx = -1;
+-				rates[i + 1].count = 0;
+-
+-				finished = false;
+-			}
+-			if (rates[i + 1].idx > rates[i].idx) {
+-				swap(rates[i + 1], rates[i]);
+-				finished = false;
+-			}
++			i++;
+ 		}
+-	} while (!finished);
++	}
+ 	/* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+-		if (rates[i].idx == 0)
+-			break;
+-		if (rates[i].idx == -1) {
+-			rates[i].idx = 0;
+-			rates[i].count = 8; /* == hw->max_rate_tries */
+-			rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+-			break;
+-		}
++	if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
++		rates[i].idx = 0;
++		rates[i].count = 8; /* == hw->max_rate_tries */
++		rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
++		i++;
++	}
++	for (; i < IEEE80211_TX_MAX_RATES; i++) {
++		memset(rates + i, 0, sizeof(rates[i]));
++		rates[i].idx = -1;
+ 	}
+-	/* All retries use long GI */
+-	for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+-		rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ }
+ 
+ static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
+index 10dbdcdfb9ce9..0243789ba914b 100644
+--- a/drivers/nvdimm/of_pmem.c
++++ b/drivers/nvdimm/of_pmem.c
+@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
++	priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
++							GFP_KERNEL);
++	if (!priv->bus_desc.provider_name) {
++		kfree(priv);
++		return -ENOMEM;
++	}
++
+ 	priv->bus_desc.module = THIS_MODULE;
+ 	priv->bus_desc.of_node = np;
+ 
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 8f134d63af131..3f5cf1a817525 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -938,7 +938,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
+ {
+ 	unsigned int cpu, lane;
+ 
+-	cpu = get_cpu();
++	migrate_disable();
++	cpu = smp_processor_id();
+ 	if (nd_region->num_lanes < nr_cpu_ids) {
+ 		struct nd_percpu_lane *ndl_lock, *ndl_count;
+ 
+@@ -957,16 +958,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
+ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
+ {
+ 	if (nd_region->num_lanes < nr_cpu_ids) {
+-		unsigned int cpu = get_cpu();
++		unsigned int cpu = smp_processor_id();
+ 		struct nd_percpu_lane *ndl_lock, *ndl_count;
+ 
+ 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
+ 		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
+ 		if (--ndl_count->count == 0)
+ 			spin_unlock(&ndl_lock->lock);
+-		put_cpu();
+ 	}
+-	put_cpu();
++	migrate_enable();
+ }
+ EXPORT_SYMBOL(nd_region_release_lane);
+ 
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 09a223642bc12..c4995d92882b8 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -511,10 +511,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ 
+ 	req->bio = pdu->bio;
+-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
++	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ 		pdu->nvme_status = -EINTR;
+-	else
++	} else {
+ 		pdu->nvme_status = nvme_req(req)->status;
++		if (!pdu->nvme_status)
++			pdu->nvme_status = blk_status_to_errno(err);
++	}
+ 	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+ 
+ 	/*
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index ad56df98b8e63..1c1c1aa940a51 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -525,8 +525,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ 			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ 						PCI_DEVFN(dev, 0), 0);
+ 
+-			hdr_type = readb(base + PCI_HEADER_TYPE) &
+-					 PCI_HEADER_TYPE_MASK;
++			hdr_type = readb(base + PCI_HEADER_TYPE);
+ 
+ 			functions = (hdr_type & 0x80) ? 8 : 1;
+ 			for (fn = 0; fn < functions; fn++) {
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 6c54fa5684d22..575d67271ccc1 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -870,7 +870,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ 
+ put_dev:
+ 	put_device(&epc->dev);
+-	kfree(epc);
+ 
+ err_ret:
+ 	return ERR_PTR(ret);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 1bf6300592644..530c3bb5708c5 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1059,7 +1059,8 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+ 	if (state & PCIE_LINK_STATE_L0S)
+ 		link->aspm_disable |= ASPM_STATE_L0S;
+ 	if (state & PCIE_LINK_STATE_L1)
+-		link->aspm_disable |= ASPM_STATE_L1;
++		/* L1 PM substates require L1 */
++		link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ 	if (state & PCIE_LINK_STATE_L1_1)
+ 		link->aspm_disable |= ASPM_STATE_L1_1;
+ 	if (state & PCIE_LINK_STATE_L1_2)
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index 5658745c398f5..b33be1e63c98f 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
+ 		dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
+ 		skt->thread = NULL;
+ 		complete(&skt->thread_done);
++		put_device(&skt->dev);
+ 		return 0;
+ 	}
+ 	ret = pccard_sysfs_add_socket(&skt->dev);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index d500e5dbbc3f5..b4b8363d1de21 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ 	/* by default don't allow DMA */
+ 	p_dev->dma_mask = 0;
+ 	p_dev->dev.dma_mask = &p_dev->dma_mask;
+-	dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+-	if (!dev_name(&p_dev->dev))
+-		goto err_free;
+ 	p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
+ 	if (!p_dev->devname)
+ 		goto err_free;
+@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ 
+ 	pcmcia_device_query(p_dev);
+ 
+-	if (device_register(&p_dev->dev))
+-		goto err_unreg;
++	dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
++	if (device_register(&p_dev->dev)) {
++		mutex_lock(&s->ops_mutex);
++		list_del(&p_dev->socket_device_list);
++		s->device_count--;
++		mutex_unlock(&s->ops_mutex);
++		put_device(&p_dev->dev);
++		return NULL;
++	}
+ 
+ 	return p_dev;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 43aba01399bef..4a2cb49010599 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -110,7 +110,9 @@
+ 
+ #define CMN_DTM_PMEVCNTSR		0x240
+ 
+-#define CMN_DTM_UNIT_INFO		0x0910
++#define CMN650_DTM_UNIT_INFO		0x0910
++#define CMN_DTM_UNIT_INFO		0x0960
++#define CMN_DTM_UNIT_INFO_DTC_DOMAIN	GENMASK_ULL(1, 0)
+ 
+ #define CMN_DTM_NUM_COUNTERS		4
+ /* Want more local counters? Why not replicate the whole DTM! Ugh... */
+@@ -2007,6 +2009,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ 	return 0;
+ }
+ 
++static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
++{
++	int offset = CMN_DTM_UNIT_INFO;
++
++	if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
++		offset = CMN650_DTM_UNIT_INFO;
++
++	return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
++}
++
+ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+ {
+ 	int level;
+@@ -2138,7 +2150,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 		if (cmn->part == PART_CMN600)
+ 			xp->dtc = 0xf;
+ 		else
+-			xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
++			xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region);
+ 
+ 		xp->dtm = dtm - cmn->dtms;
+ 		arm_cmn_init_dtm(dtm++, xp, 0);
+diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
+index 08b3a1bf0ef62..a50a18e009033 100644
+--- a/drivers/perf/arm_pmuv3.c
++++ b/drivers/perf/arm_pmuv3.c
+@@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+ #define	ARMV8_IDX_TO_COUNTER(x)	\
+ 	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+ 
+-static inline u32 armv8pmu_pmcr_read(void)
++static inline u64 armv8pmu_pmcr_read(void)
+ {
+ 	return read_pmcr();
+ }
+ 
+-static inline void armv8pmu_pmcr_write(u32 val)
++static inline void armv8pmu_pmcr_write(u64 val)
+ {
+ 	val &= ARMV8_PMU_PMCR_MASK;
+ 	isb();
+@@ -978,7 +978,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ static void armv8pmu_reset(void *info)
+ {
+ 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+-	u32 pmcr;
++	u64 pmcr;
+ 
+ 	/* The counter and interrupt enable registers are unknown at reset. */
+ 	armv8pmu_disable_counter(U32_MAX);
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index e10fc7cb9493a..dafff711185c8 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -353,6 +353,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ 	struct hw_perf_event *hwc = &event->hw;
+ 
++	/* Check the type first before going on, otherwise it's not our event */
++	if (event->attr.type != event->pmu->type)
++		return -ENOENT;
++
+ 	event->cpu = pcie_pmu->on_cpu;
+ 
+ 	if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+@@ -360,9 +364,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ 	else
+ 		hwc->event_base = HISI_PCIE_CNT;
+ 
+-	if (event->attr.type != event->pmu->type)
+-		return -ENOENT;
+-
+ 	/* Sampling is not supported. */
+ 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ 		return -EOPNOTSUPP;
+diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+index d941e746b4248..797cf201996a9 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+@@ -505,8 +505,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
+ 	ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ 	if (ret) {
+ 		dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+-					    &pa_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
++						    &pa_pmu->node);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+index 6fe534a665eda..e706ca5676764 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+ 	ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ 	if (ret) {
+ 		dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+-					    &sllc_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
++						    &sllc_pmu->node);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index e0457d84af6b3..16869bf5bf4cc 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+ 	ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+-					    &hns3_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++						    &hns3_pmu->node);
+ 	}
+ 
+ 	return ret;
+@@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+ 	struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+ 
+ 	perf_pmu_unregister(&hns3_pmu->pmu);
+-	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+-				    &hns3_pmu->node);
++	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++					    &hns3_pmu->node);
+ }
+ 
+ static int hns3_pmu_init_dev(struct pci_dev *pdev)
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 6e8a76556e238..3a0697557da9d 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1175,6 +1175,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ 	u32 port;
+ 	u8 bit;
+ 
++	irq_chip_disable_parent(d);
++
+ 	port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ 	bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+ 
+@@ -1189,7 +1191,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ 	spin_unlock_irqrestore(&pctrl->lock, flags);
+ 
+ 	gpiochip_disable_irq(gc, hwirq);
+-	irq_chip_disable_parent(d);
+ }
+ 
+ static void rzg2l_gpio_irq_enable(struct irq_data *d)
+diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
+index 8b79492203825..f61f4c7a7b82b 100644
+--- a/drivers/platform/chrome/cros_ec.c
++++ b/drivers/platform/chrome/cros_ec.c
+@@ -320,17 +320,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev)
+ EXPORT_SYMBOL(cros_ec_unregister);
+ 
+ #ifdef CONFIG_PM_SLEEP
+-/**
+- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+- * @ec_dev: Device to suspend.
+- *
+- * This can be called by drivers to handle a suspend event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_suspend(struct cros_ec_device *ec_dev)
++static void cros_ec_send_suspend_event(struct cros_ec_device *ec_dev)
+ {
+-	struct device *dev = ec_dev->dev;
+ 	int ret;
+ 	u8 sleep_event;
+ 
+@@ -342,7 +333,26 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+ 	if (ret < 0)
+ 		dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
+ 			ret);
++}
+ 
++/**
++ * cros_ec_suspend_prepare() - Handle a suspend prepare operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend prepare stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev)
++{
++	cros_ec_send_suspend_event(ec_dev);
++	return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_prepare);
++
++static void cros_ec_disable_irq(struct cros_ec_device *ec_dev)
++{
++	struct device *dev = ec_dev->dev;
+ 	if (device_may_wakeup(dev))
+ 		ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
+ 	else
+@@ -350,7 +360,35 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+ 
+ 	disable_irq(ec_dev->irq);
+ 	ec_dev->suspended = true;
++}
+ 
++/**
++ * cros_ec_suspend_late() - Handle a suspend late operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend late stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev)
++{
++	cros_ec_disable_irq(ec_dev);
++	return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_late);
++
++/**
++ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend(struct cros_ec_device *ec_dev)
++{
++	cros_ec_send_suspend_event(ec_dev);
++	cros_ec_disable_irq(ec_dev);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_suspend);
+@@ -369,22 +407,11 @@ static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
+ 	}
+ }
+ 
+-/**
+- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+- * @ec_dev: Device to resume.
+- *
+- * This can be called by drivers to handle a resume event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_resume(struct cros_ec_device *ec_dev)
++static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
+ {
+ 	int ret;
+ 	u8 sleep_event;
+ 
+-	ec_dev->suspended = false;
+-	enable_irq(ec_dev->irq);
+-
+ 	sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
+ 		      HOST_SLEEP_EVENT_S3_RESUME :
+ 		      HOST_SLEEP_EVENT_S0IX_RESUME;
+@@ -393,6 +420,24 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ 	if (ret < 0)
+ 		dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
+ 			ret);
++}
++
++/**
++ * cros_ec_resume_complete() - Handle a resume complete operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume complete stage of resume.
++ */
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
++{
++	cros_ec_send_resume_event(ec_dev);
++}
++EXPORT_SYMBOL(cros_ec_resume_complete);
++
++static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
++{
++	ec_dev->suspended = false;
++	enable_irq(ec_dev->irq);
+ 
+ 	if (ec_dev->wake_enabled)
+ 		disable_irq_wake(ec_dev->irq);
+@@ -402,8 +447,35 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ 	 * suspend. This way the clients know what to do with them.
+ 	 */
+ 	cros_ec_report_events_during_suspend(ec_dev);
++}
+ 
++/**
++ * cros_ec_resume_early() - Handle a resume early operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume early stage of resume.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume_early(struct cros_ec_device *ec_dev)
++{
++	cros_ec_enable_irq(ec_dev);
++	return 0;
++}
++EXPORT_SYMBOL(cros_ec_resume_early);
+ 
++/**
++ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume(struct cros_ec_device *ec_dev)
++{
++	cros_ec_enable_irq(ec_dev);
++	cros_ec_send_resume_event(ec_dev);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_resume);
+diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
+index bbca0096868ac..566332f487892 100644
+--- a/drivers/platform/chrome/cros_ec.h
++++ b/drivers/platform/chrome/cros_ec.h
+@@ -14,7 +14,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
+ void cros_ec_unregister(struct cros_ec_device *ec_dev);
+ 
+ int cros_ec_suspend(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev);
+ int cros_ec_resume(struct cros_ec_device *ec_dev);
++int cros_ec_resume_early(struct cros_ec_device *ec_dev);
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev);
+ 
+ irqreturn_t cros_ec_irq_thread(int irq, void *data);
+ 
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 356572452898d..42e1770887fb0 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -549,22 +549,36 @@ MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+ static int cros_ec_lpc_prepare(struct device *dev)
+ {
+ 	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+-
+-	return cros_ec_suspend(ec_dev);
++	return cros_ec_suspend_prepare(ec_dev);
+ }
+ 
+ static void cros_ec_lpc_complete(struct device *dev)
+ {
+ 	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+-	cros_ec_resume(ec_dev);
++	cros_ec_resume_complete(ec_dev);
++}
++
++static int cros_ec_lpc_suspend_late(struct device *dev)
++{
++	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++	return cros_ec_suspend_late(ec_dev);
++}
++
++static int cros_ec_lpc_resume_early(struct device *dev)
++{
++	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++	return cros_ec_resume_early(ec_dev);
+ }
+ #endif
+ 
+ static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+ 	.prepare = cros_ec_lpc_prepare,
+-	.complete = cros_ec_lpc_complete
++	.complete = cros_ec_lpc_complete,
+ #endif
++	SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend_late, cros_ec_lpc_resume_early)
+ };
+ 
+ static struct platform_driver cros_ec_lpc_driver = {
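
The cros_ec change above splits the old single-stage suspend/resume into
prepare/suspend_late and resume_early/complete callbacks, so the EC
interrupt is masked as late as possible, unmasked as early as possible,
and the resume event to the EC is deferred to the .complete stage. A
minimal sketch of how a glue driver wires the split helpers -- the my_ec_*
names are hypothetical; the cros_ec_* helpers are the ones declared in the
cros_ec.h hunk above:

static int my_ec_suspend_late(struct device *dev)
{
        struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

        return cros_ec_suspend_late(ec_dev);    /* masks the EC IRQ */
}

static int my_ec_resume_early(struct device *dev)
{
        struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

        return cros_ec_resume_early(ec_dev);    /* unmasks the EC IRQ */
}

static const struct dev_pm_ops my_ec_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(my_ec_suspend_late, my_ec_resume_early)
};
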
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index a78ddd83cda02..317c907304149 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -911,21 +911,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ }
+ static int wmi_char_open(struct inode *inode, struct file *filp)
+ {
+-	const char *driver_name = filp->f_path.dentry->d_iname;
+-	struct wmi_block *wblock;
+-	struct wmi_block *next;
+-
+-	list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+-		if (!wblock->dev.dev.driver)
+-			continue;
+-		if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+-			filp->private_data = wblock;
+-			break;
+-		}
+-	}
++	/*
++	 * The miscdevice already stores a pointer to itself
++	 * inside filp->private_data
++	 */
++	struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+ 
+-	if (!filp->private_data)
+-		return -ENODEV;
++	filp->private_data = wblock;
+ 
+ 	return nonseekable_open(inode, filp);
+ }
+@@ -1270,8 +1262,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 	struct wmi_block *wblock, *next;
+ 	union acpi_object *obj;
+ 	acpi_status status;
+-	int retval = 0;
+ 	u32 i, total;
++	int retval;
+ 
+ 	status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ 	if (ACPI_FAILURE(status))
+@@ -1282,8 +1274,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 		return -ENXIO;
+ 
+ 	if (obj->type != ACPI_TYPE_BUFFER) {
+-		retval = -ENXIO;
+-		goto out_free_pointer;
++		kfree(obj);
++		return -ENXIO;
+ 	}
+ 
+ 	gblock = (const struct guid_block *)obj->buffer.pointer;
+@@ -1298,8 +1290,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 
+ 		wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ 		if (!wblock) {
+-			retval = -ENOMEM;
+-			break;
++			dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
++			continue;
+ 		}
+ 
+ 		wblock->acpi_device = device;
+@@ -1338,9 +1330,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 		}
+ 	}
+ 
+-out_free_pointer:
+-	kfree(out.pointer);
+-	return retval;
++	kfree(obj);
++
++	return 0;
+ }
+ 
+ /*
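
The wmi_char_open() rewrite leans on a documented property of the misc
device core: misc_open() stores the struct miscdevice pointer in
filp->private_data before calling the driver's own open(), so the
surrounding driver structure can be recovered with container_of() instead
of walking a global list. A minimal sketch with a hypothetical my_dev:

#include <linux/fs.h>
#include <linux/miscdevice.h>

struct my_dev {
        struct miscdevice char_dev;
        /* driver state ... */
};

static int my_open(struct inode *inode, struct file *filp)
{
        /* misc_open() left &my_dev.char_dev in filp->private_data */
        struct my_dev *d = container_of(filp->private_data,
                                        struct my_dev, char_dev);

        filp->private_data = d; /* later fops see the container directly */
        return nonseekable_open(inode, filp);
}
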
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index a3faa9a3de7cc..a7d529bf76adc 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -288,7 +288,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
+ {
+ 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
+ 
+-	clk_disable(p->clk);
++	clk_disable_unprepare(p->clk);
+ 
+ 	return 0;
+ }
+@@ -297,7 +297,7 @@ static int brcmstb_pwm_resume(struct device *dev)
+ {
+ 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
+ 
+-	clk_enable(p->clk);
++	clk_prepare_enable(p->clk);
+ 
+ 	return 0;
+ }
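
The pwm-brcmstb fix restores the clk API pairing rule: whatever was
brought up with clk_prepare_enable() must be torn down with
clk_disable_unprepare(); calling bare clk_disable()/clk_enable() instead
leaves the prepare count unbalanced. A sketch of the balanced pair, with
hypothetical names:

static int my_suspend(struct device *dev)
{
        struct my_priv *p = dev_get_drvdata(dev);

        clk_disable_unprepare(p->clk);  /* undoes clk_prepare_enable() */
        return 0;
}

static int my_resume(struct device *dev)
{
        struct my_priv *p = dev_get_drvdata(dev);

        return clk_prepare_enable(p->clk);
}
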
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index b1d1373648a38..c8800f84b917f 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
+ 	unsigned int cpt_num_devs;
+ 	unsigned int max_pwm_cnt;
+ 	unsigned int max_prescale;
++	struct sti_cpt_ddata *ddata;
+ };
+ 
+ struct sti_pwm_chip {
+@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ 	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ 	struct sti_pwm_compat_data *cdata = pc->cdata;
+-	struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
++	struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
+ 	struct device *dev = pc->dev;
+ 	unsigned int effective_ticks;
+ 	unsigned long long high, low;
+@@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+ 	while (cpt_int_stat) {
+ 		devicenum = ffs(cpt_int_stat) - 1;
+ 
+-		ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
++		ddata = &pc->cdata->ddata[devicenum];
+ 
+ 		/*
+ 		 * Capture input:
+@@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 			dev_err(dev, "failed to prepare clock\n");
+ 			return ret;
+ 		}
++
++		cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
++		if (!cdata->ddata)
++			return -ENOMEM;
+ 	}
+ 
+ 	pc->chip.dev = dev;
+ 	pc->chip.ops = &sti_pwm_ops;
+ 	pc->chip.npwm = pc->cdata->pwm_num_devs;
+ 
+-	ret = pwmchip_add(&pc->chip);
+-	if (ret < 0) {
+-		clk_unprepare(pc->pwm_clk);
+-		clk_unprepare(pc->cpt_clk);
+-		return ret;
+-	}
+-
+ 	for (i = 0; i < cdata->cpt_num_devs; i++) {
+-		struct sti_cpt_ddata *ddata;
+-
+-		ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+-		if (!ddata)
+-			return -ENOMEM;
++		struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+ 
+ 		init_waitqueue_head(&ddata->wait);
+ 		mutex_init(&ddata->lock);
++	}
+ 
+-		pwm_set_chip_data(&pc->chip.pwms[i], ddata);
++	ret = pwmchip_add(&pc->chip);
++	if (ret < 0) {
++		clk_unprepare(pc->pwm_clk);
++		clk_unprepare(pc->cpt_clk);
++		return ret;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, pc);
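
The pwm-sti rework replaces per-device pwm_set_chip_data() -- which was
populated only after pwmchip_add() had already made the chip and its
interrupt handler live -- with an array owned by the driver and fully
initialized before registration. A sketch of the allocate-then-register
ordering; devm_kcalloc() is the idiomatic spelling of the sized allocation
the hunk performs with devm_kzalloc():

cdata->ddata = devm_kcalloc(dev, cdata->cpt_num_devs,
                            sizeof(*cdata->ddata), GFP_KERNEL);
if (!cdata->ddata)
        return -ENOMEM;

for (i = 0; i < cdata->cpt_num_devs; i++) {
        init_waitqueue_head(&cdata->ddata[i].wait);
        mutex_init(&cdata->ddata[i].lock);
}

ret = pwmchip_add(&pc->chip);   /* interrupts may use ddata from here on */
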
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index 65fbd95f1dbb0..4ca8fbf4b3e2e 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -688,12 +688,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
+ 	const struct mt6358_regulator_info *mt6358_info;
+ 	int i, max_regulator, ret;
+ 
+-	if (mt6397->chip_id == MT6366_CHIP_ID) {
+-		max_regulator = MT6366_MAX_REGULATOR;
+-		mt6358_info = mt6366_regulators;
+-	} else {
++	switch (mt6397->chip_id) {
++	case MT6358_CHIP_ID:
+ 		max_regulator = MT6358_MAX_REGULATOR;
+ 		mt6358_info = mt6358_regulators;
++		break;
++	case MT6366_CHIP_ID:
++		max_regulator = MT6366_MAX_REGULATOR;
++		mt6358_info = mt6366_regulators;
++		break;
++	default:
++		dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
++		return -EINVAL;
+ 	}
+ 
+ 	ret = mt6358_sync_vcn33_setting(&pdev->dev);
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index cd077b7c4aff3..9f8fbfae93c7c 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1096,7 +1096,7 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+ 	RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+ 	RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+ 	RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+-	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
++	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+ 	RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+ 	RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+ 	RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
+index 3cdc015692ca6..1a65a4e0dc003 100644
+--- a/drivers/rtc/rtc-brcmstb-waketimer.c
++++ b/drivers/rtc/rtc-brcmstb-waketimer.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright © 2014-2017 Broadcom
++ * Copyright © 2014-2023 Broadcom
+  */
+ 
+ #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
+ 	u32 rate;
+ 	unsigned long rtc_alarm;
+ 	bool alarm_en;
++	bool alarm_expired;
+ };
+ 
+ #define BRCMSTB_WKTMR_EVENT		0x00
+@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
+ 	writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
+ 	writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
+ 	(void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
++	if (timer->alarm_expired) {
++		timer->alarm_expired = false;
++		/* maintain call balance */
++		enable_irq(timer->alarm_irq);
++	}
+ }
+ 
+ static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
+@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
+ 		return IRQ_HANDLED;
+ 
+ 	if (timer->alarm_en) {
+-		if (!device_may_wakeup(timer->dev))
++		if (device_may_wakeup(timer->dev)) {
++			disable_irq_nosync(irq);
++			timer->alarm_expired = true;
++		} else {
+ 			writel_relaxed(WKTMR_ALARM_EVENT,
+ 				       timer->base + BRCMSTB_WKTMR_EVENT);
++		}
+ 		rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
++	} else {
++		writel_relaxed(WKTMR_ALARM_EVENT,
++			       timer->base + BRCMSTB_WKTMR_EVENT);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
+ 		    !brcmstb_waketmr_is_pending(timer))
+ 			return -EINVAL;
+ 		timer->alarm_en = true;
+-		if (timer->alarm_irq)
++		if (timer->alarm_irq) {
++			if (timer->alarm_expired) {
++				timer->alarm_expired = false;
++				/* maintain call balance */
++				enable_irq(timer->alarm_irq);
++			}
+ 			enable_irq(timer->alarm_irq);
++		}
+ 	} else if (!enabled && timer->alarm_en) {
+ 		if (timer->alarm_irq)
+ 			disable_irq(timer->alarm_irq);
+@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
+ 	return brcmstb_waketmr_prepare_suspend(timer);
+ }
+ 
++static int brcmstb_waketmr_suspend_noirq(struct device *dev)
++{
++	struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
++
++	/* Catch any alarms occurring prior to noirq */
++	if (timer->alarm_expired && device_may_wakeup(dev))
++		return -EBUSY;
++
++	return 0;
++}
++
+ static int brcmstb_waketmr_resume(struct device *dev)
+ {
+ 	struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
+ 
+ 	return ret;
+ }
++#else
++#define brcmstb_waketmr_suspend		NULL
++#define brcmstb_waketmr_suspend_noirq	NULL
++#define brcmstb_waketmr_resume		NULL
+ #endif /* CONFIG_PM_SLEEP */
+ 
+-static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
+-			 brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
++static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
++	.suspend	= brcmstb_waketmr_suspend,
++	.suspend_noirq	= brcmstb_waketmr_suspend_noirq,
++	.resume		= brcmstb_waketmr_resume,
++};
+ 
+ static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
+ 	{ .compatible = "brcm,brcmstb-waketimer" },
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index 65b8b1338dbb0..5407556d7bde3 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -403,6 +403,7 @@ static int pcf85363_probe(struct i2c_client *client)
+ 		},
+ 	};
+ 	int ret, i, err;
++	bool wakeup_source;
+ 
+ 	if (data)
+ 		config = data;
+@@ -432,25 +433,36 @@ static int pcf85363_probe(struct i2c_client *client)
+ 	pcf85363->rtc->ops = &rtc_ops;
+ 	pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ 	pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
+-	clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
++
++	wakeup_source = device_property_read_bool(&client->dev,
++						  "wakeup-source");
++	if (client->irq > 0 || wakeup_source) {
++		regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
++		regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
++				   PIN_IO_INTAPM, PIN_IO_INTA_OUT);
++	}
+ 
+ 	if (client->irq > 0) {
+ 		unsigned long irqflags = IRQF_TRIGGER_LOW;
+ 
+ 		if (dev_fwnode(&client->dev))
+ 			irqflags = 0;
+-
+-		regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+-		regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+-				   PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+ 		ret = devm_request_threaded_irq(&client->dev, client->irq,
+ 						NULL, pcf85363_rtc_handle_irq,
+ 						irqflags | IRQF_ONESHOT,
+ 						"pcf85363", client);
+-		if (ret)
+-			dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
+-		else
+-			set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
++		if (ret) {
++			dev_warn(&client->dev,
++				 "unable to request IRQ, alarms disabled\n");
++			client->irq = 0;
++		}
++	}
++
++	if (client->irq > 0 || wakeup_source) {
++		device_init_wakeup(&client->dev, true);
++		set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
++	} else {
++		clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+ 	}
+ 
+ 	ret = devm_rtc_register_device(pcf85363->rtc);
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 420120be300f5..8028f76923b24 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1873,15 +1873,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ 			}
+ 			/* get it and thus adjust reference counter */
+ 			get_device(dev);
+-			if (decfg)
++			if (decfg) {
+ 				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
+ 					    __func__, ac->id, dom);
+-			else if (chkstop)
++			} else if (chkstop) {
+ 				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
+ 					    __func__, ac->id, dom);
+-			else
++			} else {
++				/* nudge the queue's state machine */
++				ap_queue_init_state(aq);
+ 				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
+ 					    __func__, ac->id, dom);
++			}
+ 			goto put_dev_and_continue;
+ 		}
+ 		/* handle state changes on already existing queue device */
+@@ -1903,10 +1906,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ 		} else if (!chkstop && aq->chkstop) {
+ 			/* checkstop off */
+ 			aq->chkstop = false;
+-			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+-				aq->dev_state = AP_DEV_STATE_OPERATING;
+-				aq->sm_state = AP_SM_STATE_RESET_START;
+-			}
++			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++				_ap_queue_init_state(aq);
+ 			spin_unlock_bh(&aq->lock);
+ 			AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
+ 				   __func__, ac->id, dom);
+@@ -1930,10 +1931,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ 		} else if (!decfg && !aq->config) {
+ 			/* config on this queue device */
+ 			aq->config = true;
+-			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+-				aq->dev_state = AP_DEV_STATE_OPERATING;
+-				aq->sm_state = AP_SM_STATE_RESET_START;
+-			}
++			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++				_ap_queue_init_state(aq);
+ 			spin_unlock_bh(&aq->lock);
+ 			AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
+ 				   __func__, ac->id, dom);
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index 0d7b7eb374ad1..ff6fc0b2d38fd 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -301,6 +301,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+ void ap_queue_prepare_remove(struct ap_queue *aq);
+ void ap_queue_remove(struct ap_queue *aq);
+ void ap_queue_init_state(struct ap_queue *aq);
++void _ap_queue_init_state(struct ap_queue *aq);
+ 
+ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ 			       int comp_type, unsigned int functions, int ml);
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 30df83735adf3..9cc144d154dc0 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -1205,14 +1205,19 @@ void ap_queue_remove(struct ap_queue *aq)
+ 	spin_unlock_bh(&aq->lock);
+ }
+ 
+-void ap_queue_init_state(struct ap_queue *aq)
++void _ap_queue_init_state(struct ap_queue *aq)
+ {
+-	spin_lock_bh(&aq->lock);
+ 	aq->dev_state = AP_DEV_STATE_OPERATING;
+ 	aq->sm_state = AP_SM_STATE_RESET_START;
+ 	aq->last_err_rc = 0;
+ 	aq->assoc_idx = ASSOC_IDX_INVALID;
+ 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
++}
++
++void ap_queue_init_state(struct ap_queue *aq)
++{
++	spin_lock_bh(&aq->lock);
++	_ap_queue_init_state(aq);
+ 	spin_unlock_bh(&aq->lock);
+ }
+ EXPORT_SYMBOL(ap_queue_init_state);
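
The ap_queue change is the common locked/unlocked helper split: the
leading-underscore _ap_queue_init_state() assumes the caller already holds
aq->lock, while the exported wrapper takes the lock itself, so the
bus-scan paths that run under the lock can reuse the logic without
deadlocking. A generic sketch of the pattern, with a hypothetical widget
type:

#include <linux/spinlock.h>

struct widget {
        spinlock_t lock;
        int state;
};

static void _widget_reset(struct widget *w)
{
        lockdep_assert_held(&w->lock);  /* caller must hold w->lock */
        w->state = 0;
}

void widget_reset(struct widget *w)
{
        spin_lock_bh(&w->lock);
        _widget_reset(w);
        spin_unlock_bh(&w->lock);
}
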
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index ce9eb00e2ca04..470e8e6c41b62 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -22,7 +22,6 @@
+ #include <linux/bsg-lib.h>
+ #include <asm/firmware.h>
+ #include <asm/irq.h>
+-#include <asm/rtas.h>
+ #include <asm/vio.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -5804,7 +5803,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ irq_failed:
+ 	do {
+ 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+-	} while (rtas_busy_delay(rc));
++	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ reg_failed:
+ 	LEAVE;
+ 	return rc;
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 67c19ed2219a6..221a1be098d7b 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -945,6 +945,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 	u32 version;
+ 	struct regmap *regmap;
+ 
++	if (!IS_ERR(drv_data))
++		return -EBUSY;
++
+ 	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ 	if (!drv_data) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
+index df48fbea4b686..178778b1373b1 100644
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -416,7 +416,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ 		alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
+ 		alt_port->bridge.of_node = to_of_node(fwnode);
+ 		alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
+-		alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
++		alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ 
+ 		ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ 		if (ret)
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 8962b25576156..ebc59cf81c255 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1124,6 +1124,7 @@ config SPI_XTENSA_XTFPGA
+ config SPI_ZYNQ_QSPI
+ 	tristate "Xilinx Zynq QSPI controller"
+ 	depends on ARCH_ZYNQ || COMPILE_TEST
++	depends on SPI_MEM
+ 	help
+ 	  This enables support for the Zynq Quad SPI controller
+ 	  in master mode.
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index 8e44de084bbe3..426aa885072af 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -760,7 +760,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+ 		f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ 				len : NXP_FSPI_MIN_IOMAP;
+ 
+-		f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
++		f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ 					 f->memmap_len);
+ 
+ 		if (!f->ahb_addr) {
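
The spi-nxp-fspi hunk trades the write-combined AHB mapping for a plain
ioremap(): write-combining lets the CPU merge and reorder accesses, which
the controller's memory-mapped read path evidently cannot tolerate, while
ioremap() yields strongly ordered device memory. A sketch of the mapping
life cycle, with hypothetical symbols:

void __iomem *win;

win = ioremap(phys_base + off, map_len);        /* device memory, no WC */
if (!win)
        return -ENOMEM;
memcpy_fromio(buf, win, len);   /* reads go through the mapped window */
iounmap(win);
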
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 8331e247bf5ca..ddf1c684bcc7d 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -117,7 +117,7 @@ struct omap2_mcspi_regs {
+ 
+ struct omap2_mcspi {
+ 	struct completion	txdone;
+-	struct spi_master	*master;
++	struct spi_controller	*ctlr;
+ 	/* Virtual base address of the controller */
+ 	void __iomem		*base;
+ 	unsigned long		phys;
+@@ -125,10 +125,12 @@ struct omap2_mcspi {
+ 	struct omap2_mcspi_dma	*dma_channels;
+ 	struct device		*dev;
+ 	struct omap2_mcspi_regs ctx;
++	struct clk		*ref_clk;
+ 	int			fifo_depth;
+-	bool			slave_aborted;
++	bool			target_aborted;
+ 	unsigned int		pin_dir:1;
+ 	size_t			max_xfer_len;
++	u32			ref_clk_hz;
+ };
+ 
+ struct omap2_mcspi_cs {
+@@ -141,17 +143,17 @@ struct omap2_mcspi_cs {
+ 	u32			chconf0, chctrl0;
+ };
+ 
+-static inline void mcspi_write_reg(struct spi_master *master,
++static inline void mcspi_write_reg(struct spi_controller *ctlr,
+ 		int idx, u32 val)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 
+ 	writel_relaxed(val, mcspi->base + idx);
+ }
+ 
+-static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
++static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 
+ 	return readl_relaxed(mcspi->base + idx);
+ }
+@@ -235,7 +237,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+ 
+ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ 	u32 l;
+ 
+ 	/* The controller handles the inverted chip selects
+@@ -266,24 +268,24 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ 	}
+ }
+ 
+-static void omap2_mcspi_set_mode(struct spi_master *master)
++static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
+ {
+-	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
+ 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
+ 	u32 l;
+ 
+ 	/*
+-	 * Choose master or slave mode
++	 * Choose host or target mode
+ 	 */
+-	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
++	l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
+ 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
+-	if (spi_controller_is_slave(master)) {
++	if (spi_controller_is_target(ctlr)) {
+ 		l |= (OMAP2_MCSPI_MODULCTRL_MS);
+ 	} else {
+ 		l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
+ 		l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ 	}
+-	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
++	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
+ 
+ 	ctx->modulctrl = l;
+ }
+@@ -291,14 +293,14 @@ static void omap2_mcspi_set_mode(struct spi_master *master)
+ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ 				struct spi_transfer *t, int enable)
+ {
+-	struct spi_master *master = spi->master;
++	struct spi_controller *ctlr = spi->controller;
+ 	struct omap2_mcspi_cs *cs = spi->controller_state;
+ 	struct omap2_mcspi *mcspi;
+ 	unsigned int wcnt;
+ 	int max_fifo_depth, bytes_per_word;
+ 	u32 chconf, xferlevel;
+ 
+-	mcspi = spi_master_get_devdata(master);
++	mcspi = spi_controller_get_devdata(ctlr);
+ 
+ 	chconf = mcspi_cached_chconf0(spi);
+ 	if (enable) {
+@@ -326,7 +328,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ 			xferlevel |= bytes_per_word - 1;
+ 		}
+ 
+-		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
++		mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ 		mcspi_write_chconf0(spi, chconf);
+ 		mcspi->fifo_depth = max_fifo_depth;
+ 
+@@ -364,9 +366,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+ static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
+ 				     struct completion *x)
+ {
+-	if (spi_controller_is_slave(mcspi->master)) {
++	if (spi_controller_is_target(mcspi->ctlr)) {
+ 		if (wait_for_completion_interruptible(x) ||
+-		    mcspi->slave_aborted)
++		    mcspi->target_aborted)
+ 			return -EINTR;
+ 	} else {
+ 		wait_for_completion(x);
+@@ -378,7 +380,7 @@ static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
+ static void omap2_mcspi_rx_callback(void *data)
+ {
+ 	struct spi_device *spi = data;
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+ 	/* We must disable the DMA RX request */
+@@ -390,7 +392,7 @@ static void omap2_mcspi_rx_callback(void *data)
+ static void omap2_mcspi_tx_callback(void *data)
+ {
+ 	struct spi_device *spi = data;
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+ 	/* We must disable the DMA TX request */
+@@ -407,7 +409,7 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
+ 	struct omap2_mcspi_dma  *mcspi_dma;
+ 	struct dma_async_tx_descriptor *tx;
+ 
+-	mcspi = spi_master_get_devdata(spi->master);
++	mcspi = spi_controller_get_devdata(spi->controller);
+ 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+ 	dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+@@ -445,13 +447,13 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ 	struct dma_async_tx_descriptor *tx;
+ 
+-	mcspi = spi_master_get_devdata(spi->master);
++	mcspi = spi_controller_get_devdata(spi->controller);
+ 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 	count = xfer->len;
+ 
+ 	/*
+ 	 *  In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
+-	 *  it mentions reducing DMA transfer length by one element in master
++	 *  it mentions reducing DMA transfer length by one element in host
+ 	 *  normal mode.
+ 	 */
+ 	if (mcspi->fifo_depth == 0)
+@@ -514,7 +516,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 	omap2_mcspi_set_dma_req(spi, 1, 1);
+ 
+ 	ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+-	if (ret || mcspi->slave_aborted) {
++	if (ret || mcspi->target_aborted) {
+ 		dmaengine_terminate_sync(mcspi_dma->dma_rx);
+ 		omap2_mcspi_set_dma_req(spi, 1, 0);
+ 		return 0;
+@@ -590,7 +592,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ 	void __iomem            *irqstat_reg;
+ 	int			wait_res;
+ 
+-	mcspi = spi_master_get_devdata(spi->master);
++	mcspi = spi_controller_get_devdata(spi->controller);
+ 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+ 	if (cs->word_len <= 8) {
+@@ -617,14 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ 	rx = xfer->rx_buf;
+ 	tx = xfer->tx_buf;
+ 
+-	mcspi->slave_aborted = false;
++	mcspi->target_aborted = false;
+ 	reinit_completion(&mcspi_dma->dma_tx_completion);
+ 	reinit_completion(&mcspi_dma->dma_rx_completion);
+ 	reinit_completion(&mcspi->txdone);
+ 	if (tx) {
+-		/* Enable EOW IRQ to know end of tx in slave mode */
+-		if (spi_controller_is_slave(spi->master))
+-			mcspi_write_reg(spi->master,
++		/* Enable EOW IRQ to know end of tx in target mode */
++		if (spi_controller_is_target(spi->controller))
++			mcspi_write_reg(spi->controller,
+ 					OMAP2_MCSPI_IRQENABLE,
+ 					OMAP2_MCSPI_IRQSTATUS_EOW);
+ 		omap2_mcspi_tx_dma(spi, xfer, cfg);
+@@ -637,15 +639,15 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ 		int ret;
+ 
+ 		ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
+-		if (ret || mcspi->slave_aborted) {
++		if (ret || mcspi->target_aborted) {
+ 			dmaengine_terminate_sync(mcspi_dma->dma_tx);
+ 			omap2_mcspi_set_dma_req(spi, 0, 0);
+ 			return 0;
+ 		}
+ 
+-		if (spi_controller_is_slave(mcspi->master)) {
++		if (spi_controller_is_target(mcspi->ctlr)) {
+ 			ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
+-			if (ret || mcspi->slave_aborted)
++			if (ret || mcspi->target_aborted)
+ 				return 0;
+ 		}
+ 
+@@ -656,7 +658,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ 						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ 				dev_err(&spi->dev, "EOW timed out\n");
+ 
+-			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
++			mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
+ 					OMAP2_MCSPI_IRQSTATUS_EOW);
+ 		}
+ 
+@@ -880,12 +882,12 @@ out:
+ 	return count - c;
+ }
+ 
+-static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
++static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
+ {
+ 	u32 div;
+ 
+ 	for (div = 0; div < 15; div++)
+-		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
++		if (speed_hz >= (ref_clk_hz >> div))
+ 			return div;
+ 
+ 	return 15;
+@@ -897,11 +899,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ {
+ 	struct omap2_mcspi_cs *cs = spi->controller_state;
+ 	struct omap2_mcspi *mcspi;
+-	u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
++	u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
+ 	u8 word_len = spi->bits_per_word;
+ 	u32 speed_hz = spi->max_speed_hz;
+ 
+-	mcspi = spi_master_get_devdata(spi->master);
++	mcspi = spi_controller_get_devdata(spi->controller);
+ 
+ 	if (t != NULL && t->bits_per_word)
+ 		word_len = t->bits_per_word;
+@@ -911,14 +913,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ 	if (t && t->speed_hz)
+ 		speed_hz = t->speed_hz;
+ 
+-	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+-	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+-		clkd = omap2_mcspi_calc_divisor(speed_hz);
+-		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
++	ref_clk_hz = mcspi->ref_clk_hz;
++	speed_hz = min_t(u32, speed_hz, ref_clk_hz);
++	if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
++		clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
++		speed_hz = ref_clk_hz >> clkd;
+ 		clkg = 0;
+ 	} else {
+-		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+-		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
++		div = (ref_clk_hz + speed_hz - 1) / speed_hz;
++		speed_hz = ref_clk_hz / div;
+ 		clkd = (div - 1) & 0xf;
+ 		extclk = (div - 1) >> 4;
+ 		clkg = OMAP2_MCSPI_CHCONF_CLKG;
+@@ -926,7 +929,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ 
+ 	l = mcspi_cached_chconf0(spi);
+ 
+-	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
++	/* standard 4-wire host mode:  SCK, MOSI/out, MISO/in, nCS
+ 	 * REVISIT: this controller could support SPI_3WIRE mode.
+ 	 */
+ 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+@@ -1017,13 +1020,13 @@ no_dma:
+ 	return ret;
+ }
+ 
+-static void omap2_mcspi_release_dma(struct spi_master *master)
++static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 	struct omap2_mcspi_dma	*mcspi_dma;
+ 	int i;
+ 
+-	for (i = 0; i < master->num_chipselect; i++) {
++	for (i = 0; i < ctlr->num_chipselect; i++) {
+ 		mcspi_dma = &mcspi->dma_channels[i];
+ 
+ 		if (mcspi_dma->dma_rx) {
+@@ -1054,7 +1057,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ {
+ 	bool			initial_setup = false;
+ 	int			ret;
+-	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(spi->controller);
+ 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
+ 	struct omap2_mcspi_cs	*cs = spi->controller_state;
+ 
+@@ -1096,24 +1099,24 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+ 	struct omap2_mcspi *mcspi = data;
+ 	u32 irqstat;
+ 
+-	irqstat	= mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
++	irqstat	= mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
+ 	if (!irqstat)
+ 		return IRQ_NONE;
+ 
+-	/* Disable IRQ and wakeup slave xfer task */
+-	mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
++	/* Disable IRQ and wakeup target xfer task */
++	mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
+ 	if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
+ 		complete(&mcspi->txdone);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int omap2_mcspi_slave_abort(struct spi_master *master)
++static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 	struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
+ 
+-	mcspi->slave_aborted = true;
++	mcspi->target_aborted = true;
+ 	complete(&mcspi_dma->dma_rx_completion);
+ 	complete(&mcspi_dma->dma_tx_completion);
+ 	complete(&mcspi->txdone);
+@@ -1121,7 +1124,7 @@ static int omap2_mcspi_slave_abort(struct spi_master *master)
+ 	return 0;
+ }
+ 
+-static int omap2_mcspi_transfer_one(struct spi_master *master,
++static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
+ 				    struct spi_device *spi,
+ 				    struct spi_transfer *t)
+ {
+@@ -1129,7 +1132,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ 	/* We only enable one channel at a time -- the one whose message is
+ 	 * at the head of the queue -- although this controller would gladly
+ 	 * arbitrate among multiple channels.  This corresponds to "single
+-	 * channel" master mode.  As a side effect, we need to manage the
++	 * channel" host mode.  As a side effect, we need to manage the
+ 	 * chipselect with the FORCE bit ... CS != channel enable.
+ 	 */
+ 
+@@ -1141,13 +1144,13 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ 	int				status = 0;
+ 	u32				chconf;
+ 
+-	mcspi = spi_master_get_devdata(master);
++	mcspi = spi_controller_get_devdata(ctlr);
+ 	mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
+ 	cs = spi->controller_state;
+ 	cd = spi->controller_data;
+ 
+ 	/*
+-	 * The slave driver could have changed spi->mode in which case
++	 * The target driver could have changed spi->mode in which case
+ 	 * it will be different from cs->mode (the current hardware setup).
+ 	 * If so, set par_override (even though its not a parity issue) so
+ 	 * omap2_mcspi_setup_transfer will be called to configure the hardware
+@@ -1175,7 +1178,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ 	if (cd && cd->cs_per_word) {
+ 		chconf = mcspi->ctx.modulctrl;
+ 		chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+-		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++		mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ 		mcspi->ctx.modulctrl =
+ 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ 	}
+@@ -1201,8 +1204,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ 		unsigned	count;
+ 
+ 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+-		    master->cur_msg_mapped &&
+-		    master->can_dma(master, spi, t))
++		    ctlr->cur_msg_mapped &&
++		    ctlr->can_dma(ctlr, spi, t))
+ 			omap2_mcspi_set_fifo(spi, t, 1);
+ 
+ 		omap2_mcspi_set_enable(spi, 1);
+@@ -1213,8 +1216,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ 					+ OMAP2_MCSPI_TX0);
+ 
+ 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+-		    master->cur_msg_mapped &&
+-		    master->can_dma(master, spi, t))
++		    ctlr->cur_msg_mapped &&
++		    ctlr->can_dma(ctlr, spi, t))
+ 			count = omap2_mcspi_txrx_dma(spi, t);
+ 		else
+ 			count = omap2_mcspi_txrx_pio(spi, t);
+@@ -1240,7 +1243,7 @@ out:
+ 	if (cd && cd->cs_per_word) {
+ 		chconf = mcspi->ctx.modulctrl;
+ 		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+-		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++		mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ 		mcspi->ctx.modulctrl =
+ 			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ 	}
+@@ -1256,10 +1259,10 @@ out:
+ 	return status;
+ }
+ 
+-static int omap2_mcspi_prepare_message(struct spi_master *master,
++static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
+ 				       struct spi_message *msg)
+ {
+-	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
++	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
+ 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
+ 	struct omap2_mcspi_cs	*cs;
+ 
+@@ -1283,29 +1286,29 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
+ 	return 0;
+ }
+ 
+-static bool omap2_mcspi_can_dma(struct spi_master *master,
++static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
+ 				struct spi_device *spi,
+ 				struct spi_transfer *xfer)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ 	struct omap2_mcspi_dma *mcspi_dma =
+ 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+ 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
+ 		return false;
+ 
+-	if (spi_controller_is_slave(master))
++	if (spi_controller_is_target(ctlr))
+ 		return true;
+ 
+-	master->dma_rx = mcspi_dma->dma_rx;
+-	master->dma_tx = mcspi_dma->dma_tx;
++	ctlr->dma_rx = mcspi_dma->dma_rx;
++	ctlr->dma_tx = mcspi_dma->dma_tx;
+ 
+ 	return (xfer->len >= DMA_MIN_BYTES);
+ }
+ 
+ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+ {
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ 	struct omap2_mcspi_dma *mcspi_dma =
+ 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ 
+@@ -1317,7 +1320,7 @@ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+ 
+ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ {
+-	struct spi_master	*master = mcspi->master;
++	struct spi_controller	*ctlr = mcspi->ctlr;
+ 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
+ 	int			ret = 0;
+ 
+@@ -1325,11 +1328,11 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
++	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
+ 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+ 
+-	omap2_mcspi_set_mode(master);
++	omap2_mcspi_set_mode(ctlr);
+ 	pm_runtime_mark_last_busy(mcspi->dev);
+ 	pm_runtime_put_autosuspend(mcspi->dev);
+ 	return 0;
+@@ -1353,8 +1356,8 @@ static int omap_mcspi_runtime_suspend(struct device *dev)
+  */
+ static int omap_mcspi_runtime_resume(struct device *dev)
+ {
+-	struct spi_master *master = dev_get_drvdata(dev);
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct spi_controller *ctlr = dev_get_drvdata(dev);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ 	struct omap2_mcspi_cs *cs;
+ 	int error;
+@@ -1364,8 +1367,8 @@ static int omap_mcspi_runtime_resume(struct device *dev)
+ 		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
+ 
+ 	/* McSPI: context restore */
+-	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+-	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
++	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
++	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+ 
+ 	list_for_each_entry(cs, &ctx->cs, node) {
+ 		/*
+@@ -1420,7 +1423,7 @@ MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+ 
+ static int omap2_mcspi_probe(struct platform_device *pdev)
+ {
+-	struct spi_master	*master;
++	struct spi_controller	*ctlr;
+ 	const struct omap2_mcspi_platform_config *pdata;
+ 	struct omap2_mcspi	*mcspi;
+ 	struct resource		*r;
+@@ -1430,32 +1433,30 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 	const struct of_device_id *match;
+ 
+ 	if (of_property_read_bool(node, "spi-slave"))
+-		master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
++		ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
+ 	else
+-		master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
+-	if (!master)
++		ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
++	if (!ctlr)
+ 		return -ENOMEM;
+ 
+ 	/* the spi->mode bits understood by this driver: */
+-	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+-	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+-	master->setup = omap2_mcspi_setup;
+-	master->auto_runtime_pm = true;
+-	master->prepare_message = omap2_mcspi_prepare_message;
+-	master->can_dma = omap2_mcspi_can_dma;
+-	master->transfer_one = omap2_mcspi_transfer_one;
+-	master->set_cs = omap2_mcspi_set_cs;
+-	master->cleanup = omap2_mcspi_cleanup;
+-	master->slave_abort = omap2_mcspi_slave_abort;
+-	master->dev.of_node = node;
+-	master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+-	master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+-	master->use_gpio_descriptors = true;
+-
+-	platform_set_drvdata(pdev, master);
+-
+-	mcspi = spi_master_get_devdata(master);
+-	mcspi->master = master;
++	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
++	ctlr->setup = omap2_mcspi_setup;
++	ctlr->auto_runtime_pm = true;
++	ctlr->prepare_message = omap2_mcspi_prepare_message;
++	ctlr->can_dma = omap2_mcspi_can_dma;
++	ctlr->transfer_one = omap2_mcspi_transfer_one;
++	ctlr->set_cs = omap2_mcspi_set_cs;
++	ctlr->cleanup = omap2_mcspi_cleanup;
++	ctlr->target_abort = omap2_mcspi_target_abort;
++	ctlr->dev.of_node = node;
++	ctlr->use_gpio_descriptors = true;
++
++	platform_set_drvdata(pdev, ctlr);
++
++	mcspi = spi_controller_get_devdata(ctlr);
++	mcspi->ctlr = ctlr;
+ 
+ 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ 	if (match) {
+@@ -1463,24 +1464,24 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 		pdata = match->data;
+ 
+ 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+-		master->num_chipselect = num_cs;
++		ctlr->num_chipselect = num_cs;
+ 		if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
+ 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ 	} else {
+ 		pdata = dev_get_platdata(&pdev->dev);
+-		master->num_chipselect = pdata->num_cs;
++		ctlr->num_chipselect = pdata->num_cs;
+ 		mcspi->pin_dir = pdata->pin_dir;
+ 	}
+ 	regs_offset = pdata->regs_offset;
+ 	if (pdata->max_xfer_len) {
+ 		mcspi->max_xfer_len = pdata->max_xfer_len;
+-		master->max_transfer_size = omap2_mcspi_max_xfer_size;
++		ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
+ 	}
+ 
+ 	mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ 	if (IS_ERR(mcspi->base)) {
+ 		status = PTR_ERR(mcspi->base);
+-		goto free_master;
++		goto free_ctlr;
+ 	}
+ 	mcspi->phys = r->start + regs_offset;
+ 	mcspi->base += regs_offset;
+@@ -1489,38 +1490,44 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 
+ 	INIT_LIST_HEAD(&mcspi->ctx.cs);
+ 
+-	mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
++	mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
+ 					   sizeof(struct omap2_mcspi_dma),
+ 					   GFP_KERNEL);
+ 	if (mcspi->dma_channels == NULL) {
+ 		status = -ENOMEM;
+-		goto free_master;
++		goto free_ctlr;
+ 	}
+ 
+-	for (i = 0; i < master->num_chipselect; i++) {
++	for (i = 0; i < ctlr->num_chipselect; i++) {
+ 		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+ 		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+ 
+ 		status = omap2_mcspi_request_dma(mcspi,
+ 						 &mcspi->dma_channels[i]);
+ 		if (status == -EPROBE_DEFER)
+-			goto free_master;
++			goto free_ctlr;
+ 	}
+ 
+ 	status = platform_get_irq(pdev, 0);
+-	if (status < 0) {
+-		dev_err_probe(&pdev->dev, status, "no irq resource found\n");
+-		goto free_master;
+-	}
++	if (status < 0)
++		goto free_ctlr;
+ 	init_completion(&mcspi->txdone);
+ 	status = devm_request_irq(&pdev->dev, status,
+ 				  omap2_mcspi_irq_handler, 0, pdev->name,
+ 				  mcspi);
+ 	if (status) {
+ 		dev_err(&pdev->dev, "Cannot request IRQ");
+-		goto free_master;
++		goto free_ctlr;
+ 	}
+ 
++	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
++	if (mcspi->ref_clk)
++		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
++	else
++		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
++	ctlr->max_speed_hz = mcspi->ref_clk_hz;
++	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
++
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ 	pm_runtime_enable(&pdev->dev);
+@@ -1529,7 +1536,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 	if (status < 0)
+ 		goto disable_pm;
+ 
+-	status = devm_spi_register_controller(&pdev->dev, master);
++	status = devm_spi_register_controller(&pdev->dev, ctlr);
+ 	if (status < 0)
+ 		goto disable_pm;
+ 
+@@ -1539,18 +1546,18 @@ disable_pm:
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+-free_master:
+-	omap2_mcspi_release_dma(master);
+-	spi_master_put(master);
++free_ctlr:
++	omap2_mcspi_release_dma(ctlr);
++	spi_controller_put(ctlr);
+ 	return status;
+ }
+ 
+ static void omap2_mcspi_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = platform_get_drvdata(pdev);
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct spi_controller *ctlr = platform_get_drvdata(pdev);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 
+-	omap2_mcspi_release_dma(master);
++	omap2_mcspi_release_dma(ctlr);
+ 
+ 	pm_runtime_dont_use_autosuspend(mcspi->dev);
+ 	pm_runtime_put_sync(mcspi->dev);
+@@ -1562,8 +1569,8 @@ MODULE_ALIAS("platform:omap2_mcspi");
+ 
+ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ {
+-	struct spi_master *master = dev_get_drvdata(dev);
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct spi_controller *ctlr = dev_get_drvdata(dev);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 	int error;
+ 
+ 	error = pinctrl_pm_select_sleep_state(dev);
+@@ -1571,9 +1578,9 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+ 			 __func__, error);
+ 
+-	error = spi_master_suspend(master);
++	error = spi_controller_suspend(ctlr);
+ 	if (error)
+-		dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
++		dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
+ 			 __func__, error);
+ 
+ 	return pm_runtime_force_suspend(dev);
+@@ -1581,13 +1588,13 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ 
+ static int __maybe_unused omap2_mcspi_resume(struct device *dev)
+ {
+-	struct spi_master *master = dev_get_drvdata(dev);
+-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++	struct spi_controller *ctlr = dev_get_drvdata(dev);
++	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ 	int error;
+ 
+-	error = spi_master_resume(master);
++	error = spi_controller_resume(ctlr);
+ 	if (error)
+-		dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
++		dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
+ 			 __func__, error);
+ 
+ 	return pm_runtime_force_resume(dev);
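
Beyond the mechanical spi_master -> spi_controller renames, the functional
change in the omap2-mcspi diff is that the divider math now uses the rate
of an optional reference clock instead of the hardcoded
OMAP2_MCSPI_MAX_FREQ. A worked sketch, assuming the 48 MHz default and an
OMAP2_MCSPI_MAX_DIVIDER of 4096:

/* Requesting 10 MHz: 10 MHz >= 48 MHz / 4096, so the one-clock
 * granularity (CLKG) branch applies.
 */
u32 ref = 48000000, want = 10000000;
u32 div = DIV_ROUND_UP(ref, want);      /* 5 */
u32 actual = ref / div;                 /* 9600000 -> bus runs at 9.6 MHz */
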
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index c2915f7672ccb..b4607d3aac92d 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1093,6 +1093,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ 	reset_control_deassert(tspi->rst);
+ 
+ 	spi_irq = platform_get_irq(pdev, 0);
++	if (spi_irq < 0)
++		return spi_irq;
+ 	tspi->irq = spi_irq;
+ 	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ 				   tegra_slink_isr_thread, IRQF_ONESHOT,
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index fa86a658fdc6c..c0d8c8265658b 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
+ {
+ 	struct cedrus_dev *dev = dev_get_drvdata(device);
+ 
+-	reset_control_assert(dev->rstc);
+-
+ 	clk_disable_unprepare(dev->ram_clk);
+ 	clk_disable_unprepare(dev->mod_clk);
+ 	clk_disable_unprepare(dev->ahb_clk);
+ 
++	reset_control_assert(dev->rstc);
++
+ 	return 0;
+ }
+ 
+@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
+ 	struct cedrus_dev *dev = dev_get_drvdata(device);
+ 	int ret;
+ 
++	ret = reset_control_reset(dev->rstc);
++	if (ret) {
++		dev_err(dev->dev, "Failed to apply reset\n");
++
++		return ret;
++	}
++
+ 	ret = clk_prepare_enable(dev->ahb_clk);
+ 	if (ret) {
+ 		dev_err(dev->dev, "Failed to enable AHB clock\n");
+ 
+-		return ret;
++		goto err_rst;
+ 	}
+ 
+ 	ret = clk_prepare_enable(dev->mod_clk);
+@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
+ 		goto err_mod_clk;
+ 	}
+ 
+-	ret = reset_control_reset(dev->rstc);
+-	if (ret) {
+-		dev_err(dev->dev, "Failed to apply reset\n");
+-
+-		goto err_ram_clk;
+-	}
+-
+ 	return 0;
+ 
+-err_ram_clk:
+-	clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ 	clk_disable_unprepare(dev->mod_clk);
+ err_ahb_clk:
+ 	clk_disable_unprepare(dev->ahb_clk);
++err_rst:
++	reset_control_assert(dev->rstc);
+ 
+ 	return ret;
+ }
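
The cedrus reordering makes suspend and resume exact mirrors: resume now
releases the reset line before ungating any clock, suspend gates all
clocks before asserting reset again, and the resume error path unwinds in
reverse order -- apparently the sequence this video IP requires. A
condensed sketch of the symmetric shape:

/* resume: reset first, then clocks; unwind in reverse on failure */
ret = reset_control_reset(rstc);
if (ret)
        return ret;
ret = clk_prepare_enable(ahb_clk);
if (ret) {
        reset_control_assert(rstc);
        return ret;
}

/* suspend: clocks first, then reset */
clk_disable_unprepare(ahb_clk);
reset_control_assert(rstc);
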
+diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
+index f59d36de20a09..677ff04d91ea0 100644
+--- a/drivers/thermal/mediatek/auxadc_thermal.c
++++ b/drivers/thermal/mediatek/auxadc_thermal.c
+@@ -1268,7 +1268,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 
+ 	mtk_thermal_turn_on_buffer(mt, apmixed_base);
+ 
+-	if (mt->conf->version != MTK_THERMAL_V2)
++	if (mt->conf->version != MTK_THERMAL_V1)
+ 		mtk_thermal_release_periodic_ts(mt, auxadc_base);
+ 
+ 	if (mt->conf->version == MTK_THERMAL_V1)
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index cc2b5e81c6205..f66d8439ae9de 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -667,7 +667,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (result)
+ 		goto release_ida;
+ 
+-	sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++	snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
++		 dev->id);
+ 	sysfs_attr_init(&dev->attr.attr);
+ 	dev->attr.attr.name = dev->attr_name;
+ 	dev->attr.attr.mode = 0444;
+@@ -676,7 +677,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (result)
+ 		goto remove_symbol_link;
+ 
+-	sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
++	snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
++		 "cdev%d_weight", dev->id);
+ 	sysfs_attr_init(&dev->weight_attr.attr);
+ 	dev->weight_attr.attr.name = dev->weight_attr_name;
+ 	dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
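
The thermal_core hunks are the standard hardening of formatted sysfs
names: snprintf() bounds the write to the destination buffer, so an
unexpectedly large cooling-device id truncates the name instead of
overflowing dev->attr_name. Minimal sketch with a hypothetical buffer
size:

char attr_name[20];     /* THERMAL_NAME_LENGTH-sized in the real struct */

snprintf(attr_name, sizeof(attr_name), "cdev%d_trip_point", 1234567);
/* worst case the name is truncated, never written past attr_name[19] */
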
+diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
+index 907f3a4d7bc8c..21736e02fa360 100644
+--- a/drivers/thermal/thermal_trip.c
++++ b/drivers/thermal/thermal_trip.c
+@@ -57,6 +57,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ {
+ 	struct thermal_trip trip;
+ 	int low = -INT_MAX, high = INT_MAX;
++	bool same_trip = false;
+ 	int i, ret;
+ 
+ 	lockdep_assert_held(&tz->lock);
+@@ -65,6 +66,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ 		return;
+ 
+ 	for (i = 0; i < tz->num_trips; i++) {
++		bool low_set = false;
+ 		int trip_low;
+ 
+ 		ret = __thermal_zone_get_trip(tz, i , &trip);
+@@ -73,18 +75,31 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ 
+ 		trip_low = trip.temperature - trip.hysteresis;
+ 
+-		if (trip_low < tz->temperature && trip_low > low)
++		if (trip_low < tz->temperature && trip_low > low) {
+ 			low = trip_low;
++			low_set = true;
++			same_trip = false;
++		}
+ 
+ 		if (trip.temperature > tz->temperature &&
+-		    trip.temperature < high)
++		    trip.temperature < high) {
+ 			high = trip.temperature;
++			same_trip = low_set;
++		}
+ 	}
+ 
+ 	/* No need to change trip points */
+ 	if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ 		return;
+ 
++	/*
++	 * If "high" and "low" are the same, skip the change unless this is the
++	 * first time.
++	 */
++	if (same_trip && (tz->prev_low_trip != -INT_MAX ||
++	    tz->prev_high_trip != INT_MAX))
++		return;
++
+ 	tz->prev_low_trip = low;
+ 	tz->prev_high_trip = high;
+ 
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index 0d04287da0984..ef8741c3e6629 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
+ 		return;
+ 	}
+ 
+-	spin_lock_irq(&current->sighand->siglock);
+-	put_pid(current->signal->tty_old_pgrp);
+-	current->signal->tty_old_pgrp = NULL;
+-	tty = tty_kref_get(current->signal->tty);
+-	spin_unlock_irq(&current->sighand->siglock);
+-
++	tty = get_current_tty();
+ 	if (tty) {
+ 		unsigned long flags;
+ 
+@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
+ 		tty_kref_put(tty);
+ 	}
+ 
++	/* If tty->ctrl.pgrp is not NULL, it may be assigned to
++	 * current->signal->tty_old_pgrp in a race condition, and
++	 * cause pid memleak. Release current->signal->tty_old_pgrp
++	 * after tty->ctrl.pgrp set to NULL.
++	 */
++	spin_lock_irq(&current->sighand->siglock);
++	put_pid(current->signal->tty_old_pgrp);
++	current->signal->tty_old_pgrp = NULL;
++	spin_unlock_irq(&current->sighand->siglock);
++
+ 	/* Now clear signal->tty under the lock */
+ 	read_lock(&tasklist_lock);
+ 	session_clear_tty(task_session(current));
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 3349c46e5fa2c..13cd0f1207bf1 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3665,7 +3665,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ 		 */
+ 		ret = utf16s_to_utf8s(uc_str->uc,
+ 				      uc_str->len - QUERY_DESC_HDR_SIZE,
+-				      UTF16_BIG_ENDIAN, str, ascii_len);
++				      UTF16_BIG_ENDIAN, str, ascii_len - 1);
+ 
+ 		/* replace non-printable or non-ASCII characters with spaces */
+ 		for (i = 0; i < ret; i++)
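
The ufshcd fix reserves room for the terminator: utf16s_to_utf8s() may
emit up to the byte limit it is given, and the string buffer's trailing
NUL only survives because kzalloc() zeroed it -- provided the conversion
never writes the last byte. Sketch with hypothetical lengths:

u8 *str = kzalloc(ascii_len, GFP_KERNEL);       /* zero-filled */
if (!str)
        return -ENOMEM;
/* cap output at ascii_len - 1 so str[ascii_len - 1] stays '\0' */
ret = utf16s_to_utf8s(uc, uc_len, UTF16_BIG_ENDIAN, str, ascii_len - 1);
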
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 08af26b762a2d..0cce192083701 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -30,8 +30,7 @@ struct ehci_ci_priv {
+ };
+ 
+ struct ci_hdrc_dma_aligned_buffer {
+-	void *kmalloc_ptr;
+-	void *old_xfer_buffer;
++	void *original_buffer;
+ 	u8 data[];
+ };
+ 
+@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
+-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
++static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
+ {
+ 	struct ci_hdrc_dma_aligned_buffer *temp;
+-	size_t length;
+ 
+ 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ 		return;
++	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ 
+ 	temp = container_of(urb->transfer_buffer,
+ 			    struct ci_hdrc_dma_aligned_buffer, data);
++	urb->transfer_buffer = temp->original_buffer;
++
++	if (copy_back && usb_urb_dir_in(urb)) {
++		size_t length;
+ 
+-	if (usb_urb_dir_in(urb)) {
+ 		if (usb_pipeisoc(urb->pipe))
+ 			length = urb->transfer_buffer_length;
+ 		else
+ 			length = urb->actual_length;
+ 
+-		memcpy(temp->old_xfer_buffer, temp->data, length);
++		memcpy(temp->original_buffer, temp->data, length);
+ 	}
+-	urb->transfer_buffer = temp->old_xfer_buffer;
+-	kfree(temp->kmalloc_ptr);
+ 
+-	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
++	kfree(temp);
+ }
+ 
+ static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+-	struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+-	const unsigned int ci_hdrc_usb_dma_align = 32;
+-	size_t kmalloc_size;
++	struct ci_hdrc_dma_aligned_buffer *temp;
+ 
+-	if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+-	    !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
++	if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
++		return 0;
++	if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
++	    && IS_ALIGNED(urb->transfer_buffer_length, 4))
+ 		return 0;
+ 
+-	/* Allocate a buffer with enough padding for alignment */
+-	kmalloc_size = urb->transfer_buffer_length +
+-		       sizeof(struct ci_hdrc_dma_aligned_buffer) +
+-		       ci_hdrc_usb_dma_align - 1;
+-
+-	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+-	if (!kmalloc_ptr)
++	temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
++	if (!temp)
+ 		return -ENOMEM;
+ 
+-	/* Position our struct dma_aligned_buffer such that data is aligned */
+-	temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+-	temp->kmalloc_ptr = kmalloc_ptr;
+-	temp->old_xfer_buffer = urb->transfer_buffer;
+ 	if (usb_urb_dir_out(urb))
+ 		memcpy(temp->data, urb->transfer_buffer,
+ 		       urb->transfer_buffer_length);
+-	urb->transfer_buffer = temp->data;
+ 
++	temp->original_buffer = urb->transfer_buffer;
++	urb->transfer_buffer = temp->data;
+ 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+ 
+ 	return 0;
+@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ 
+ 	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ 	if (ret)
+-		ci_hdrc_free_dma_aligned_buffer(urb);
++		ci_hdrc_free_dma_aligned_buffer(urb, false);
+ 
+ 	return ret;
+ }
+@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+ {
+ 	usb_hcd_unmap_urb_for_dma(hcd, urb);
+-	ci_hdrc_free_dma_aligned_buffer(urb);
++	ci_hdrc_free_dma_aligned_buffer(urb, true);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
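
The chipidea hunk above swaps the PTR_ALIGN()-based allocation for a plain one whose flexible data[] member is already sufficiently aligned, and lets the caller decide whether the copy-back happens. A minimal userspace sketch of the bounce-buffer pattern, assuming invented names (bounce_buf, bounce_alloc, bounce_free) rather than the driver's:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct bounce_buf {
	void *original;        /* caller's possibly unaligned buffer */
	size_t len;            /* transfer length */
	unsigned char data[];  /* payload follows the header */
};

static struct bounce_buf *bounce_alloc(void *buf, size_t len, bool dir_out)
{
	struct bounce_buf *b = malloc(sizeof(*b) + len);

	if (!b)
		return NULL;
	b->original = buf;
	b->len = len;
	if (dir_out)           /* OUT transfer: stage the payload now */
		memcpy(b->data, buf, len);
	return b;
}

static void bounce_free(struct bounce_buf *b, bool copy_back)
{
	if (copy_back)         /* completed IN transfer: copy data back */
		memcpy(b->original, b->data, b->len);
	free(b);               /* header and payload freed together */
}

int main(void)
{
	char buf[5] = "abcd";
	struct bounce_buf *b = bounce_alloc(buf, sizeof(buf), true);

	if (!b)
		return 1;
	b->data[0] = 'X';      /* pretend the controller wrote here */
	bounce_free(b, true);  /* copies the staged data back into buf */
	return buf[0] == 'X' ? 0 : 1;
}

As in the driver, passing copy_back as false on the mapping-error path keeps a failed submission from clobbering the caller's buffer.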
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 657f1f659ffaf..35c7a4df8e717 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4769,8 +4769,8 @@ fail3:
+ 	if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ 		qh->channel->qh = NULL;
+ fail2:
+-	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 	urb->hcpriv = NULL;
++	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 	kfree(qtd);
+ fail1:
+ 	if (qh_allocated) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index b9ae5c2a25275..bde43cef8846c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -535,6 +535,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	/* xHC spec requires PCI devices to support D3hot and D3cold */
+ 	if (xhci->hci_version >= 0x120)
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
++	else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
++		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index b26ea7cb4357b..5fabd14e49a03 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -459,23 +459,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
+ 	int ret;
+ 
+ 	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+-		clk_prepare_enable(xhci->clk);
+-		clk_prepare_enable(xhci->reg_clk);
++		ret = clk_prepare_enable(xhci->clk);
++		if (ret)
++			return ret;
++
++		ret = clk_prepare_enable(xhci->reg_clk);
++		if (ret) {
++			clk_disable_unprepare(xhci->clk);
++			return ret;
++		}
+ 	}
+ 
+ 	ret = xhci_priv_resume_quirk(hcd);
+ 	if (ret)
+-		return ret;
++		goto disable_clks;
+ 
+ 	ret = xhci_resume(xhci, PMSG_RESUME);
+ 	if (ret)
+-		return ret;
++		goto disable_clks;
+ 
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
++
++disable_clks:
++	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
++		clk_disable_unprepare(xhci->clk);
++		clk_disable_unprepare(xhci->reg_clk);
++	}
++
++	return ret;
+ }
+ 
+ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 9c6954aad6c88..ce625b1ce9a51 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev)
+ 	/* release port */
+ 	rc = usb_hub_release_port(udev->parent, udev->portnum,
+ 				  (struct usb_dev_state *) udev);
+-	if (rc) {
+-		dev_dbg(&udev->dev, "unable to release port\n");
++	/*
++	 * NOTE: If a hub disconnect triggered the disconnect of the downstream
++	 * device, usb_hub_release_port() will return -ENODEV, so we can safely
++	 * ignore that error here.
++	 */
++	if (rc && (rc != -ENODEV)) {
++		dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index a51fbab963680..289bd9ce4d36d 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -626,9 +626,14 @@ static void pwm_backlight_remove(struct platform_device *pdev)
+ {
+ 	struct backlight_device *bl = platform_get_drvdata(pdev);
+ 	struct pwm_bl_data *pb = bl_get_data(bl);
++	struct pwm_state state;
+ 
+ 	backlight_device_unregister(bl);
+ 	pwm_backlight_power_off(pb);
++	pwm_get_state(pb->pwm, &state);
++	state.duty_cycle = 0;
++	state.enabled = false;
++	pwm_apply_state(pb->pwm, &state);
+ 
+ 	if (pb->exit)
+ 		pb->exit(&pdev->dev);
+@@ -638,8 +643,13 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
+ {
+ 	struct backlight_device *bl = platform_get_drvdata(pdev);
+ 	struct pwm_bl_data *pb = bl_get_data(bl);
++	struct pwm_state state;
+ 
+ 	pwm_backlight_power_off(pb);
++	pwm_get_state(pb->pwm, &state);
++	state.duty_cycle = 0;
++	state.enabled = false;
++	pwm_apply_state(pb->pwm, &state);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -647,12 +657,24 @@ static int pwm_backlight_suspend(struct device *dev)
+ {
+ 	struct backlight_device *bl = dev_get_drvdata(dev);
+ 	struct pwm_bl_data *pb = bl_get_data(bl);
++	struct pwm_state state;
+ 
+ 	if (pb->notify)
+ 		pb->notify(pb->dev, 0);
+ 
+ 	pwm_backlight_power_off(pb);
+ 
++	/*
++	 * Note that disabling the PWM doesn't guarantee that the output stays
++	 * at its inactive state. However, without the PWM disabled, the PWM
++	 * driver refuses to suspend. So disable here even though this might
++	 * enable the backlight on poorly designed boards.
++	 */
++	pwm_get_state(pb->pwm, &state);
++	state.duty_cycle = 0;
++	state.enabled = false;
++	pwm_apply_state(pb->pwm, &state);
++
+ 	if (pb->notify_after)
+ 		pb->notify_after(pb->dev, 0);
+ 
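
The same pwm_get_state()/pwm_apply_state() read-modify-write sequence now appears in the remove, shutdown and suspend paths above. A driver could factor it into a helper; the sketch below is hypothetical and not part of the patch (pwm_backlight_force_off is an invented name, struct pwm_bl_data is the driver-local type):

#include <linux/pwm.h>

static void pwm_backlight_force_off(struct pwm_bl_data *pb)
{
	struct pwm_state state;

	pwm_get_state(pb->pwm, &state);    /* start from the current state */
	state.duty_cycle = 0;              /* drive the output inactive */
	state.enabled = false;             /* then disable the PWM */
	pwm_apply_state(pb->pwm, &state);  /* commit in one call */
}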
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index 730a07d23fa92..1075b11863481 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+  * Workaround for failed writing desc register of planes.
+  * Needed with MPC5121 DIU rev 2.0 silicon.
+  */
+-void wr_reg_wa(u32 *reg, u32 val)
++static void wr_reg_wa(u32 *reg, u32 val)
+ {
+ 	do {
+ 		out_be32(reg, val);
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index ee7d01ad14068..d2806ba296aa3 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
+ 	if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
+ 	    || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
+ 		printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
+-		framebuffer_release(info);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1452,14 +1451,11 @@ static int init_imstt(struct fb_info *info)
+ 	              FBINFO_HWACCEL_FILLRECT |
+ 	              FBINFO_HWACCEL_YPAN;
+ 
+-	if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+-		framebuffer_release(info);
++	if (fb_alloc_cmap(&info->cmap, 0, 0))
+ 		return -ENODEV;
+-	}
+ 
+ 	if (register_framebuffer(info) < 0) {
+ 		fb_dealloc_cmap(&info->cmap);
+-		framebuffer_release(info);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1499,8 +1495,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	if (!request_mem_region(addr, size, "imsttfb")) {
+ 		printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
+-		framebuffer_release(info);
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto release_info;
+ 	}
+ 
+ 	switch (pdev->device) {
+@@ -1517,36 +1513,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
+ 					 "contact maintainer.\n", pdev->device);
+ 			ret = -ENODEV;
+-			goto error;
++			goto release_mem_region;
+ 	}
+ 
+ 	info->fix.smem_start = addr;
+ 	info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 					    0x400000 : 0x800000);
+ 	if (!info->screen_base)
+-		goto error;
++		goto release_mem_region;
+ 	info->fix.mmio_start = addr + 0x800000;
+ 	par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ 	if (!par->dc_regs)
+-		goto error;
++		goto unmap_screen_base;
+ 	par->cmap_regs_phys = addr + 0x840000;
+ 	par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ 	if (!par->cmap_regs)
+-		goto error;
++		goto unmap_dc_regs;
+ 	info->pseudo_palette = par->palette;
+ 	ret = init_imstt(info);
+ 	if (ret)
+-		goto error;
++		goto unmap_cmap_regs;
+ 
+ 	pci_set_drvdata(pdev, info);
+-	return ret;
++	return 0;
+ 
+-error:
+-	if (par->dc_regs)
+-		iounmap(par->dc_regs);
+-	if (info->screen_base)
+-		iounmap(info->screen_base);
++unmap_cmap_regs:
++	iounmap(par->cmap_regs);
++unmap_dc_regs:
++	iounmap(par->dc_regs);
++unmap_screen_base:
++	iounmap(info->screen_base);
++release_mem_region:
+ 	release_mem_region(addr, size);
++release_info:
+ 	framebuffer_release(info);
+ 	return ret;
+ }
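
The imsttfb rework above replaces a single catch-all error label with ordered labels, each undoing exactly one successful step in reverse order of acquisition. A self-contained sketch of that unwinding idiom, with stand-in acquire()/release() helpers in place of request_mem_region()/ioremap():

#include <stdio.h>

static int acquire(const char *what)  { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe_sketch(void)
{
	int ret;

	ret = acquire("region");
	if (ret)
		return ret;
	ret = acquire("screen");
	if (ret)
		goto release_region;
	ret = acquire("regs");
	if (ret)
		goto release_screen;
	return 0;

	/* each label undoes one step; fall-through handles the rest */
release_screen:
	release("screen");
release_region:
	release("region");
	return ret;
}

int main(void)
{
	return probe_sketch();
}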
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 97dbe715e96ad..5bee58ef5f1e3 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -57,6 +57,11 @@ struct snp_guest_dev {
+ 
+ 	struct snp_secrets_page_layout *layout;
+ 	struct snp_req_data input;
++	union {
++		struct snp_report_req report;
++		struct snp_derived_key_req derived_key;
++		struct snp_ext_report_req ext_report;
++	} req;
+ 	u32 *os_area_msg_seqno;
+ 	u8 *vmpck;
+ };
+@@ -473,8 +478,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
++	struct snp_report_req *req = &snp_dev->req.report;
+ 	struct snp_report_resp *resp;
+-	struct snp_report_req req;
+ 	int rc, resp_len;
+ 
+ 	lockdep_assert_held(&snp_cmd_mutex);
+@@ -482,7 +487,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ 	if (!arg->req_data || !arg->resp_data)
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+ 	/*
+@@ -496,7 +501,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ 		return -ENOMEM;
+ 
+ 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+-				  SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
++				  SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
+ 				  resp_len);
+ 	if (rc)
+ 		goto e_free;
+@@ -511,9 +516,9 @@ e_free:
+ 
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++	struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+ 	struct snp_derived_key_resp resp = {0};
+-	struct snp_derived_key_req req;
+ 	int rc, resp_len;
+ 	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
+ 	u8 buf[64 + 16];
+@@ -532,11 +537,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ 	if (sizeof(buf) < resp_len)
+ 		return -ENOMEM;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+ 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+-				  SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
++				  SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -552,8 +557,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ 
+ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++	struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+-	struct snp_ext_report_req req;
+ 	struct snp_report_resp *resp;
+ 	int ret, npages = 0, resp_len;
+ 
+@@ -562,18 +567,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ 	if (!arg->req_data || !arg->resp_data)
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+ 	/* userspace does not want certificate data */
+-	if (!req.certs_len || !req.certs_address)
++	if (!req->certs_len || !req->certs_address)
+ 		goto cmd;
+ 
+-	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+-	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
++	if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
++	    !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ 		return -EINVAL;
+ 
+-	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
++	if (!access_ok((const void __user *)req->certs_address, req->certs_len))
+ 		return -EFAULT;
+ 
+ 	/*
+@@ -582,8 +587,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ 	 * the host. If host does not supply any certs in it, then copy
+ 	 * zeros to indicate that certificate data was not provided.
+ 	 */
+-	memset(snp_dev->certs_data, 0, req.certs_len);
+-	npages = req.certs_len >> PAGE_SHIFT;
++	memset(snp_dev->certs_data, 0, req->certs_len);
++	npages = req->certs_len >> PAGE_SHIFT;
+ cmd:
+ 	/*
+ 	 * The intermediate response buffer is used while decrypting the
+@@ -597,14 +602,14 @@ cmd:
+ 
+ 	snp_dev->input.data_npages = npages;
+ 	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
+-				   SNP_MSG_REPORT_REQ, &req.data,
+-				   sizeof(req.data), resp->data, resp_len);
++				   SNP_MSG_REPORT_REQ, &req->data,
++				   sizeof(req->data), resp->data, resp_len);
+ 
+ 	/* If certs length is invalid then copy the returned length */
+ 	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
+-		req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
++		req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+ 
+-		if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
++		if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
+ 			ret = -EFAULT;
+ 	}
+ 
+@@ -612,8 +617,8 @@ cmd:
+ 		goto e_free;
+ 
+ 	if (npages &&
+-	    copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
+-			 req.certs_len)) {
++	    copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
++			 req->certs_len)) {
+ 		ret = -EFAULT;
+ 		goto e_free;
+ 	}
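
The sev-guest fix above moves the per-ioctl request structs off the kernel stack and into a union embedded in the long-lived device structure; because snp_cmd_mutex serializes commands, only one member is ever live at a time. A userspace sketch of the same layout trick, with invented req_a/req_b types standing in for the SNP request structs:

#include <stdio.h>
#include <string.h>

struct req_a { unsigned char payload[64]; };
struct req_b { unsigned char payload[96]; };

struct dev_ctx {
	/* commands are serialized, so per-command requests can share one
	 * allocation instead of living on each handler's stack */
	union {
		struct req_a a;
		struct req_b b;
	} req;
};

int main(void)
{
	struct dev_ctx ctx;
	struct req_a *req = &ctx.req.a;  /* no on-stack copy of the request */

	memset(req, 0, sizeof(*req));    /* stands in for copy_from_user() */
	printf("union size: %zu\n", sizeof(ctx.req)); /* max of the members */
	return 0;
}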
+diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
+index 607ce4b8df574..ec0c08652ec2f 100644
+--- a/drivers/watchdog/ixp4xx_wdt.c
++++ b/drivers/watchdog/ixp4xx_wdt.c
+@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
+ 	.owner = THIS_MODULE,
+ };
+ 
++/*
++ * The A0 version of the IXP422 had a bug in the watchdog making
++ * it useless, but we still need to use it to restart the system
++ * as it is the only way, so in this special case we register a
++ * "dummy" watchdog that doesn't really work, but will support
++ * the restart operation.
++ */
++static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
++{
++	return 0;
++}
++
++static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
++	.start = ixp4xx_wdt_dummy,
++	.stop = ixp4xx_wdt_dummy,
++	.restart = ixp4xx_wdt_restart,
++	.owner = THIS_MODULE,
++};
++
+ static const struct watchdog_info ixp4xx_wdt_info = {
+ 	.options = WDIOF_KEEPALIVEPING
+ 		| WDIOF_MAGICCLOSE
+@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
+ 
+ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ {
++	static const struct watchdog_ops *iwdt_ops;
+ 	struct device *dev = &pdev->dev;
+ 	struct ixp4xx_wdt *iwdt;
+ 	struct clk *clk;
+ 	int ret;
+ 
+ 	if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+-		dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
+-		return -ENODEV;
++		dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
++		iwdt_ops = &ixp4xx_wdt_restart_only_ops;
++	} else {
++		iwdt_ops = &ixp4xx_wdt_ops;
+ 	}
+ 
+ 	iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ 		iwdt->rate = IXP4XX_TIMER_FREQ;
+ 
+ 	iwdt->wdd.info = &ixp4xx_wdt_info;
+-	iwdt->wdd.ops = &ixp4xx_wdt_ops;
++	iwdt->wdd.ops = iwdt_ops;
+ 	iwdt->wdd.min_timeout = 1;
+ 	iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+ 	iwdt->wdd.parent = dev;
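
Rather than refusing to probe on the broken A0 silicon, the driver now selects an alternative ops table whose start/stop callbacks are no-ops while restart keeps working. A compact sketch of picking a vtable at probe time based on a hardware quirk (all names below are illustrative):

#include <stdio.h>

struct wdt_ops {
	int (*start)(void);
	int (*restart)(void);
};

static int real_start(void) { puts("start");   return 0; }
static int do_restart(void) { puts("restart"); return 0; }
static int dummy(void)      { return 0; }  /* keeps the core happy */

static const struct wdt_ops full_ops = {
	.start = real_start, .restart = do_restart,
};
static const struct wdt_ops restart_only_ops = {
	.start = dummy, .restart = do_restart,
};

int main(void)
{
	int broken_rev = 1;  /* stands in for the A0 IXP42x revision check */
	const struct wdt_ops *ops = broken_rev ? &restart_only_ops : &full_ops;

	ops->start();           /* a no-op on the broken silicon */
	return ops->restart();  /* restart works either way */
}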
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 059de92aea7d0..d47eee6c51435 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ 	u16 val;
+ 	int ret = 0;
+ 
+-	err = pci_read_config_word(dev, PCI_COMMAND, &val);
+-	if (err)
+-		return err;
+-	if (!(val & PCI_COMMAND_INTX_DISABLE))
+-		ret |= INTERRUPT_TYPE_INTX;
+-
+ 	/*
+ 	 * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ 	 * bypassing the pci_*msi* functions, by QEMU.
+@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ 		if (val & PCI_MSIX_FLAGS_ENABLE)
+ 			ret |= INTERRUPT_TYPE_MSIX;
+ 	}
++
++	/*
++	 * The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
++	 * so check for INTx only when both are disabled.
++	 */
++	if (!ret) {
++		err = pci_read_config_word(dev, PCI_COMMAND, &val);
++		if (err)
++			return err;
++		if (!(val & PCI_COMMAND_INTX_DISABLE))
++			ret |= INTERRUPT_TYPE_INTX;
++	}
++
+ 	return ret ?: INTERRUPT_TYPE_NONE;
+ }
+ 
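
The reordering above makes the INTx probe conditional: since the PCIe spec forbids INTx while MSI/MSI-X is enabled, PCI_COMMAND is only consulted when neither is set. A small stand-alone sketch of that classification order (the TYPE_* flags are illustrative, not the driver's constants):

#include <stdio.h>

#define TYPE_INTX  (1 << 0)
#define TYPE_MSI   (1 << 1)
#define TYPE_MSIX  (1 << 2)

static int classify(int msi_on, int msix_on, int intx_disabled)
{
	int ret = 0;

	if (msi_on)
		ret |= TYPE_MSI;
	if (msix_on)
		ret |= TYPE_MSIX;
	if (!ret && !intx_disabled)  /* INTx checked last, only if needed */
		ret |= TYPE_INTX;
	return ret;
}

int main(void)
{
	/* MSI enabled with INTX_DISABLE clear no longer reports INTx too */
	printf("%d\n", classify(1, 0, 0) == TYPE_MSI);
	return 0;
}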
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 097316a741268..1948a9700c8fa 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ 		return PCIBIOS_SET_FAILED;
+ 
+ 	if (new_value & field_config->enable_bit) {
+-		/* don't allow enabling together with other interrupt types */
++		/*
++		 * Don't allow enabling together with other interrupt types, but do
++		 * allow enabling MSI(-X) while INTx is still active to please Linux's
++		 * MSI(-X) startup sequence. It is safe to do, as according to the PCI
++		 * spec, a device with MSI(-X) enabled shouldn't use INTx.
++		 */
+ 		int int_type = xen_pcibk_get_interrupt_type(dev);
+ 
+ 		if (int_type == INTERRUPT_TYPE_NONE ||
++		    int_type == INTERRUPT_TYPE_INTX ||
+ 		    int_type == field_config->int_type)
+ 			goto write;
+ 		return PCIBIOS_SET_FAILED;
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 981435103af1a..fc03326459664 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ 		pci_clear_mwi(dev);
+ 	}
+ 
+-	if (dev_data && dev_data->allow_interrupt_control) {
+-		if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+-			if (value & PCI_COMMAND_INTX_DISABLE) {
+-				pci_intx(dev, 0);
+-			} else {
+-				/* Do not allow enabling INTx together with MSI or MSI-X. */
+-				switch (xen_pcibk_get_interrupt_type(dev)) {
+-				case INTERRUPT_TYPE_NONE:
+-					pci_intx(dev, 1);
+-					break;
+-				case INTERRUPT_TYPE_INTX:
+-					break;
+-				default:
+-					return PCIBIOS_SET_FAILED;
+-				}
+-			}
+-		}
+-	}
++	if (dev_data && dev_data->allow_interrupt_control &&
++	    ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
++		pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
+ 
+ 	cmd->val = value;
+ 
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 639bf628389ba..3205e5d724c8c 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
+ 			if (err < 0) {
+ 				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ 				       err);
+-				return err;
++				goto out_error;
+ 			}
+ 
+ 			xs_init_irq = err;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index bf35b6fce8f07..6d0df9bc1e72b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1528,7 +1528,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
+ static noinline int copy_to_sk(struct btrfs_path *path,
+ 			       struct btrfs_key *key,
+ 			       struct btrfs_ioctl_search_key *sk,
+-			       size_t *buf_size,
++			       u64 *buf_size,
+ 			       char __user *ubuf,
+ 			       unsigned long *sk_offset,
+ 			       int *num_found)
+@@ -1660,7 +1660,7 @@ out:
+ 
+ static noinline int search_ioctl(struct inode *inode,
+ 				 struct btrfs_ioctl_search_key *sk,
+-				 size_t *buf_size,
++				 u64 *buf_size,
+ 				 char __user *ubuf)
+ {
+ 	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
+@@ -1733,7 +1733,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+ 	struct btrfs_ioctl_search_args __user *uargs = argp;
+ 	struct btrfs_ioctl_search_key sk;
+ 	int ret;
+-	size_t buf_size;
++	u64 buf_size;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -1763,8 +1763,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+ 	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
+ 	struct btrfs_ioctl_search_args_v2 args;
+ 	int ret;
+-	size_t buf_size;
+-	const size_t buf_limit = SZ_16M;
++	u64 buf_size;
++	const u64 buf_limit = SZ_16M;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
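
The btrfs change widens buf_size from size_t to u64 because size_t is 32 bits on 32-bit kernels, so a large length copied from userspace could be truncated before the SZ_16M limit check even runs. The truncation is easy to demonstrate in userspace:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t user_len = 0x100000001ULL;    /* > 4 GiB, as userspace may pass */
	uint32_t narrow = (uint32_t)user_len;  /* what a 32-bit size_t keeps */

	/* narrow == 1: a limit check on the truncated value would pass
	 * even though the real length is enormous */
	printf("u64=%llu truncated=%u\n",
	       (unsigned long long)user_len, narrow);
	return 0;
}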
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index cfbd6b1c4b7f1..ab8e0c12f0fe4 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1803,6 +1803,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ 	 */
+ 	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
+ 
++	/* @found_logical_ret must be specified. */
++	ASSERT(found_logical_ret);
++
+ 	stripe = &sctx->stripes[sctx->cur_stripe];
+ 	scrub_reset_stripe(stripe);
+ 	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+@@ -1811,8 +1814,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ 	/* Either >0 as no more extents or <0 for error. */
+ 	if (ret)
+ 		return ret;
+-	if (found_logical_ret)
+-		*found_logical_ret = stripe->logical;
++	*found_logical_ret = stripe->logical;
+ 	sctx->cur_stripe++;
+ 
+ 	/* We filled one group, submit it. */
+@@ -2037,7 +2039,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 	path.skip_locking = 1;
+ 	/* Go through each extent items inside the logical range */
+ 	while (cur_logical < logical_end) {
+-		u64 found_logical;
++		u64 found_logical = U64_MAX;
+ 		u64 cur_physical = physical + cur_logical - logical_start;
+ 
+ 		/* Canceled? */
+@@ -2072,6 +2074,8 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 		if (ret < 0)
+ 			break;
+ 
++		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
++		ASSERT(found_logical != U64_MAX);
+ 		cur_logical = found_logical + BTRFS_STRIPE_LEN;
+ 
+ 		/* Don't hold CPU for too long time */
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index e1a0df67b5669..12b221c9d74d0 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -1119,15 +1119,15 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ 
+ 		break;
+ 	case DLM_VERSION_3_2:
++		/* send ack back if necessary */
++		dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
++
+ 		msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
+ 					       ppc);
+ 		if (!msg) {
+ 			dlm_free_mhandle(mh);
+ 			goto err;
+ 		}
+-
+-		/* send ack back if necessary */
+-		dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
+ 		break;
+ 	default:
+ 		dlm_free_mhandle(mh);
+diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
+index cc6fb9e988991..4256a85719a1d 100644
+--- a/fs/erofs/utils.c
++++ b/fs/erofs/utils.c
+@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+ 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ 	struct erofs_workgroup *pre;
+ 
+-	/*
+-	 * Bump up before making this visible to others for the XArray in order
+-	 * to avoid potential UAF without serialized by xa_lock.
+-	 */
+-	lockref_get(&grp->lockref);
+-
++	DBG_BUGON(grp->lockref.count < 1);
+ repeat:
+ 	xa_lock(&sbi->managed_pslots);
+ 	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+@@ -96,7 +91,6 @@ repeat:
+ 			cond_resched();
+ 			goto repeat;
+ 		}
+-		lockref_put_return(&grp->lockref);
+ 		grp = pre;
+ 	}
+ 	xa_unlock(&sbi->managed_pslots);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 9bfdb4ad7c763..2461a3f74e744 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -805,6 +805,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 		return PTR_ERR(pcl);
+ 
+ 	spin_lock_init(&pcl->obj.lockref.lock);
++	pcl->obj.lockref.count = 1;	/* one ref for this request */
+ 	pcl->algorithmformat = map->m_algorithmformat;
+ 	pcl->length = 0;
+ 	pcl->partial = true;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index e4115d338f101..d97333aa92e99 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ 		ix = curp->p_idx;
+ 	}
+ 
++	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
++		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
++		return -EFSCORRUPTED;
++	}
++
+ 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ 	BUG_ON(len < 0);
+ 	if (len > 0) {
+@@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ 	}
+ 
+-	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+-		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	ix->ei_block = cpu_to_le32(logical);
+ 	ext4_idx_store_pblock(ix, ptr);
+ 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
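
The ext4 fix hoists the EXT_MAX_INDEX bounds check above the memmove(), so corrupted on-disk metadata is rejected before it can drive an out-of-bounds write. The general validate-then-mutate shape, as a self-contained sketch:

#include <errno.h>
#include <string.h>

/* reject an out-of-range position before memmove() can write past the end */
static int insert_at(int *arr, size_t cap, size_t used, size_t pos, int val)
{
	if (pos > used || used >= cap)
		return -EINVAL;
	memmove(&arr[pos + 1], &arr[pos], (used - pos) * sizeof(*arr));
	arr[pos] = val;
	return 0;
}

int main(void)
{
	int arr[4] = { 1, 3, 0, 0 };

	/* a position taken from (possibly corrupted) metadata is checked first */
	return insert_at(arr, 4, 2, 1, 2);  /* arr becomes {1, 2, 3, 0} */
}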
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index ecebc3a139be2..1c5703c02132d 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2341,8 +2341,10 @@ skip_reading_dnode:
+ 		f2fs_wait_on_block_writeback(inode, blkaddr);
+ 
+ 		if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+-			if (atomic_dec_and_test(&dic->remaining_pages))
++			if (atomic_dec_and_test(&dic->remaining_pages)) {
+ 				f2fs_decompress_cluster(dic, true);
++				break;
++			}
+ 			continue;
+ 		}
+ 
+@@ -3020,7 +3022,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ {
+ 	int ret = 0;
+ 	int done = 0, retry = 0;
+-	struct page *pages[F2FS_ONSTACK_PAGES];
++	struct page *pages_local[F2FS_ONSTACK_PAGES];
++	struct page **pages = pages_local;
+ 	struct folio_batch fbatch;
+ 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ 	struct bio *bio = NULL;
+@@ -3044,6 +3047,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ #endif
+ 	int nr_folios, p, idx;
+ 	int nr_pages;
++	unsigned int max_pages = F2FS_ONSTACK_PAGES;
+ 	pgoff_t index;
+ 	pgoff_t end;		/* Inclusive */
+ 	pgoff_t done_index;
+@@ -3053,6 +3057,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ 	int submitted = 0;
+ 	int i;
+ 
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++	if (f2fs_compressed_file(inode) &&
++		1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
++		pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
++				cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
++		max_pages = 1 << cc.log_cluster_size;
++	}
++#endif
++
+ 	folio_batch_init(&fbatch);
+ 
+ 	if (get_dirty_pages(mapping->host) <=
+@@ -3098,7 +3111,7 @@ again:
+ add_more:
+ 			pages[nr_pages] = folio_page(folio, idx);
+ 			folio_get(folio);
+-			if (++nr_pages == F2FS_ONSTACK_PAGES) {
++			if (++nr_pages == max_pages) {
+ 				index = folio->index + idx + 1;
+ 				folio_batch_release(&fbatch);
+ 				goto write;
+@@ -3281,6 +3294,11 @@ next:
+ 	if (bio)
+ 		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+ 
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++	if (pages != pages_local)
++		kfree(pages);
++#endif
++
+ 	return ret;
+ }
+ 
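
For compressed inodes whose cluster size exceeds F2FS_ONSTACK_PAGES, the writeback path above now falls back to a heap-allocated pages array and frees it only when it differs from the on-stack one. A userspace sketch of that stack-array-with-heap-fallback pattern (walk() and process() are invented names):

#include <stdlib.h>

#define ONSTACK_SLOTS 16

static void process(void **pages, unsigned int n)
{
	(void)pages;  /* placeholder for the real per-page work */
	(void)n;
}

static int walk(unsigned int needed)
{
	void *local[ONSTACK_SLOTS];
	void **pages = local;             /* common case: the stack array */
	unsigned int max = ONSTACK_SLOTS;

	if (needed > ONSTACK_SLOTS) {     /* oversized case: use the heap */
		pages = calloc(needed, sizeof(*pages));
		if (!pages)
			return -1;
		max = needed;
	}

	process(pages, max);

	if (pages != local)               /* free only the heap-allocated copy */
		free(pages);
	return 0;
}

int main(void)
{
	return walk(64);
}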
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ea4a094c518f9..e53a429bd4c4c 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -3258,6 +3258,7 @@ int f2fs_precache_extents(struct inode *inode)
+ 		return -EOPNOTSUPP;
+ 
+ 	map.m_lblk = 0;
++	map.m_pblk = 0;
+ 	map.m_next_pgofs = NULL;
+ 	map.m_next_extent = &m_next_extent;
+ 	map.m_seg_type = NO_CHECK_TYPE;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 8d9d2ee7f3c7f..c03a73ff36e79 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -547,6 +547,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
+ }
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
++					const char *new_ext, bool is_ext)
++{
++	unsigned char (*ext)[F2FS_EXTENSION_LEN];
++	int ext_cnt;
++	int i;
++
++	if (is_ext) {
++		ext = F2FS_OPTION(sbi).extensions;
++		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++	} else {
++		ext = F2FS_OPTION(sbi).noextensions;
++		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++	}
++
++	for (i = 0; i < ext_cnt; i++) {
++		if (!strcasecmp(new_ext, ext[i]))
++			return true;
++	}
++
++	return false;
++}
++
+ /*
+  * 1. The same extension name cannot appear in both compress and non-compress extension
+  * at the same time.
+@@ -1149,6 +1172,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 				return -EINVAL;
+ 			}
+ 
++			if (is_compress_extension_exist(sbi, name, true)) {
++				kfree(name);
++				break;
++			}
++
+ 			strcpy(ext[ext_cnt], name);
+ 			F2FS_OPTION(sbi).compress_ext_cnt++;
+ 			kfree(name);
+@@ -1173,6 +1201,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 				return -EINVAL;
+ 			}
+ 
++			if (is_compress_extension_exist(sbi, name, false)) {
++				kfree(name);
++				break;
++			}
++
+ 			strcpy(noext[noext_cnt], name);
+ 			F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ 			kfree(name);
+@@ -1629,7 +1662,7 @@ static void f2fs_put_super(struct super_block *sb)
+ 
+ 	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ 
+-	if (err) {
++	if (err || f2fs_cp_error(sbi)) {
+ 		truncate_inode_pages_final(NODE_MAPPING(sbi));
+ 		truncate_inode_pages_final(META_MAPPING(sbi));
+ 	}
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index d532a93e980d7..e19edd179dfc0 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -613,6 +613,24 @@ out_free:
+ 	kfree(isw);
+ }
+ 
++static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
++				   struct list_head *list, int *nr)
++{
++	struct inode *inode;
++
++	list_for_each_entry(inode, list, i_io_list) {
++		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
++			continue;
++
++		isw->inodes[*nr] = inode;
++		(*nr)++;
++
++		if (*nr >= WB_MAX_INODES_PER_ISW - 1)
++			return true;
++	}
++	return false;
++}
++
+ /**
+  * cleanup_offline_cgwb - detach associated inodes
+  * @wb: target wb
+@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ {
+ 	struct cgroup_subsys_state *memcg_css;
+ 	struct inode_switch_wbs_context *isw;
+-	struct inode *inode;
+ 	int nr;
+ 	bool restart = false;
+ 
+@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ 
+ 	nr = 0;
+ 	spin_lock(&wb->list_lock);
+-	list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+-		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+-			continue;
+-
+-		isw->inodes[nr++] = inode;
+-
+-		if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+-			restart = true;
+-			break;
+-		}
+-	}
++	/*
++	 * In addition to the inodes that have completed writeback, also switch
++	 * cgwbs for those inodes with only dirty timestamps. Otherwise, those
++	 * inodes won't be written back for a long time when lazytime is
++	 * enabled, thus pinning the dying cgwbs. This won't break the
++	 * bandwidth restrictions, as writeback of inode metadata is not
++	 * accounted for.
++	 */
++	restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
++	if (!restart)
++		restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ 	spin_unlock(&wb->list_lock);
+ 
+ 	/* no attached inodes? bail out */
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ee9c923192e08..07bf219f9ae48 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
+ 	struct net *net = SVC_NET(rqstp);
+ 	struct nfsd_file *new, *nf;
+-	const struct cred *cred;
++	bool stale_retry = true;
+ 	bool open_retry = true;
+ 	struct inode *inode;
+ 	__be32 status;
+ 	int ret;
+ 
++retry:
+ 	status = fh_verify(rqstp, fhp, S_IFREG,
+ 				may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ 	if (status != nfs_ok)
+ 		return status;
+ 	inode = d_inode(fhp->fh_dentry);
+-	cred = get_current_cred();
+ 
+-retry:
+ 	rcu_read_lock();
+-	nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++	nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ 	rcu_read_unlock();
+ 
+ 	if (nf) {
+@@ -1026,7 +1025,7 @@ retry:
+ 
+ 	rcu_read_lock();
+ 	spin_lock(&inode->i_lock);
+-	nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++	nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ 	if (unlikely(nf)) {
+ 		spin_unlock(&inode->i_lock);
+ 		rcu_read_unlock();
+@@ -1058,6 +1057,7 @@ wait_for_construction:
+ 			goto construction_err;
+ 		}
+ 		open_retry = false;
++		fh_put(fhp);
+ 		goto retry;
+ 	}
+ 	this_cpu_inc(nfsd_file_cache_hits);
+@@ -1074,7 +1074,6 @@ out:
+ 		nfsd_file_check_write_error(nf);
+ 		*pnf = nf;
+ 	}
+-	put_cred(cred);
+ 	trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
+ 	return status;
+ 
+@@ -1088,8 +1087,20 @@ open_file:
+ 			status = nfs_ok;
+ 			trace_nfsd_file_opened(nf, status);
+ 		} else {
+-			status = nfsd_open_verified(rqstp, fhp, may_flags,
+-						    &nf->nf_file);
++			ret = nfsd_open_verified(rqstp, fhp, may_flags,
++						 &nf->nf_file);
++			if (ret == -EOPENSTALE && stale_retry) {
++				stale_retry = false;
++				nfsd_file_unhash(nf);
++				clear_and_wake_up_bit(NFSD_FILE_PENDING,
++						      &nf->nf_flags);
++				if (refcount_dec_and_test(&nf->nf_ref))
++					nfsd_file_free(nf);
++				nf = NULL;
++				fh_put(fhp);
++				goto retry;
++			}
++			status = nfserrno(ret);
+ 			trace_nfsd_file_open(nf, status);
+ 		}
+ 	} else
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index c7fdc19b0d5f7..4aa421d1efbfd 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -823,7 +823,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
+  * and additional flags.
+  * N.B. After this call fhp needs an fh_put
+  */
+-static __be32
++static int
+ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 			int may_flags, struct file **filp)
+ {
+@@ -831,14 +831,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 	struct inode	*inode;
+ 	struct file	*file;
+ 	int		flags = O_RDONLY|O_LARGEFILE;
+-	__be32		err;
+-	int		host_err = 0;
++	int		host_err = -EPERM;
+ 
+ 	path.mnt = fhp->fh_export->ex_path.mnt;
+ 	path.dentry = fhp->fh_dentry;
+ 	inode = d_inode(path.dentry);
+ 
+-	err = nfserr_perm;
+ 	if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ 		goto out;
+ 
+@@ -847,7 +845,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 
+ 	host_err = nfsd_open_break_lease(inode, may_flags);
+ 	if (host_err) /* NOMEM or WOULDBLOCK */
+-		goto out_nfserr;
++		goto out;
+ 
+ 	if (may_flags & NFSD_MAY_WRITE) {
+ 		if (may_flags & NFSD_MAY_READ)
+@@ -859,13 +857,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 	file = dentry_open(&path, flags, current_cred());
+ 	if (IS_ERR(file)) {
+ 		host_err = PTR_ERR(file);
+-		goto out_nfserr;
++		goto out;
+ 	}
+ 
+ 	host_err = ima_file_check(file, may_flags);
+ 	if (host_err) {
+ 		fput(file);
+-		goto out_nfserr;
++		goto out;
+ 	}
+ 
+ 	if (may_flags & NFSD_MAY_64BIT_COOKIE)
+@@ -874,10 +872,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 		file->f_mode |= FMODE_32BITHASH;
+ 
+ 	*filp = file;
+-out_nfserr:
+-	err = nfserrno(host_err);
+ out:
+-	return err;
++	return host_err;
+ }
+ 
+ __be32
+@@ -885,6 +881,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ 		int may_flags, struct file **filp)
+ {
+ 	__be32 err;
++	int host_err;
+ 	bool retried = false;
+ 
+ 	validate_process_creds();
+@@ -904,12 +901,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ retry:
+ 	err = fh_verify(rqstp, fhp, type, may_flags);
+ 	if (!err) {
+-		err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+-		if (err == nfserr_stale && !retried) {
++		host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
++		if (host_err == -EOPENSTALE && !retried) {
+ 			retried = true;
+ 			fh_put(fhp);
+ 			goto retry;
+ 		}
++		err = nfserrno(host_err);
+ 	}
+ 	validate_process_creds();
+ 	return err;
+@@ -922,13 +920,13 @@ retry:
+  * @may_flags: internal permission flags
+  * @filp: OUT: open "struct file *"
+  *
+- * Returns an nfsstat value in network byte order.
++ * Returns zero on success, or a negative errno value.
+  */
+-__be32
++int
+ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
+ 		   struct file **filp)
+ {
+-	__be32 err;
++	int err;
+ 
+ 	validate_process_creds();
+ 	err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index a6890ea7b765b..e3c29596f4df1 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -104,8 +104,8 @@ __be32		nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ int 		nfsd_open_break_lease(struct inode *, int);
+ __be32		nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
+ 				int, struct file **);
+-__be32		nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
+-				int, struct file **);
++int		nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
++				   int may_flags, struct file **filp);
+ __be32		nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 				struct file *file, loff_t offset,
+ 				unsigned long *count,
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index cbc0b468c1ab6..15cefe268ffd8 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -573,6 +573,8 @@ out:
+  */
+ int pstore_register(struct pstore_info *psi)
+ {
++	char *new_backend;
++
+ 	if (backend && strcmp(backend, psi->name)) {
+ 		pr_warn("backend '%s' already in use: ignoring '%s'\n",
+ 			backend, psi->name);
+@@ -593,11 +595,16 @@ int pstore_register(struct pstore_info *psi)
+ 		return -EINVAL;
+ 	}
+ 
++	new_backend = kstrdup(psi->name, GFP_KERNEL);
++	if (!new_backend)
++		return -ENOMEM;
++
+ 	mutex_lock(&psinfo_lock);
+ 	if (psinfo) {
+ 		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
+ 			psinfo->name, psi->name);
+ 		mutex_unlock(&psinfo_lock);
++		kfree(new_backend);
+ 		return -EBUSY;
+ 	}
+ 
+@@ -630,7 +637,7 @@ int pstore_register(struct pstore_info *psi)
+ 	 * Update the module parameter backend, so it is visible
+ 	 * through /sys/module/pstore/parameters/backend
+ 	 */
+-	backend = kstrdup(psi->name, GFP_KERNEL);
++	backend = new_backend;
+ 
+ 	pr_info("Registered %s as persistent store backend\n", psi->name);
+ 
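
The pstore fix performs the kstrdup() before taking psinfo_lock, keeping a possibly-sleeping allocation out of the critical section and freeing the copy on every failure path rather than leaking it. A pthread-based sketch of the allocate-outside-the-lock idiom (register_name() is an invented stand-in for pstore_register()):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *registered;  /* stands in for the module-global backend */

static int register_name(const char *name)
{
	char *copy = strdup(name);  /* may fail or sleep: do it unlocked */

	if (!copy)
		return -1;

	pthread_mutex_lock(&lock);
	if (registered) {           /* already taken: drop our copy */
		pthread_mutex_unlock(&lock);
		free(copy);
		return -1;
	}
	registered = copy;          /* commit under the lock */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	int first = register_name("efi");       /* succeeds */
	int second = register_name("ramoops");  /* rejected, copy freed */

	return (first == 0 && second != 0) ? 0 : 1;
}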
+diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
+index 05100e91ecb96..6fc9bb2979e45 100644
+--- a/include/drm/bridge/samsung-dsim.h
++++ b/include/drm/bridge/samsung-dsim.h
+@@ -53,6 +53,7 @@ struct samsung_dsim_driver_data {
+ 	unsigned int plltmr_reg;
+ 	unsigned int has_freqband:1;
+ 	unsigned int has_clklane_stop:1;
++	unsigned int has_broken_fifoctrl_emptyhdr:1;
+ 	unsigned int num_clks;
+ 	unsigned int min_freq;
+ 	unsigned int max_freq;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 6ba9d3ed8f0b0..98a7d6fd10360 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1016,6 +1016,11 @@ struct btf_func_model {
+  */
+ #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
+ 
++/* Indicate that the current trampoline is in a tail call context. It then has
++ * to cache and restore tail_call_cnt to avoid an infinite tail call loop.
++ */
++#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
++
+ /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+  * bytes on x86.
+  */
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 0f0cd01906b4c..ec93fb23372b4 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
+ 				 unsigned long parent_rate);
+ 
+ /**
+- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
++ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+  *
+  * @num:	Numerator of the duty cycle ratio
+  * @den:	Denominator of the duty cycle ratio
+@@ -129,7 +129,7 @@ struct clk_duty {
+  * @restore_context: Restore the context of the clock after a restoration
+  *		of power.
+  *
+- * @recalc_rate	Recalculate the rate of this clock, by querying hardware. The
++ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+  *		parent rate is an input parameter.  It is up to the caller to
+  *		ensure that the prepare_mutex is held across this call. If the
+  *		driver cannot figure out a rate for this clock, it must return
+@@ -456,7 +456,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+  * clock with the clock framework
+  * @dev: device that is registering this clock
+  * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+  * @flags: framework-specific flags
+  * @fixed_rate: non-adjustable clock rate
+  * @fixed_accuracy: non-adjustable clock accuracy
+@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+  * the clock framework
+  * @dev: device that is registering this clock
+  * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+  * @flags: framework-specific flags
+  * @fixed_rate: non-adjustable clock rate
+  */
+@@ -649,7 +649,7 @@ struct clk_div_table {
+  * Clock with an adjustable divider affecting its output frequency.  Implements
+  * .recalc_rate, .set_rate and .round_rate
+  *
+- * Flags:
++ * @flags:
+  * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+  *	register plus one.  If CLK_DIVIDER_ONE_BASED is set then the divider is
+  *	the raw value read from the register, with the value of zero considered
+@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+  * @mwidth:	width of the numerator bit field
+  * @nshift:	shift to the denominator bit field
+  * @nwidth:	width of the denominator bit field
++ * @approximation: clk driver's callback for calculating the divider clock
+  * @lock:	register lock
+  *
+  * Clock with adjustable fractional divider affecting its output frequency.
+  *
+- * Flags:
++ * @flags:
+  * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+  *	is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+  *	is set then the numerator and denominator are both the value read
+@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+  * Clock with an adjustable multiplier affecting its output frequency.
+  * Implements .recalc_rate, .set_rate and .round_rate
+  *
+- * Flags:
++ * @flags:
+  * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+  *	from the register, with 0 being a valid value effectively
+  *	zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 25b6e6e6ba6bc..f0231cc66746e 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -190,6 +190,7 @@ enum cpuhp_state {
+ 	/* Must be the last timer callback */
+ 	CPUHP_AP_DUMMY_TIMER_STARTING,
+ 	CPUHP_AP_ARM_XEN_STARTING,
++	CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ 	CPUHP_AP_ARM64_ISNDEP_STARTING,
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index a7d54d4d41fdb..e1bb4c2801e6b 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -144,6 +144,13 @@ enum qm_vf_state {
+ 	QM_NOT_READY,
+ };
+ 
++enum qm_misc_ctl_bits {
++	QM_DRIVER_REMOVING = 0x0,
++	QM_RST_SCHED,
++	QM_RESETTING,
++	QM_MODULE_PARAM,
++};
++
+ enum qm_cap_bits {
+ 	QM_SUPPORT_DB_ISOLATION = 0x0,
+ 	QM_SUPPORT_FUNC_QOS,
+diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
+index 8a3115516a1ba..136e9842120e8 100644
+--- a/include/linux/hw_random.h
++++ b/include/linux/hw_random.h
+@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
+ extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+ 
+ extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
++extern long hwrng_yield(struct hwrng *rng);
+ 
+ #endif /* LINUX_HWRANDOM_H_ */
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index a0dce14090a9e..da5f5fa4a3a6a 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
+  */
+ #define idr_for_each_entry_ul(idr, entry, tmp, id)			\
+ 	for (tmp = 0, id = 0;						\
+-	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ 	     tmp = id, ++id)
+ 
+ /**
+@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
+  * @id: Entry ID.
+  *
+  * Continue to iterate over entries, continuing after the current position.
++ * After normal termination @entry is left with the value NULL.  This
++ * is convenient for a "not found" value.
+  */
+ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
+ 	for (tmp = id;							\
+-	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ 	     tmp = id, ++id)
+ 
+ /*
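
Folding the tmp <= id guard into the assignment guarantees that entry is NULL after the loop terminates normally, as the new kernel-doc note states. A userspace analogue of the macro shape makes the post-loop property visible (for_each_slot() is illustrative, not a kernel macro):

#include <stdio.h>

#define for_each_slot(arr, n, entry, i) \
	for ((i) = 0; ((entry) = (i) < (n) ? (arr)[(i)] : NULL) != NULL; (i)++)

int main(void)
{
	const char *names[] = { "a", "b", "c" };
	const char *entry;
	unsigned int i;

	for_each_slot(names, 3, entry, i)
		printf("%s\n", entry);

	/* the guard is evaluated inside the assignment, so entry is
	 * provably NULL here, making a "not found" check safe */
	return entry == NULL ? 0 : 1;
}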
+diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
+index 47e7a3a61ce69..e8bcad641d8c2 100644
+--- a/include/linux/mfd/core.h
++++ b/include/linux/mfd/core.h
+@@ -92,7 +92,7 @@ struct mfd_cell {
+ 	 * (above) when matching OF nodes with devices that have identical
+ 	 * compatible strings
+ 	 */
+-	const u64 of_reg;
++	u64 of_reg;
+ 
+ 	/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ 	bool use_of_reg;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index b828c7a75be20..48134407b70fd 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -5230,5 +5230,6 @@ extern struct net_device *blackhole_netdev;
+ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+ #define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
+ 		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+ 
+ #endif	/* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 59df211d051fa..a904861de8000 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -12,6 +12,7 @@
+ #define MAX_NUMNODES    (1 << NODES_SHIFT)
+ 
+ #define	NUMA_NO_NODE	(-1)
++#define	NUMA_NO_MEMBLK	(-1)
+ 
+ /* optionally keep NUMA memory info available post init */
+ #ifdef CONFIG_NUMA_KEEP_MEMINFO
+@@ -25,7 +26,7 @@
+ #include <asm/sparsemem.h>
+ 
+ /* Generic implementation available */
+-int numa_map_to_online_node(int node);
++int numa_nearest_node(int node, unsigned int state);
+ 
+ #ifndef memory_add_physaddr_to_nid
+ static inline int memory_add_physaddr_to_nid(u64 start)
+@@ -43,11 +44,18 @@ static inline int phys_to_target_node(u64 start)
+ 	return 0;
+ }
+ #endif
++#ifndef numa_fill_memblks
++static inline int __init numa_fill_memblks(u64 start, u64 end)
++{
++	return NUMA_NO_MEMBLK;
++}
++#endif
+ #else /* !CONFIG_NUMA */
+-static inline int numa_map_to_online_node(int node)
++static inline int numa_nearest_node(int node, unsigned int state)
+ {
+ 	return NUMA_NO_NODE;
+ }
++
+ static inline int memory_add_physaddr_to_nid(u64 start)
+ {
+ 	return 0;
+@@ -58,6 +66,8 @@ static inline int phys_to_target_node(u64 start)
+ }
+ #endif
+ 
++#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
++
+ #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ extern const struct attribute_group arch_node_dev_group;
+ #endif
+diff --git a/include/linux/objtool.h b/include/linux/objtool.h
+index 03f82c2c2ebf6..b5440e7da55bf 100644
+--- a/include/linux/objtool.h
++++ b/include/linux/objtool.h
+@@ -130,7 +130,8 @@
+  * it will be ignored.
+  */
+ .macro VALIDATE_UNRET_BEGIN
+-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
++#if defined(CONFIG_NOINSTR_VALIDATION) && \
++	(defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ .Lhere_\@:
+ 	.pushsection .discard.validate_unret
+ 	.long	.Lhere_\@ - .
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 7ee498cd1f374..c8688d13cc224 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1625,6 +1625,8 @@ struct msix_entry {
+ 	u16	entry;	/* Driver uses to specify entry, OS writes */
+ };
+ 
++struct msi_domain_template;
++
+ #ifdef CONFIG_PCI_MSI
+ int pci_msi_vec_count(struct pci_dev *dev);
+ void pci_disable_msi(struct pci_dev *dev);
+@@ -1657,6 +1659,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
+ void pci_free_irq_vectors(struct pci_dev *dev);
+ int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+ const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
++bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
++			   unsigned int hwsize, void *data);
++struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
++				 const struct irq_affinity_desc *affdesc);
++void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
+ 
+ #else
+ static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
+@@ -1720,6 +1727,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
+ {
+ 	return cpu_possible_mask;
+ }
++
++static inline bool pci_create_ims_domain(struct pci_dev *pdev,
++					 const struct msi_domain_template *template,
++					 unsigned int hwsize, void *data)
++{ return false; }
++
++static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
++					       union msi_instance_cookie *icookie,
++					       const struct irq_affinity_desc *affdesc)
++{
++	struct msi_map map = { .index = -ENOSYS, };
++
++	return map;
++}
++
++static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
++{
++}
++
+ #endif
+ 
+ /**
+@@ -2612,14 +2638,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
+ void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
+ #endif
+ 
+-struct msi_domain_template;
+-
+-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+-			   unsigned int hwsize, void *data);
+-struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+-				 const struct irq_affinity_desc *affdesc);
+-void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
+-
+ #include <linux/dma-mapping.h>
+ 
+ #define pci_printk(level, pdev, fmt, arg...) \
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 227e9d45f61b6..e7afd0dd8a3d1 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -879,6 +879,7 @@ struct perf_event_pmu_context {
+ 	unsigned int			embedded : 1;
+ 
+ 	unsigned int			nr_events;
++	unsigned int			nr_cgroups;
+ 
+ 	atomic_t			refcount; /* event <-> epc */
+ 	struct rcu_head			rcu_head;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index badad7d11f4fd..d305412556f35 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
+ 	RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+ }
+ 
+-#ifdef CONFIG_PM
+-#define _EXPORT_DEV_PM_OPS(name, license, ns)				\
++#define _EXPORT_PM_OPS(name, license, ns)				\
+ 	const struct dev_pm_ops name;					\
+ 	__EXPORT_SYMBOL(name, license, ns);				\
+ 	const struct dev_pm_ops name
+-#define EXPORT_PM_FN_GPL(name)		EXPORT_SYMBOL_GPL(name)
+-#define EXPORT_PM_FN_NS_GPL(name, ns)	EXPORT_SYMBOL_NS_GPL(name, ns)
+-#else
+-#define _EXPORT_DEV_PM_OPS(name, license, ns)				\
++
++#define _DISCARD_PM_OPS(name, license, ns)				\
+ 	static __maybe_unused const struct dev_pm_ops __static_##name
++
++#ifdef CONFIG_PM
++#define _EXPORT_DEV_PM_OPS(name, license, ns)		_EXPORT_PM_OPS(name, license, ns)
++#define EXPORT_PM_FN_GPL(name)				EXPORT_SYMBOL_GPL(name)
++#define EXPORT_PM_FN_NS_GPL(name, ns)			EXPORT_SYMBOL_NS_GPL(name, ns)
++#else
++#define _EXPORT_DEV_PM_OPS(name, license, ns)		_DISCARD_PM_OPS(name, license, ns)
+ #define EXPORT_PM_FN_GPL(name)
+ #define EXPORT_PM_FN_NS_GPL(name, ns)
+ #endif
+ 
+-#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+-#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++#ifdef CONFIG_PM_SLEEP
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns)	_EXPORT_PM_OPS(name, license, ns)
++#else
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns)	_DISCARD_PM_OPS(name, license, ns)
++#endif
++
++#define EXPORT_DEV_PM_OPS(name)				_EXPORT_DEV_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_PM_OPS(name)			_EXPORT_DEV_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_PM_OPS(name, ns)			_EXPORT_DEV_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns)		_EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++
++#define EXPORT_DEV_SLEEP_PM_OPS(name)			_EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name)		_EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns)		_EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns)	_EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
+ 
+ /*
+  * Use this if you want to use the same suspend and resume callbacks for suspend
+@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
+ 	_DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+ 
+ #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+-	EXPORT_DEV_PM_OPS(name) = { \
++	EXPORT_DEV_SLEEP_PM_OPS(name) = { \
+ 		SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ 	}
+ #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+-	EXPORT_GPL_DEV_PM_OPS(name) = { \
++	EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
+ 		SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ 	}
+ #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns)	\
+-	EXPORT_NS_DEV_PM_OPS(name, ns) = { \
++	EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
+ 		SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ 	}
+ #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns)	\
+-	EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
++	EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
+ 		SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ 	}
+ 
+diff --git a/include/linux/string.h b/include/linux/string.h
+index dbfc66400050f..9e3cb6923b0ef 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -277,10 +277,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+  */
+ #define strtomem_pad(dest, src, pad)	do {				\
+ 	const size_t _dest_len = __builtin_object_size(dest, 1);	\
++	const size_t _src_len = __builtin_object_size(src, 1);		\
+ 									\
+ 	BUILD_BUG_ON(!__builtin_constant_p(_dest_len) ||		\
+ 		     _dest_len == (size_t)-1);				\
+-	memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
++	memcpy_and_pad(dest, _dest_len, src,				\
++		       strnlen(src, min(_src_len, _dest_len)), pad);	\
+ } while (0)
+ 
+ /**
+@@ -298,10 +300,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+  */
+ #define strtomem(dest, src)	do {					\
+ 	const size_t _dest_len = __builtin_object_size(dest, 1);	\
++	const size_t _src_len = __builtin_object_size(src, 1);		\
+ 									\
+ 	BUILD_BUG_ON(!__builtin_constant_p(_dest_len) ||		\
+ 		     _dest_len == (size_t)-1);				\
+-	memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len)));	\
++	memcpy(dest, src, strnlen(src, min(_src_len, _dest_len)));	\
+ } while (0)
+ 
+ /**
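
The strtomem()/strtomem_pad() hardening bounds strnlen() by the smaller of the source and destination object sizes, so a source array without a NUL terminator can no longer be scanned past its end. A userspace sketch of the same double bound, using sizeof on true arrays in place of __builtin_object_size() (COPY_BOUNDED and MIN are invented names):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define COPY_BOUNDED(dest, src) do {                          \
	const size_t _dlen = sizeof(dest);                    \
	const size_t _slen = sizeof(src);                     \
	memcpy(dest, src, strnlen(src, MIN(_slen, _dlen)));   \
} while (0)

int main(void)
{
	char src[4] = { 'a', 'b', 'c', 'd' };  /* no NUL terminator */
	char dst[16] = { 0 };

	COPY_BOUNDED(dst, src);  /* strnlen() stops at sizeof(src) */
	printf("%s\n", dst);     /* dst is zero-filled, so this is safe */
	return 0;
}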
+diff --git a/include/linux/topology.h b/include/linux/topology.h
+index fea32377f7c77..52f5850730b3e 100644
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
+ #else
+ static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+-	return cpumask_nth(cpu, cpus);
++	return cpumask_nth_and(cpu, cpus, cpu_online_mask);
+ }
+ 
+ static inline const struct cpumask *
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index 43c1fb2d2c21a..d04188714dca1 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
+ 	return (num + net_hash_mix(net)) & mask;
+ }
+ 
++enum {
++	UDP_FLAGS_CORK,		/* Cork is required */
++	UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
++	UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
++	UDP_FLAGS_GRO_ENABLED,	/* Request GRO aggregation */
++	UDP_FLAGS_ACCEPT_FRAGLIST,
++	UDP_FLAGS_ACCEPT_L4,
++	UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
++	UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
++	UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
++};
++
+ struct udp_sock {
+ 	/* inet_sock has to be the first member */
+ 	struct inet_sock inet;
+ #define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
+ #define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
+ #define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node
++
++	unsigned long	 udp_flags;
++
+ 	int		 pending;	/* Any pending frames ? */
+-	unsigned int	 corkflag;	/* Cork is required */
+ 	__u8		 encap_type;	/* Is this an Encapsulation socket? */
+-	unsigned char	 no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+-			 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+-			 encap_enabled:1, /* This socket enabled encap
+-					   * processing; UDP tunnels and
+-					   * different encapsulation layer set
+-					   * this
+-					   */
+-			 gro_enabled:1,	/* Request GRO aggregation */
+-			 accept_udp_l4:1,
+-			 accept_udp_fraglist:1;
++
+ 	/*
+ 	 * Following member retains the information to create a UDP header
+ 	 * when the socket is uncorked.
+@@ -62,12 +67,6 @@ struct udp_sock {
+ 	 */
+ 	__u16		 pcslen;
+ 	__u16		 pcrlen;
+-/* indicator bits used by pcflag: */
+-#define UDPLITE_BIT      0x1  		/* set by udplite proto init function */
+-#define UDPLITE_SEND_CC  0x2  		/* set via udplite setsockopt         */
+-#define UDPLITE_RECV_CC  0x4		/* set via udplite setsocktopt        */
+-	__u8		 pcflag;        /* marks socket as UDP-Lite if > 0    */
+-	__u8		 unused[3];
+ 	/*
+ 	 * For encapsulation sockets.
+ 	 */
+@@ -95,28 +94,39 @@ struct udp_sock {
+ 	int		forward_threshold;
+ };
+ 
++#define udp_test_bit(nr, sk)			\
++	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_set_bit(nr, sk)			\
++	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_test_and_set_bit(nr, sk)		\
++	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_clear_bit(nr, sk)			\
++	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_assign_bit(nr, sk, val)		\
++	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
++
+ #define UDP_MAX_SEGMENTS	(1 << 6UL)
+ 
+ #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+ 
+ static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+ {
+-	udp_sk(sk)->no_check6_tx = val;
++	udp_assign_bit(NO_CHECK6_TX, sk, val);
+ }
+ 
+ static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+ {
+-	udp_sk(sk)->no_check6_rx = val;
++	udp_assign_bit(NO_CHECK6_RX, sk, val);
+ }
+ 
+-static inline bool udp_get_no_check6_tx(struct sock *sk)
++static inline bool udp_get_no_check6_tx(const struct sock *sk)
+ {
+-	return udp_sk(sk)->no_check6_tx;
++	return udp_test_bit(NO_CHECK6_TX, sk);
+ }
+ 
+-static inline bool udp_get_no_check6_rx(struct sock *sk)
++static inline bool udp_get_no_check6_rx(const struct sock *sk)
+ {
+-	return udp_sk(sk)->no_check6_rx;
++	return udp_test_bit(NO_CHECK6_RX, sk);
+ }
+ 
+ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ 	if (!skb_is_gso(skb))
+ 		return false;
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++	    !udp_test_bit(ACCEPT_L4, sk))
+ 		return true;
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
++	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ 		return true;
+ 
+ 	return false;
+@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ 
+ static inline void udp_allow_gso(struct sock *sk)
+ {
+-	udp_sk(sk)->accept_udp_l4 = 1;
+-	udp_sk(sk)->accept_udp_fraglist = 1;
++	udp_set_bit(ACCEPT_L4, sk);
++	udp_set_bit(ACCEPT_FRAGLIST, sk);
+ }
+ 
+ #define udp_portaddr_for_each_entry(__sk, list) \
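
The udp.h rework above moves the socket booleans out of adjacent C bitfields
into individual bits of a single flags word manipulated with atomic bitops, so
lockless writers can no longer trample neighbouring fields through a
read-modify-write of the shared byte. A userspace analogue with C11 atomics,
not part of the patch (the kernel uses set_bit()/test_bit() on udp_flags):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { FLAG_CORK, FLAG_NO_CHECK6_TX, FLAG_GRO_ENABLED };

	struct udp_sock_sketch {
		atomic_ulong udp_flags;		/* one bit per former bitfield */
	};

	static void udp_set_bit(struct udp_sock_sketch *up, int nr)
	{
		atomic_fetch_or(&up->udp_flags, 1UL << nr);	/* atomic RMW */
	}

	static bool udp_test_bit(struct udp_sock_sketch *up, int nr)
	{
		return (atomic_load(&up->udp_flags) >> nr) & 1;
	}

	int main(void)
	{
		struct udp_sock_sketch up = { 0 };

		udp_set_bit(&up, FLAG_GRO_ENABLED);	/* safe vs. concurrent setters */
		printf("%d %d\n", udp_test_bit(&up, FLAG_GRO_ENABLED),
		       udp_test_bit(&up, FLAG_CORK));	/* 1 0 */
		return 0;
	}
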
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index f34e50ebcf60a..cb2d47f280910 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -8,6 +8,7 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+ 
++#include <linux/errno.h>
+ #include <linux/types.h>
+ 
+ /*
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 3ff822ebb3a47..1788aeedecf5a 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1,6 +1,7 @@
+ /*
+    BlueZ - Bluetooth protocol stack for Linux
+    Copyright (C) 2000-2001 Qualcomm Incorporated
++   Copyright 2023 NXP
+ 
+    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+ 
+@@ -672,6 +673,8 @@ enum {
+ #define HCI_TX_POWER_INVALID	127
+ #define HCI_RSSI_INVALID	127
+ 
++#define HCI_SYNC_HANDLE_INVALID	0xffff
++
+ #define HCI_ROLE_MASTER		0x00
+ #define HCI_ROLE_SLAVE		0x01
+ 
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index abb7cb5db9457..e159450d3f2c1 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -350,6 +350,8 @@ struct hci_dev {
+ 	struct list_head list;
+ 	struct mutex	lock;
+ 
++	struct ida	unset_handle_ida;
++
+ 	const char	*name;
+ 	unsigned long	flags;
+ 	__u16		id;
+@@ -978,6 +980,8 @@ enum {
+ 	HCI_CONN_CREATE_CIS,
+ 	HCI_CONN_BIG_SYNC,
+ 	HCI_CONN_BIG_SYNC_FAILED,
++	HCI_CONN_PA_SYNC,
++	HCI_CONN_PA_SYNC_FAILED,
+ };
+ 
+ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
+@@ -1301,12 +1305,58 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *
+ 		if (c->type != ISO_LINK)
+ 			continue;
+ 
+-		if (handle == c->iso_qos.bcast.big) {
++		if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
++
++	rcu_read_unlock();
++
++	return NULL;
++}
++
++static inline struct hci_conn *
++hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type != ISO_LINK ||
++			!test_bit(HCI_CONN_PA_SYNC, &c->flags))
++			continue;
++
++		if (c->iso_qos.bcast.big == big) {
+ 			rcu_read_unlock();
+ 			return c;
+ 		}
+ 	}
++	rcu_read_unlock();
++
++	return NULL;
++}
++
++static inline struct hci_conn *
++hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type != ISO_LINK ||
++			!test_bit(HCI_CONN_PA_SYNC, &c->flags))
++			continue;
+ 
++		if (c->sync_handle == sync_handle) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
+ 	rcu_read_unlock();
+ 
+ 	return NULL;
+@@ -1381,7 +1431,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
+ int hci_conn_check_create_cis(struct hci_conn *conn);
+ 
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+-			      u8 role);
++			      u8 role, u16 handle);
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++				    bdaddr_t *dst, u8 role);
+ void hci_conn_del(struct hci_conn *conn);
+ void hci_conn_hash_flush(struct hci_dev *hdev);
+ void hci_conn_check_pending(struct hci_dev *hdev);
+@@ -1415,7 +1467,8 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 				 __u8 data_len, __u8 *data);
+ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
+ 		       __u8 sid, struct bt_iso_qos *qos);
+-int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
++int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
++			   struct bt_iso_qos *qos,
+ 			   __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+ int hci_conn_check_link_mode(struct hci_conn *conn);
+ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 641c6edc9b81d..d58d6d37a4479 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5827,6 +5827,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+  */
+ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+ 
++/**
++ * wiphy_work_flush - flush previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush, this can be %NULL to flush all work
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
++
+ struct wiphy_delayed_work {
+ 	struct wiphy_work work;
+ 	struct wiphy *wiphy;
+@@ -5870,6 +5880,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ 			       struct wiphy_delayed_work *dwork);
+ 
++/**
++ * wiphy_delayed_work_flush - flush previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork);
++
+ /**
+  * struct wireless_dev - wireless device state
+  *
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 7f0adda3bf2fe..335bbc52171c1 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -40,8 +40,8 @@ struct flowi_common {
+ #define FLOWI_FLAG_KNOWN_NH		0x02
+ 	__u32	flowic_secid;
+ 	kuid_t  flowic_uid;
+-	struct flowi_tunnel flowic_tun_key;
+ 	__u32		flowic_multipath_hash;
++	struct flowi_tunnel flowic_tun_key;
+ };
+ 
+ union flowi_uli {
+diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
+index 078d3c52c03f9..e5f2f0b73a9a0 100644
+--- a/include/net/netfilter/nf_conntrack_act_ct.h
++++ b/include/net/netfilter/nf_conntrack_act_ct.h
+@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
+ #endif
+ }
+ 
+-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
++static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
++					   enum ip_conntrack_info ctinfo)
++{
++#if IS_ENABLED(CONFIG_NET_ACT_CT)
++	struct nf_conn_act_ct_ext *act_ct_ext;
++
++	act_ct_ext = nf_conn_act_ct_ext_find(ct);
++	if (dev_net(skb->dev) == &init_net && act_ct_ext)
++		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
++#endif
++}
++
++static inline struct
++nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
++					   struct nf_conn *ct,
++					   enum ip_conntrack_info ctinfo)
+ {
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+ 	struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
+ 		return act_ct;
+ 
+ 	act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
++	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ 	return act_ct;
+ #else
+ 	return NULL;
+ #endif
+ }
+ 
+-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+-					   enum ip_conntrack_info ctinfo)
+-{
+-#if IS_ENABLED(CONFIG_NET_ACT_CT)
+-	struct nf_conn_act_ct_ext *act_ct_ext;
+-
+-	act_ct_ext = nf_conn_act_ct_ext_find(ct);
+-	if (dev_net(skb->dev) == &init_net && act_ct_ext)
+-		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+-#endif
+-}
+-
+ #endif /* _NF_CONNTRACK_ACT_CT_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 7f684806c2912..b8983d8464ea4 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -805,7 +805,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+ }
+ 
+ /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+-static inline u32 tcp_ns_to_ts(u64 ns)
++static inline u64 tcp_ns_to_ts(u64 ns)
+ {
+ 	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ }
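
Widening the tcp_ns_to_ts() return type matters because the ms-based TSval
exceeds 32 bits after about 49.7 days of clock (2^32 ms), so a u32 return
would silently wrap. A small demonstration, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define TCP_TS_HZ	1000ULL		/* TSval ticks in milliseconds */

	static uint64_t tcp_ns_to_ts(uint64_t ns)
	{
		return ns / (NSEC_PER_SEC / TCP_TS_HZ);
	}

	int main(void)
	{
		uint64_t ns = 50ULL * 24 * 3600 * NSEC_PER_SEC;	/* ~50 days */

		printf("u64: %llu\n", (unsigned long long)tcp_ns_to_ts(ns));
		printf("u32: %u (wrapped)\n", (uint32_t)tcp_ns_to_ts(ns));
		return 0;
	}
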
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index 0ca9b7a11baf5..29251c3519cf0 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+ }
+ #endif
+ 
+-static inline void udp_tunnel_encap_enable(struct socket *sock)
++static inline void udp_tunnel_encap_enable(struct sock *sk)
+ {
+-	struct udp_sock *up = udp_sk(sock->sk);
+-
+-	if (up->encap_enabled)
++	if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
+ 		return;
+ 
+-	up->encap_enabled = 1;
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sock->sk->sk_family == PF_INET6)
++	if (READ_ONCE(sk->sk_family) == PF_INET6)
+ 		ipv6_stub->udpv6_encap_enable();
+ #endif
+ 	udp_encap_enable();
+diff --git a/include/net/udplite.h b/include/net/udplite.h
+index 299c14ce2bb94..dd60b51364837 100644
+--- a/include/net/udplite.h
++++ b/include/net/udplite.h
+@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+ /* Fast-path computation of checksum. Socket may not be locked. */
+ static inline __wsum udplite_csum(struct sk_buff *skb)
+ {
+-	const struct udp_sock *up = udp_sk(skb->sk);
+ 	const int off = skb_transport_offset(skb);
++	const struct sock *sk = skb->sk;
+ 	int len = skb->len - off;
+ 
+-	if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+-		if (0 < up->pcslen)
+-			len = up->pcslen;
+-		udp_hdr(skb)->len = htons(up->pcslen);
++	if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
++		u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
++
++		if (pcslen < len) {
++			if (pcslen > 0)
++				len = pcslen;
++			udp_hdr(skb)->len = htons(pcslen);
++		}
+ 	}
+ 	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
+ 
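
udplite_csum() now takes one READ_ONCE() snapshot of pcslen and derives both
the checksum coverage and the on-wire length field from it, so a concurrent
UDPLITE_SEND_CSCOV setsockopt() can no longer produce the two from different
values. A plain-integer sketch of the clamping, not part of the patch:

	#include <stdio.h>

	struct cov { int csum_len; int hdr_len; };

	/* A single pcslen snapshot drives both outputs; 0 keeps full
	 * coverage (a 0 length field means "whole datagram" on the wire). */
	static struct cov udplite_cov(int payload_len, int pcslen)
	{
		struct cov c = { payload_len, payload_len };

		if (pcslen < payload_len) {
			if (pcslen > 0)
				c.csum_len = pcslen;
			c.hdr_len = pcslen;
		}
		return c;
	}

	int main(void)
	{
		struct cov c = udplite_cov(1000, 20);

		printf("covers %d bytes, header says %d\n", c.csum_len, c.hdr_len);
		return 0;
	}
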
+diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
+index 5842e38bb2880..f5e4ac5b8cce8 100644
+--- a/include/soc/tegra/bpmp.h
++++ b/include/soc/tegra/bpmp.h
+@@ -102,8 +102,12 @@ struct tegra_bpmp {
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *debugfs_mirror;
+ #endif
++
++	bool suspended;
+ };
+ 
++#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
++
+ struct tegra_bpmp_message {
+ 	unsigned int mrq;
+ 
+@@ -117,6 +121,8 @@ struct tegra_bpmp_message {
+ 		size_t size;
+ 		int ret;
+ 	} rx;
++
++	unsigned long flags;
+ };
+ 
+ #if IS_ENABLED(CONFIG_TEGRA_BPMP)
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 7ca6f4f7819ce..c4ec71801d4ad 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -19,12 +19,15 @@
+ 
+ #define BGID_ARRAY	64
+ 
++/* BIDs are addressed by a 16-bit field in a CQE */
++#define MAX_BIDS_PER_BGID (1 << 16)
++
+ struct io_provide_buf {
+ 	struct file			*file;
+ 	__u64				addr;
+ 	__u32				len;
+ 	__u32				bgid;
+-	__u16				nbufs;
++	__u32				nbufs;
+ 	__u16				bid;
+ };
+ 
+@@ -293,7 +296,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		return -EINVAL;
+ 
+ 	tmp = READ_ONCE(sqe->fd);
+-	if (!tmp || tmp > USHRT_MAX)
++	if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ 		return -EINVAL;
+ 
+ 	memset(p, 0, sizeof(*p));
+@@ -336,7 +339,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 		return -EINVAL;
+ 
+ 	tmp = READ_ONCE(sqe->fd);
+-	if (!tmp || tmp > USHRT_MAX)
++	if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ 		return -E2BIG;
+ 	p->nbufs = tmp;
+ 	p->addr = READ_ONCE(sqe->addr);
+@@ -356,7 +359,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 	tmp = READ_ONCE(sqe->off);
+ 	if (tmp > USHRT_MAX)
+ 		return -E2BIG;
+-	if (tmp + p->nbufs >= USHRT_MAX)
++	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
+ 		return -EINVAL;
+ 	p->bid = tmp;
+ 	return 0;
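
With nbufs widened to __u32 and the checks above, a buffer group can now span
the full 16-bit BID space: bid + nbufs may equal MAX_BIDS_PER_BGID but not
exceed it (the old ">= USHRT_MAX" test was off by one and capped a group at
65534 buffers). A standalone sketch of the corrected validation, not part of
the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_BIDS_PER_BGID (1 << 16)	/* BIDs are a 16-bit CQE field */

	/* Valid iff buffer IDs bid .. bid + nbufs - 1 all fit in 16 bits. */
	static int check_bid_range(uint32_t bid, uint32_t nbufs)
	{
		if (!nbufs || nbufs > MAX_BIDS_PER_BGID)
			return -1;
		if (bid + nbufs > MAX_BIDS_PER_BGID)
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_bid_range(0, MAX_BIDS_PER_BGID));	/* 0: full group */
		printf("%d\n", check_bid_range(1, MAX_BIDS_PER_BGID));	/* -1: overflow */
		return 0;
	}
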
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 47ecb070194ba..29cd8038777b6 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	int ret;
+ 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ 
+-	if (connect->in_progress) {
+-		struct socket *socket;
+-
+-		ret = -ENOTSOCK;
+-		socket = sock_from_file(req->file);
+-		if (socket)
+-			ret = sock_error(socket->sk);
+-		goto out;
+-	}
+-
+ 	if (req_has_async_data(req)) {
+ 		io = req->async_data;
+ 	} else {
+@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	    && force_nonblock) {
+ 		if (ret == -EINPROGRESS) {
+ 			connect->in_progress = true;
+-			return -EAGAIN;
+-		}
+-		if (ret == -ECONNABORTED) {
++		} else if (ret == -ECONNABORTED) {
+ 			if (connect->seen_econnaborted)
+ 				goto out;
+ 			connect->seen_econnaborted = true;
+@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 		memcpy(req->async_data, &__io, sizeof(__io));
+ 		return -EAGAIN;
+ 	}
++	if (connect->in_progress) {
++		/*
++		 * At least bluetooth will return -EBADFD on a re-connect
++		 * attempt, and it's (supposedly) also valid to get -EISCONN
++		 * which means the previous result is good. For both of these,
++		 * grab the sock_error() and use that for the completion.
++		 */
++		if (ret == -EBADFD || ret == -EISCONN)
++			ret = sock_error(sock_from_file(req->file)->sk);
++	}
+ 	if (ret == -ERESTARTSYS)
+ 		ret = -EINTR;
+ out:
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 56d3da7d0bc66..e209e748a8e05 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ 	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+ 
+ 	preempt_disable();
++	local_irq_save(flags);
+ 	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ 		__this_cpu_dec(*(htab->map_locked[hash]));
++		local_irq_restore(flags);
+ 		preempt_enable();
+ 		return -EBUSY;
+ 	}
+ 
+-	raw_spin_lock_irqsave(&b->raw_lock, flags);
++	raw_spin_lock(&b->raw_lock);
+ 	*pflags = flags;
+ 
+ 	return 0;
+@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ 				      unsigned long flags)
+ {
+ 	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+-	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
++	raw_spin_unlock(&b->raw_lock);
+ 	__this_cpu_dec(*(htab->map_locked[hash]));
++	local_irq_restore(flags);
+ 	preempt_enable();
+ }
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 8812397a5cd96..add185dfa5c65 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1174,13 +1174,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ 		ret = -EBUSY;
+ 		goto out;
+ 	}
+-	if (!atomic64_read(&map->usercnt)) {
+-		/* maps with timers must be either held by user space
+-		 * or pinned in bpffs.
+-		 */
+-		ret = -EPERM;
+-		goto out;
+-	}
+ 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
+ 	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+ 	if (!t) {
+@@ -1193,7 +1186,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ 	rcu_assign_pointer(t->callback_fn, NULL);
+ 	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+ 	t->timer.function = bpf_timer_cb;
+-	timer->timer = t;
++	WRITE_ONCE(timer->timer, t);
++	/* Guarantee the order between timer->timer and map->usercnt. So
++	 * when there are concurrent uref release and bpf timer init, either
++	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
++	 * timer or atomic64_read() below returns a zero usercnt.
++	 */
++	smp_mb();
++	if (!atomic64_read(&map->usercnt)) {
++		/* maps with timers must be either held by user space
++		 * or pinned in bpffs.
++		 */
++		WRITE_ONCE(timer->timer, NULL);
++		kfree(t);
++		ret = -EPERM;
++	}
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	return ret;
+@@ -1368,7 +1375,7 @@ void bpf_timer_cancel_and_free(void *val)
+ 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ 	 * this timer, since it won't be initialized.
+ 	 */
+-	timer->timer = NULL;
++	WRITE_ONCE(timer->timer, NULL);
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	if (!t)
+@@ -2167,7 +2174,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
+ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
+ 				       struct cgroup *ancestor)
+ {
+-	return task_under_cgroup_hierarchy(task, ancestor);
++	long ret;
++
++	rcu_read_lock();
++	ret = task_under_cgroup_hierarchy(task, ancestor);
++	rcu_read_unlock();
++	return ret;
+ }
+ #endif /* CONFIG_CGROUPS */
+ 
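
The smp_mb() added above orders the timer->timer publication against the
map->usercnt read; the uref-release side decrements usercnt and then looks at
timer->timer, so at least one of the two racing parties must observe the
other's write. A userspace sketch of that store-fence-load pairing with C11
atomics, not part of the patch (names are stand-ins):

	#include <stdatomic.h>
	#include <stdio.h>

	struct map_sketch { atomic_long usercnt; };
	struct timer_slot { _Atomic(void *) timer; };

	/* Initialiser: publish the timer, full fence, then check usercnt. */
	static int timer_init(struct map_sketch *map, struct timer_slot *slot,
			      void *t)
	{
		atomic_store_explicit(&slot->timer, t, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* pairs with release */
		if (atomic_load_explicit(&map->usercnt, memory_order_relaxed) == 0) {
			/* Lost the race against the final uref drop: undo. */
			atomic_store_explicit(&slot->timer, NULL,
					      memory_order_relaxed);
			return -1;
		}
		return 0;
	}

	/* Release side: drop the ref, full fence, then look for a timer. */
	static void *uref_release(struct map_sketch *map, struct timer_slot *slot)
	{
		atomic_fetch_sub_explicit(&map->usercnt, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load_explicit(&slot->timer, memory_order_relaxed);
	}

	int main(void)
	{
		struct map_sketch map = { 1 };
		struct timer_slot slot = { NULL };
		int dummy;

		printf("init: %d\n", timer_init(&map, &slot, &dummy));	/* 0 */
		printf("release sees timer: %d\n",
		       uref_release(&map, &slot) != NULL);		/* 1 */
		return 0;
	}
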
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 53ff50cac61ea..e97aeda3a86b5 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ 		goto out;
+ 	}
+ 
+-	/* clear all bits except SHARE_IPMODIFY */
+-	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
++	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
++	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
+ 
+ 	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+ 	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 104681258d24f..e7e2687c35884 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -11029,6 +11029,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			break;
+ 		}
+ 		case KF_ARG_PTR_TO_CALLBACK:
++			if (reg->type != PTR_TO_FUNC) {
++				verbose(env, "arg%d expected pointer to func\n", i);
++				return -EINVAL;
++			}
+ 			meta->subprogno = reg->subprogno;
+ 			break;
+ 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
+@@ -19384,6 +19388,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ 	if (!tr)
+ 		return -ENOMEM;
+ 
++	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
++		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
++
+ 	prog->aux->dst_trampoline = tr;
+ 	return 0;
+ }
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 9628ae3c2825b..26119d2154102 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -625,9 +625,31 @@ static int __init smt_cmdline_disable(char *str)
+ }
+ early_param("nosmt", smt_cmdline_disable);
+ 
+-static inline bool cpu_smt_allowed(unsigned int cpu)
++/*
++ * For architectures supporting partial SMT states, check if the thread is allowed.
++ * Otherwise this has already been checked through cpu_smt_max_threads when
++ * setting the SMT level.
++ */
++static inline bool cpu_smt_thread_allowed(unsigned int cpu)
++{
++#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
++	return topology_smt_thread_allowed(cpu);
++#else
++	return true;
++#endif
++}
++
++static inline bool cpu_bootable(unsigned int cpu)
+ {
+-	if (cpu_smt_control == CPU_SMT_ENABLED)
++	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
++		return true;
++
++	/* All CPUs are bootable if controls are not configured */
++	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
++		return true;
++
++	/* All CPUs are bootable if CPU is not SMT capable */
++	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ 		return true;
+ 
+ 	if (topology_is_primary_thread(cpu))
+@@ -650,22 +672,8 @@ bool cpu_smt_possible(void)
+ }
+ EXPORT_SYMBOL_GPL(cpu_smt_possible);
+ 
+-static inline bool cpuhp_smt_aware(void)
+-{
+-	return topology_smt_supported();
+-}
+-
+-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+-{
+-	return cpu_primary_thread_mask;
+-}
+ #else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+-static inline bool cpuhp_smt_aware(void) { return false; }
+-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+-{
+-	return cpu_present_mask;
+-}
++static inline bool cpu_bootable(unsigned int cpu) { return true; }
+ #endif
+ 
+ static inline enum cpuhp_state
+@@ -768,10 +776,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
+ 	 * SMT soft disabling on X86 requires to bring the CPU out of the
+ 	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
+ 	 * CPU marked itself as booted_once in notify_cpu_starting() so the
+-	 * cpu_smt_allowed() check will now return false if this is not the
++	 * cpu_bootable() check will now return false if this is not the
+ 	 * primary sibling.
+ 	 */
+-	if (!cpu_smt_allowed(cpu))
++	if (!cpu_bootable(cpu))
+ 		return -ECANCELED;
+ 	return 0;
+ }
+@@ -1721,7 +1729,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
+ 		err = -EBUSY;
+ 		goto out;
+ 	}
+-	if (!cpu_smt_allowed(cpu)) {
++	if (!cpu_bootable(cpu)) {
+ 		err = -EPERM;
+ 		goto out;
+ 	}
+@@ -1815,6 +1823,16 @@ static int __init parallel_bringup_parse_param(char *arg)
+ }
+ early_param("cpuhp.parallel", parallel_bringup_parse_param);
+ 
++static inline bool cpuhp_smt_aware(void)
++{
++	return topology_smt_supported();
++}
++
++static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
++{
++	return cpu_primary_thread_mask;
++}
++
+ /*
+  * On architectures which have enabled parallel bringup this invokes all BP
+  * prepare states for each of the to be onlined APs first. The last state
+@@ -2648,6 +2666,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ 	for_each_online_cpu(cpu) {
+ 		if (topology_is_primary_thread(cpu))
+ 			continue;
++		/*
++		 * Disable can be called with CPU_SMT_ENABLED when changing
++		 * from a higher to lower number of SMT threads per core.
++		 */
++		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
++			continue;
+ 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+ 		if (ret)
+ 			break;
+@@ -2682,6 +2706,8 @@ int cpuhp_smt_enable(void)
+ 		/* Skip online CPUs and CPUs on offline nodes */
+ 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+ 			continue;
++		if (!cpu_smt_thread_allowed(cpu))
++			continue;
+ 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+ 		if (ret)
+ 			break;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f2f4d2b3beee0..e66398c9ffe05 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -375,6 +375,7 @@ enum event_type_t {
+ 	EVENT_TIME = 0x4,
+ 	/* see ctx_resched() for details */
+ 	EVENT_CPU = 0x8,
++	EVENT_CGROUP = 0x10,
+ 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+ };
+ 
+@@ -684,20 +685,26 @@ do {									\
+ 	___p;								\
+ })
+ 
+-static void perf_ctx_disable(struct perf_event_context *ctx)
++static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
+ {
+ 	struct perf_event_pmu_context *pmu_ctx;
+ 
+-	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++		if (cgroup && !pmu_ctx->nr_cgroups)
++			continue;
+ 		perf_pmu_disable(pmu_ctx->pmu);
++	}
+ }
+ 
+-static void perf_ctx_enable(struct perf_event_context *ctx)
++static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+ {
+ 	struct perf_event_pmu_context *pmu_ctx;
+ 
+-	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++		if (cgroup && !pmu_ctx->nr_cgroups)
++			continue;
+ 		perf_pmu_enable(pmu_ctx->pmu);
++	}
+ }
+ 
+ static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
+@@ -856,9 +863,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 		return;
+ 
+ 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+-	perf_ctx_disable(&cpuctx->ctx);
++	perf_ctx_disable(&cpuctx->ctx, true);
+ 
+-	ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
++	ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ 	/*
+ 	 * must not be done before ctxswout due
+ 	 * to update_cgrp_time_from_cpuctx() in
+@@ -870,9 +877,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 	 * perf_cgroup_set_timestamp() in ctx_sched_in()
+ 	 * to not have to pass task around
+ 	 */
+-	ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
++	ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ 
+-	perf_ctx_enable(&cpuctx->ctx);
++	perf_ctx_enable(&cpuctx->ctx, true);
+ 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+ 
+@@ -965,6 +972,8 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
+ 	if (!is_cgroup_event(event))
+ 		return;
+ 
++	event->pmu_ctx->nr_cgroups++;
++
+ 	/*
+ 	 * Because cgroup events are always per-cpu events,
+ 	 * @ctx == &cpuctx->ctx.
+@@ -985,6 +994,8 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
+ 	if (!is_cgroup_event(event))
+ 		return;
+ 
++	event->pmu_ctx->nr_cgroups--;
++
+ 	/*
+ 	 * Because cgroup events are always per-cpu events,
+ 	 * @ctx == &cpuctx->ctx.
+@@ -2679,9 +2690,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+ 
+ 	event_type &= EVENT_ALL;
+ 
+-	perf_ctx_disable(&cpuctx->ctx);
++	perf_ctx_disable(&cpuctx->ctx, false);
+ 	if (task_ctx) {
+-		perf_ctx_disable(task_ctx);
++		perf_ctx_disable(task_ctx, false);
+ 		task_ctx_sched_out(task_ctx, event_type);
+ 	}
+ 
+@@ -2699,9 +2710,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+ 
+ 	perf_event_sched_in(cpuctx, task_ctx);
+ 
+-	perf_ctx_enable(&cpuctx->ctx);
++	perf_ctx_enable(&cpuctx->ctx, false);
+ 	if (task_ctx)
+-		perf_ctx_enable(task_ctx);
++		perf_ctx_enable(task_ctx, false);
+ }
+ 
+ void perf_pmu_resched(struct pmu *pmu)
+@@ -3246,6 +3257,9 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+ 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ 	struct perf_event_pmu_context *pmu_ctx;
+ 	int is_active = ctx->is_active;
++	bool cgroup = event_type & EVENT_CGROUP;
++
++	event_type &= ~EVENT_CGROUP;
+ 
+ 	lockdep_assert_held(&ctx->lock);
+ 
+@@ -3292,8 +3306,11 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+ 
+ 	is_active ^= ctx->is_active; /* changed bits */
+ 
+-	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++		if (cgroup && !pmu_ctx->nr_cgroups)
++			continue;
+ 		__pmu_ctx_sched_out(pmu_ctx, is_active);
++	}
+ }
+ 
+ /*
+@@ -3484,7 +3501,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ 		if (context_equiv(ctx, next_ctx)) {
+ 
+-			perf_ctx_disable(ctx);
++			perf_ctx_disable(ctx, false);
+ 
+ 			/* PMIs are disabled; ctx->nr_pending is stable. */
+ 			if (local_read(&ctx->nr_pending) ||
+@@ -3504,7 +3521,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ 			perf_ctx_sched_task_cb(ctx, false);
+ 			perf_event_swap_task_ctx_data(ctx, next_ctx);
+ 
+-			perf_ctx_enable(ctx);
++			perf_ctx_enable(ctx, false);
+ 
+ 			/*
+ 			 * RCU_INIT_POINTER here is safe because we've not
+@@ -3528,13 +3545,13 @@ unlock:
+ 
+ 	if (do_switch) {
+ 		raw_spin_lock(&ctx->lock);
+-		perf_ctx_disable(ctx);
++		perf_ctx_disable(ctx, false);
+ 
+ inside_switch:
+ 		perf_ctx_sched_task_cb(ctx, false);
+ 		task_ctx_sched_out(ctx, EVENT_ALL);
+ 
+-		perf_ctx_enable(ctx);
++		perf_ctx_enable(ctx, false);
+ 		raw_spin_unlock(&ctx->lock);
+ 	}
+ }
+@@ -3820,47 +3837,32 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ 	return 0;
+ }
+ 
+-static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void pmu_groups_sched_in(struct perf_event_context *ctx,
++				struct perf_event_groups *groups,
++				struct pmu *pmu)
+ {
+-	struct perf_event_pmu_context *pmu_ctx;
+ 	int can_add_hw = 1;
+-
+-	if (pmu) {
+-		visit_groups_merge(ctx, &ctx->pinned_groups,
+-				   smp_processor_id(), pmu,
+-				   merge_sched_in, &can_add_hw);
+-	} else {
+-		list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+-			can_add_hw = 1;
+-			visit_groups_merge(ctx, &ctx->pinned_groups,
+-					   smp_processor_id(), pmu_ctx->pmu,
+-					   merge_sched_in, &can_add_hw);
+-		}
+-	}
++	visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
++			   merge_sched_in, &can_add_hw);
+ }
+ 
+-static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void ctx_groups_sched_in(struct perf_event_context *ctx,
++				struct perf_event_groups *groups,
++				bool cgroup)
+ {
+ 	struct perf_event_pmu_context *pmu_ctx;
+-	int can_add_hw = 1;
+ 
+-	if (pmu) {
+-		visit_groups_merge(ctx, &ctx->flexible_groups,
+-				   smp_processor_id(), pmu,
+-				   merge_sched_in, &can_add_hw);
+-	} else {
+-		list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+-			can_add_hw = 1;
+-			visit_groups_merge(ctx, &ctx->flexible_groups,
+-					   smp_processor_id(), pmu_ctx->pmu,
+-					   merge_sched_in, &can_add_hw);
+-		}
++	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++		if (cgroup && !pmu_ctx->nr_cgroups)
++			continue;
++		pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
+ 	}
+ }
+ 
+-static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
++			       struct pmu *pmu)
+ {
+-	ctx_flexible_sched_in(ctx, pmu);
++	pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
+ }
+ 
+ static void
+@@ -3868,6 +3870,9 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ {
+ 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ 	int is_active = ctx->is_active;
++	bool cgroup = event_type & EVENT_CGROUP;
++
++	event_type &= ~EVENT_CGROUP;
+ 
+ 	lockdep_assert_held(&ctx->lock);
+ 
+@@ -3900,11 +3905,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ 	 * in order to give them the best chance of going on.
+ 	 */
+ 	if (is_active & EVENT_PINNED)
+-		ctx_pinned_sched_in(ctx, NULL);
++		ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+ 
+ 	/* Then walk through the lower prio flexible groups */
+ 	if (is_active & EVENT_FLEXIBLE)
+-		ctx_flexible_sched_in(ctx, NULL);
++		ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+ }
+ 
+ static void perf_event_context_sched_in(struct task_struct *task)
+@@ -3919,11 +3924,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ 
+ 	if (cpuctx->task_ctx == ctx) {
+ 		perf_ctx_lock(cpuctx, ctx);
+-		perf_ctx_disable(ctx);
++		perf_ctx_disable(ctx, false);
+ 
+ 		perf_ctx_sched_task_cb(ctx, true);
+ 
+-		perf_ctx_enable(ctx);
++		perf_ctx_enable(ctx, false);
+ 		perf_ctx_unlock(cpuctx, ctx);
+ 		goto rcu_unlock;
+ 	}
+@@ -3936,7 +3941,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ 	if (!ctx->nr_events)
+ 		goto unlock;
+ 
+-	perf_ctx_disable(ctx);
++	perf_ctx_disable(ctx, false);
+ 	/*
+ 	 * We want to keep the following priority order:
+ 	 * cpu pinned (that don't need to move), task pinned,
+@@ -3946,7 +3951,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ 	 * events, no need to flip the cpuctx's events around.
+ 	 */
+ 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
+-		perf_ctx_disable(&cpuctx->ctx);
++		perf_ctx_disable(&cpuctx->ctx, false);
+ 		ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
+ 	}
+ 
+@@ -3955,9 +3960,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ 	perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
+ 
+ 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
+-		perf_ctx_enable(&cpuctx->ctx);
++		perf_ctx_enable(&cpuctx->ctx, false);
+ 
+-	perf_ctx_enable(ctx);
++	perf_ctx_enable(ctx, false);
+ 
+ unlock:
+ 	perf_ctx_unlock(cpuctx, ctx);
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index 514e4582b8634..d4141b0547187 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
+ 	 *        but access_ok() should be faster than find_vma()
+ 	 */
+ 	if (!fshared) {
+-		key->private.mm = mm;
++		/*
++		 * On no-MMU, shared futexes are treated as private, therefore
++		 * we must not include the current process in the key. Since
++		 * there is only one address space, the address is a unique key
++		 * on its own.
++		 */
++		if (IS_ENABLED(CONFIG_MMU))
++			key->private.mm = mm;
++		else
++			key->private.mm = NULL;
++
+ 		key->private.address = address;
+ 		return 0;
+ 	}
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 1698e77645acf..75d0ae490e29c 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
+ }
+ 
+ /**
+- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
++ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
+  * @m:		Pointer to the matrix to search
+  *
+- * This returns number of allocated irqs
++ * This returns the number of allocated non-managed interrupts.
+  */
+ unsigned int irq_matrix_allocated(struct irq_matrix *m)
+ {
+ 	struct cpumap *cm = this_cpu_ptr(m->maps);
+ 
+-	return cm->allocated;
++	return cm->allocated - cm->managed_allocated;
+ }
+ 
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 61328328c474c..ecbc9b6aba3a1 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
+ 		 * symbols are exported and normal relas can be used instead.
+ 		 */
+ 		if (!sec_vmlinux && sym_vmlinux) {
+-			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
++			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
+ 			       sym_name);
+ 			return -EINVAL;
+ 		}
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 87440f714c0ca..474e68f0f0634 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ 	s.next_in = buf + gzip_hdr_len;
+ 	s.avail_in = size - gzip_hdr_len;
+ 
+-	s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
++	s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ 	if (!s.workspace)
+ 		return -ENOMEM;
+ 
+@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ out_inflate_end:
+ 	zlib_inflateEnd(&s);
+ out:
+-	kfree(s.workspace);
++	kvfree(s.workspace);
+ 	return retval;
+ }
+ #elif defined(CONFIG_MODULE_COMPRESS_XZ)
+@@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ 	}
+ 
+ 	wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+-	wksp = vmalloc(wksp_size);
++	wksp = kvmalloc(wksp_size, GFP_KERNEL);
+ 	if (!wksp) {
+ 		retval = -ENOMEM;
+ 		goto out;
+@@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ 	retval = new_size;
+ 
+  out:
+-	vfree(wksp);
++	kvfree(wksp);
+ 	return retval;
+ }
+ #else
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 222d60195de66..ff349e1084c1d 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -1102,12 +1102,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
+  */
+ void padata_free_shell(struct padata_shell *ps)
+ {
++	struct parallel_data *pd;
++
+ 	if (!ps)
+ 		return;
+ 
+ 	mutex_lock(&ps->pinst->lock);
+ 	list_del(&ps->list);
+-	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
++	pd = rcu_dereference_protected(ps->pd, 1);
++	if (refcount_dec_and_test(&pd->refcnt))
++		padata_free_pd(pd);
+ 	mutex_unlock(&ps->pinst->lock);
+ 
+ 	kfree(ps);
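
padata_free_shell() now participates in the pd->refcnt protocol instead of
freeing unconditionally, so an in-flight user still holding a reference keeps
the parallel_data alive. A minimal sketch of the refcount_dec_and_test()
discipline, not part of the patch:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pd_sketch { atomic_int refcnt; };

	/* Drop one reference; only the dropper of the last one frees. */
	static void pd_put(struct pd_sketch *pd)
	{
		if (atomic_fetch_sub(&pd->refcnt, 1) == 1) {
			printf("freeing pd\n");
			free(pd);
		}
	}

	int main(void)
	{
		struct pd_sketch *pd = malloc(sizeof(*pd));

		atomic_init(&pd->refcnt, 2);	/* shell + in-flight job */
		pd_put(pd);	/* shell teardown: not the last ref */
		pd_put(pd);	/* job completion: last ref, frees */
		return 0;
	}
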
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 20d7a238d675a..253ed509b6abb 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -1242,10 +1242,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ 	spin_lock_irqsave_sdp_contention(sdp, &flags);
+ 	if (rhp)
+ 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++	/*
++	 * The snapshot for acceleration must be taken _before_ the read of the
++	 * current gp sequence used for advancing, otherwise advancing may fail
++	 * and acceleration may then fail too.
++	 *
++	 * This could happen if:
++	 *
++	 *  1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
++	 *     RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
++	 *
++	 *  2) The grace period for RCU_WAIT_TAIL is seen as started but not
++	 *     completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
++	 *
++	 *  3) This value is passed to rcu_segcblist_advance() which can't move
++	 *     any segment forward and fails.
++	 *
++	 *  4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
++	 *     But then the call to rcu_seq_snap() observes the grace period for the
++	 *     RCU_WAIT_TAIL segment as completed and the subsequent one for the
++	 *     RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
++	 *     so it returns a snapshot of the next grace period, which is X + 12.
++	 *
++	 *  5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
++	 *     freshly enqueued callback in RCU_NEXT_TAIL can't move to
++	 *     RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
++	 *     period (gp_num = X + 8). So acceleration fails.
++	 */
++	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+ 	rcu_segcblist_advance(&sdp->srcu_cblist,
+ 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+-	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
++	WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+ 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ 		sdp->srcu_gp_seq_needed = s;
+ 		needgp = true;
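
The reorder above takes the rcu_seq_snap() snapshot before
rcu_segcblist_advance() for the reasons the new comment spells out. The
grace-period arithmetic it describes can be checked with a toy copy of
rcu_seq_snap(), not part of the patch (constants mirror kernel/rcu/rcu.h):

	#include <stdio.h>

	#define RCU_SEQ_STATE_MASK	3UL	/* low bits carry the GP phase */
	#define SRCU_STATE_SCAN1	1UL

	/* GP number whose completion guarantees a full grace period has
	 * elapsed for a callback queued now. */
	static unsigned long rcu_seq_snap(unsigned long s)
	{
		return (s + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	}

	int main(void)
	{
		unsigned long x = 100;	/* "X": any multiple of 4 */

		/* Step 2: GP X seen as started but not completed. */
		printf("snap = X + %lu\n",
		       rcu_seq_snap(x + SRCU_STATE_SCAN1) - x);		/* + 8 */
		/* Step 4: one GP later the snapshot jumps ahead. */
		printf("snap = X + %lu\n",
		       rcu_seq_snap(x + 4 + SRCU_STATE_SCAN1) - x);	/* + 12 */
		return 0;
	}
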
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e8f73ff12126c..149fdb212e20f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2644,9 +2644,11 @@ static int migration_cpu_stop(void *data)
+ 		 * it.
+ 		 */
+ 		WARN_ON_ONCE(!pending->stop_pending);
++		preempt_disable();
+ 		task_rq_unlock(rq, p, &rf);
+ 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ 				    &pending->arg, &pending->stop_work);
++		preempt_enable();
+ 		return 0;
+ 	}
+ out:
+@@ -2966,12 +2968,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			complete = true;
+ 		}
+ 
++		preempt_disable();
+ 		task_rq_unlock(rq, p, rf);
+-
+ 		if (push_task) {
+ 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ 					    p, &rq->push_work);
+ 		}
++		preempt_enable();
+ 
+ 		if (complete)
+ 			complete_all(&pending->done);
+@@ -3037,12 +3040,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 		if (flags & SCA_MIGRATE_ENABLE)
+ 			p->migration_flags &= ~MDF_PUSH;
+ 
++		preempt_disable();
+ 		task_rq_unlock(rq, p, rf);
+-
+ 		if (!stop_pending) {
+ 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ 					    &pending->arg, &pending->stop_work);
+ 		}
++		preempt_enable();
+ 
+ 		if (flags & SCA_MIGRATE_ENABLE)
+ 			return 0;
+@@ -9507,9 +9511,11 @@ static void balance_push(struct rq *rq)
+ 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
+ 	 * Both preemption and IRQs are still disabled.
+ 	 */
++	preempt_disable();
+ 	raw_spin_rq_unlock(rq);
+ 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ 			    this_cpu_ptr(&push_work));
++	preempt_enable();
+ 	/*
+ 	 * At this point need_resched() is true and we'll take the loop in
+ 	 * schedule(). The next pick is obviously going to be the stop task
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 58b542bf28934..d78f2e8769fb4 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2449,9 +2449,11 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(this_rq);
+ 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ 					    push_task, &src_rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(this_rq);
+ 		}
+ 	}
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 1d9c2482c5a35..8e1b54dc2a214 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4348,22 +4348,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ 	return max(task_util(p), _task_util_est(p));
+ }
+ 
+-#ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+-					     unsigned long uclamp_min,
+-					     unsigned long uclamp_max)
+-{
+-	return clamp(task_util_est(p), uclamp_min, uclamp_max);
+-}
+-#else
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+-					     unsigned long uclamp_min,
+-					     unsigned long uclamp_max)
+-{
+-	return task_util_est(p);
+-}
+-#endif
+-
+ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ 				    struct task_struct *p)
+ {
+@@ -4654,7 +4638,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ 
+ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ {
+-	return true;
++	return !cfs_rq->nr_running;
+ }
+ 
+ #define UPDATE_TG	0x0
+@@ -7554,7 +7538,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	target = prev_cpu;
+ 
+ 	sync_entity_load_avg(&p->se);
+-	if (!uclamp_task_util(p, p_util_min, p_util_max))
++	if (!task_util_est(p) && p_util_min == 0)
+ 		goto unlock;
+ 
+ 	eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7562,11 +7546,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	for (; pd; pd = pd->next) {
+ 		unsigned long util_min = p_util_min, util_max = p_util_max;
+ 		unsigned long cpu_cap, cpu_thermal_cap, util;
+-		unsigned long cur_delta, max_spare_cap = 0;
++		long prev_spare_cap = -1, max_spare_cap = -1;
+ 		unsigned long rq_util_min, rq_util_max;
+-		unsigned long prev_spare_cap = 0;
++		unsigned long cur_delta, base_energy;
+ 		int max_spare_cap_cpu = -1;
+-		unsigned long base_energy;
+ 		int fits, max_fits = -1;
+ 
+ 		cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+@@ -7629,7 +7612,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 				prev_spare_cap = cpu_cap;
+ 				prev_fits = fits;
+ 			} else if ((fits > max_fits) ||
+-				   ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
++				   ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
+ 				/*
+ 				 * Find the CPU with the maximum spare capacity
+ 				 * among the remaining CPUs in the performance
+@@ -7641,7 +7624,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			}
+ 		}
+ 
+-		if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
++		if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
+ 			continue;
+ 
+ 		eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7649,7 +7632,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+ 
+ 		/* Evaluate the energy impact of using prev_cpu. */
+-		if (prev_spare_cap > 0) {
++		if (prev_spare_cap > -1) {
+ 			prev_delta = compute_energy(&eenv, pd, cpus, p,
+ 						    prev_cpu);
+ 			/* CPU utilization has changed */
+@@ -11003,13 +10986,15 @@ more_balance:
+ 				busiest->push_cpu = this_cpu;
+ 				active_balance = 1;
+ 			}
+-			raw_spin_rq_unlock_irqrestore(busiest, flags);
+ 
++			preempt_disable();
++			raw_spin_rq_unlock_irqrestore(busiest, flags);
+ 			if (active_balance) {
+ 				stop_one_cpu_nowait(cpu_of(busiest),
+ 					active_load_balance_cpu_stop, busiest,
+ 					&busiest->active_balance_work);
+ 			}
++			preempt_enable();
+ 		}
+ 	} else {
+ 		sd->nr_balance_failed = 0;
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 185d3d749f6b6..590abc1a013d2 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2109,9 +2109,11 @@ retry:
+ 		 */
+ 		push_task = get_push_task(rq);
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(rq);
+ 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ 					    push_task, &rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(rq);
+ 		}
+ 
+@@ -2448,9 +2450,11 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(this_rq);
+ 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ 					    push_task, &src_rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(this_rq);
+ 		}
+ 	}
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index d3a3b2646ec4f..c6e89afa0d65c 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -2113,12 +2113,16 @@ static int hop_cmp(const void *a, const void *b)
+  */
+ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+-	struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
++	struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
+ 	struct cpumask ***hop_masks;
+ 	int hop, ret = nr_cpu_ids;
+ 
+ 	rcu_read_lock();
+ 
++	/* CPU-less node entries are uninitialized in sched_domains_numa_masks */
++	node = numa_nearest_node(node, N_CPU);
++	k.node = node;
++
+ 	k.masks = rcu_dereference(sched_domains_numa_masks);
+ 	if (!k.masks)
+ 		goto unlock;
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index c63e25cb9406e..d23b18cbdadd1 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1019,9 +1019,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+ /**
+  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+  * @cmd: A pointer to the dynevent_cmd struct representing the new event
++ * @kretprobe: Is this a return probe?
+  * @name: The name of the kprobe event
+  * @loc: The location of the kprobe event
+- * @kretprobe: Is this a return probe?
+  * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index 74982b83707ca..05ac4cdb6806a 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -102,8 +102,10 @@ static void kunit_free_suite_set(struct suite_set suite_set)
+ {
+ 	struct kunit_suite * const *suites;
+ 
+-	for (suites = suite_set.start; suites < suite_set.end; suites++)
++	for (suites = suite_set.start; suites < suite_set.end; suites++) {
++		kfree((*suites)->test_cases);
+ 		kfree(*suites);
++	}
+ 	kfree(suite_set.start);
+ }
+ 
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 7d82355ad0b3b..03172a2fd5b3f 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -131,22 +131,26 @@ static struct mempolicy default_policy = {
+ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+ 
+ /**
+- * numa_map_to_online_node - Find closest online node
++ * numa_nearest_node - Find nearest node by state
+  * @node: Node id to start the search
++ * @state: State to filter the search
+  *
+- * Lookup the next closest node by distance if @nid is not online.
++ * Lookup the closest node by distance if @nid is not in state.
+  *
+- * Return: this @node if it is online, otherwise the closest node by distance
++ * Return: this @node if it is in state, otherwise the closest node by distance
+  */
+-int numa_map_to_online_node(int node)
++int numa_nearest_node(int node, unsigned int state)
+ {
+ 	int min_dist = INT_MAX, dist, n, min_node;
+ 
+-	if (node == NUMA_NO_NODE || node_online(node))
++	if (state >= NR_NODE_STATES)
++		return -EINVAL;
++
++	if (node == NUMA_NO_NODE || node_state(node, state))
+ 		return node;
+ 
+ 	min_node = node;
+-	for_each_online_node(n) {
++	for_each_node_state(n, state) {
+ 		dist = node_distance(node, n);
+ 		if (dist < min_dist) {
+ 			min_dist = dist;
+@@ -156,7 +160,7 @@ int numa_map_to_online_node(int node)
+ 
+ 	return min_node;
+ }
+-EXPORT_SYMBOL_GPL(numa_map_to_online_node);
++EXPORT_SYMBOL_GPL(numa_nearest_node);
+ 
+ struct mempolicy *get_task_policy(struct task_struct *p)
+ {
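
numa_nearest_node() generalises the old online-only lookup to any node state
(the scheduler caller filters on N_CPU, skipping CPU-less nodes). A userspace
sketch of the distance search, not part of the patch (the 4-node distance
matrix is made up):

	#include <limits.h>
	#include <stdio.h>

	#define NR_NODES 4

	static const int distance[NR_NODES][NR_NODES] = {
		{ 10, 20, 40, 40 },
		{ 20, 10, 40, 40 },
		{ 40, 40, 10, 20 },
		{ 40, 40, 20, 10 },
	};

	/* Closest node satisfying the state predicate, or node itself. */
	static int nearest_node(int node, const int in_state[NR_NODES])
	{
		int min_dist = INT_MAX, min_node = node;

		if (in_state[node])
			return node;

		for (int n = 0; n < NR_NODES; n++) {
			if (!in_state[n])
				continue;
			if (distance[node][n] < min_dist) {
				min_dist = distance[node][n];
				min_node = n;
			}
		}
		return min_node;
	}

	int main(void)
	{
		const int has_cpu[NR_NODES] = { 1, 0, 1, 0 };	/* N_CPU analogue */

		printf("%d\n", nearest_node(1, has_cpu));	/* 0 (distance 20) */
		printf("%d\n", nearest_node(3, has_cpu));	/* 2 (distance 20) */
		return 0;
	}
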
+diff --git a/mm/readahead.c b/mm/readahead.c
+index a9c999aa19af6..1f4701f1b8682 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -748,7 +748,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
+ 	 */
+ 	ret = -EINVAL;
+ 	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+-	    !S_ISREG(file_inode(f.file)->i_mode))
++	    (!S_ISREG(file_inode(f.file)->i_mode) &&
++	    !S_ISBLK(file_inode(f.file)->i_mode)))
+ 		goto out;
+ 
+ 	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 86bbc7147fc14..b0e7cb7e1a54a 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+ 		return 0;
+ 
+ 	if (!p9_is_proto_dotl(c)) {
+-		char *ename;
++		char *ename = NULL;
+ 
+ 		err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
+ 				  &ename, &ecode);
+-		if (err)
++		if (err) {
++			kfree(ename);
+ 			goto out_err;
++		}
+ 
+ 		if (p9_is_proto_dotu(c) && ecode < 512)
+ 			err = -ecode;
+diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
+index 2134f92bd7ac2..5d698f19868c5 100644
+--- a/net/bluetooth/amp.c
++++ b/net/bluetooth/amp.c
+@@ -109,7 +109,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ 	struct hci_conn *hcon;
+ 	u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
+ 
+-	hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
++	hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
+ 	if (!hcon)
+ 		return NULL;
+ 
+@@ -117,7 +117,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ 
+ 	hcon->state = BT_CONNECT;
+ 	hcon->attempt++;
+-	hcon->handle = __next_handle(mgr);
+ 	hcon->remote_id = remote_id;
+ 	hcon->amp_mgr = amp_mgr_get(mgr);
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 6d6192f514d0f..4e03642488230 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -153,6 +153,9 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ 
+ 	hci_conn_hash_del(hdev, conn);
+ 
++	if (HCI_CONN_HANDLE_UNSET(conn->handle))
++		ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ 	if (conn->cleanup)
+ 		conn->cleanup(conn);
+ 
+@@ -734,6 +737,7 @@ struct iso_list_data {
+ 	};
+ 	int count;
+ 	bool big_term;
++	bool pa_sync_term;
+ 	bool big_sync_term;
+ };
+ 
+@@ -807,7 +811,10 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+ 	if (d->big_sync_term)
+ 		hci_le_big_terminate_sync(hdev, d->big);
+ 
+-	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
++	if (d->pa_sync_term)
++		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
++
++	return 0;
+ }
+ 
+ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
+@@ -823,6 +830,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
+ 
+ 	d->big = big;
+ 	d->sync_handle = conn->sync_handle;
++	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
+ 	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
+ 
+ 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+@@ -925,31 +933,18 @@ static void cis_cleanup(struct hci_conn *conn)
+ 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
+ }
+ 
+-static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
++static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+ {
+-	struct hci_conn_hash *h = &hdev->conn_hash;
+-	struct hci_conn  *c;
+-	u16 handle = HCI_CONN_HANDLE_MAX + 1;
+-
+-	rcu_read_lock();
+-
+-	list_for_each_entry_rcu(c, &h->list, list) {
+-		/* Find the first unused handle */
+-		if (handle == 0xffff || c->handle != handle)
+-			break;
+-		handle++;
+-	}
+-	rcu_read_unlock();
+-
+-	return handle;
++	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
++			       U16_MAX, GFP_ATOMIC);
+ }
+ 
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+-			      u8 role)
++			      u8 role, u16 handle)
+ {
+ 	struct hci_conn *conn;
+ 
+-	BT_DBG("%s dst %pMR", hdev->name, dst);
++	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
+ 
+ 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ 	if (!conn)
+@@ -957,7 +952,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 
+ 	bacpy(&conn->dst, dst);
+ 	bacpy(&conn->src, &hdev->bdaddr);
+-	conn->handle = hci_conn_hash_alloc_unset(hdev);
++	conn->handle = handle;
+ 	conn->hdev  = hdev;
+ 	conn->type  = type;
+ 	conn->role  = role;
+@@ -970,6 +965,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	conn->rssi = HCI_RSSI_INVALID;
+ 	conn->tx_power = HCI_TX_POWER_INVALID;
+ 	conn->max_tx_power = HCI_TX_POWER_INVALID;
++	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
+ 
+ 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+@@ -1041,6 +1037,20 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	return conn;
+ }
+ 
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++				    bdaddr_t *dst, u8 role)
++{
++	int handle;
++
++	bt_dev_dbg(hdev, "dst %pMR", dst);
++
++	handle = hci_conn_hash_alloc_unset(hdev);
++	if (unlikely(handle < 0))
++		return NULL;
++
++	return hci_conn_add(hdev, type, dst, role, handle);
++}
++
+ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
+ {
+ 	if (!reason)
+@@ -1271,6 +1281,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
+ 	if (conn->abort_reason)
+ 		return conn->abort_reason;
+ 
++	if (HCI_CONN_HANDLE_UNSET(conn->handle))
++		ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ 	conn->handle = handle;
+ 
+ 	return 0;
+@@ -1377,7 +1390,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 	if (conn) {
+ 		bacpy(&conn->dst, dst);
+ 	} else {
+-		conn = hci_conn_add(hdev, LE_LINK, dst, role);
++		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
+ 		if (!conn)
+ 			return ERR_PTR(-ENOMEM);
+ 		hci_conn_hold(conn);
+@@ -1548,7 +1561,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 		     memcmp(conn->le_per_adv_data, base, base_len)))
+ 		return ERR_PTR(-EADDRINUSE);
+ 
+-	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ 	if (!conn)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -1592,7 +1605,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ 
+ 	BT_DBG("requesting refresh of dst_addr");
+ 
+-	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
++	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
+ 	if (!conn)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -1640,7 +1653,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ 
+ 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ 	if (!acl) {
+-		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
++		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+ 		if (!acl)
+ 			return ERR_PTR(-ENOMEM);
+ 	}
+@@ -1700,7 +1713,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 
+ 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
+ 	if (!sco) {
+-		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
++		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
+ 		if (!sco) {
+ 			hci_conn_drop(acl);
+ 			return ERR_PTR(-ENOMEM);
+@@ -1892,7 +1905,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
+ 				       qos->ucast.cis);
+ 	if (!cis) {
+-		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ 		if (!cis)
+ 			return ERR_PTR(-ENOMEM);
+ 		cis->cleanup = cis_cleanup;
+@@ -2127,7 +2140,8 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
+ 	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ }
+ 
+-int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
++int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
++			   struct bt_iso_qos *qos,
+ 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
+ {
+ 	struct _packed {
+@@ -2143,6 +2157,9 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
+ 	if (err)
+ 		return err;
+ 
++	if (hcon)
++		hcon->iso_qos.bcast.big = qos->bcast.big;
++
+ 	memset(&pdu, 0, sizeof(pdu));
+ 	pdu.cp.handle = qos->bcast.big;
+ 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
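The core of the hci_conn.c changes replaces the racy linear scan for a free "unset" handle with an IDA, so placeholder handles above HCI_CONN_HANDLE_MAX are both unique and returned to the pool (see the ida_free() calls in hci_conn_cleanup() and hci_conn_set_handle() above, and the ida_init()/ida_destroy() pairing in the hci_core.c hunks below). A minimal sketch of the allocator idiom, assuming HCI_CONN_HANDLE_MAX from the Bluetooth headers; the real code keeps the IDA per-hdev rather than global:

    #include <linux/idr.h>

    static DEFINE_IDA(unset_handle_ida);    /* per-hdev in the real code */

    static int alloc_unset_handle(void)
    {
            /* smallest free id in [HCI_CONN_HANDLE_MAX + 1, U16_MAX];
             * returns a negative errno when the range is exhausted */
            return ida_alloc_range(&unset_handle_ida,
                                   HCI_CONN_HANDLE_MAX + 1, U16_MAX,
                                   GFP_ATOMIC);
    }

    static void free_unset_handle(u16 handle)
    {
            ida_free(&unset_handle_ida, handle);
    }

hci_conn_add_unset() above is exactly this: allocate a placeholder, then hand it to hci_conn_add(), which now takes the handle as a parameter instead of computing one itself.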
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 63d4d38863acb..eb591495ba245 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2535,6 +2535,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+ 	mutex_init(&hdev->lock);
+ 	mutex_init(&hdev->req_lock);
+ 
++	ida_init(&hdev->unset_handle_ida);
++
+ 	INIT_LIST_HEAD(&hdev->mesh_pending);
+ 	INIT_LIST_HEAD(&hdev->mgmt_pending);
+ 	INIT_LIST_HEAD(&hdev->reject_list);
+@@ -2789,6 +2791,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ 	hci_codec_list_clear(&hdev->local_codecs);
+ 	hci_dev_unlock(hdev);
+ 
++	ida_destroy(&hdev->unset_handle_ida);
+ 	ida_simple_remove(&hci_index_ida, hdev->id);
+ 	kfree_skb(hdev->sent_cmd);
+ 	kfree_skb(hdev->recv_event);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index dd70fd5313840..634ac77cb2c9d 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2335,8 +2335,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+ 		}
+ 	} else {
+ 		if (!conn) {
+-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
+-					    HCI_ROLE_MASTER);
++			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
++						  HCI_ROLE_MASTER);
+ 			if (!conn)
+ 				bt_dev_err(hdev, "no memory for new connection");
+ 		}
+@@ -3151,8 +3151,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
+ 						      &ev->bdaddr,
+ 						      BDADDR_BREDR)) {
+-			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+-					    HCI_ROLE_SLAVE);
++			conn = hci_conn_add_unset(hdev, ev->link_type,
++						  &ev->bdaddr, HCI_ROLE_SLAVE);
+ 			if (!conn) {
+ 				bt_dev_err(hdev, "no memory for new conn");
+ 				goto unlock;
+@@ -3317,8 +3317,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ 			&ev->bdaddr);
+ 	if (!conn) {
+-		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+-				    HCI_ROLE_SLAVE);
++		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
++					  HCI_ROLE_SLAVE);
+ 		if (!conn) {
+ 			bt_dev_err(hdev, "no memory for new connection");
+ 			goto unlock;
+@@ -5867,7 +5867,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 		if (status)
+ 			goto unlock;
+ 
+-		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
++		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
+ 		if (!conn) {
+ 			bt_dev_err(hdev, "no memory for new connection");
+ 			goto unlock;
+@@ -5929,17 +5929,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 
+ 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
+ 
+-	if (handle > HCI_CONN_HANDLE_MAX) {
+-		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+-			   HCI_CONN_HANDLE_MAX);
+-		status = HCI_ERROR_INVALID_PARAMETERS;
+-	}
+-
+ 	/* All connection failure handling is taken care of by the
+ 	 * hci_conn_failed function which is triggered by the HCI
+ 	 * request completion callbacks used for connecting.
+ 	 */
+-	if (status)
++	if (status || hci_conn_set_handle(conn, handle))
+ 		goto unlock;
+ 
+ 	/* Drop the connection if it has been aborted */
+@@ -5963,7 +5957,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 		mgmt_device_connected(hdev, conn, NULL, 0);
+ 
+ 	conn->sec_level = BT_SECURITY_LOW;
+-	conn->handle = handle;
+ 	conn->state = BT_CONFIG;
+ 
+ 	/* Store current advertising instance as connection advertising instance
+@@ -6578,20 +6571,38 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_ev_le_pa_sync_established *ev = data;
+ 	int mask = hdev->link_mode;
+ 	__u8 flags = 0;
++	struct hci_conn *pa_sync;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+-	if (ev->status)
+-		return;
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ 
+ 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
+-	if (!(mask & HCI_LM_ACCEPT))
++	if (!(mask & HCI_LM_ACCEPT)) {
+ 		hci_le_pa_term_sync(hdev, ev->handle);
++		goto unlock;
++	}
+ 
++	if (!(flags & HCI_PROTO_DEFER))
++		goto unlock;
++
++	if (ev->status) {
++		/* Add connection to indicate the failed PA sync event */
++		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++					     HCI_ROLE_SLAVE);
++
++		if (!pa_sync)
++			goto unlock;
++
++		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
++
++		/* Notify iso layer */
++		hci_connect_cfm(pa_sync, ev->status);
++	}
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6958,12 +6969,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
+ 
+ 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
+ 	if (!cis) {
+-		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
++		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
++				   cis_handle);
+ 		if (!cis) {
+ 			hci_le_reject_cis(hdev, ev->cis_handle);
+ 			goto unlock;
+ 		}
+-		cis->handle = cis_handle;
+ 	}
+ 
+ 	cis->iso_qos.ucast.cig = ev->cig_id;
+@@ -7051,6 +7062,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ {
+ 	struct hci_evt_le_big_sync_estabilished *ev = data;
+ 	struct hci_conn *bis;
++	struct hci_conn *pa_sync;
+ 	int i;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+@@ -7061,6 +7073,15 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_lock(hdev);
+ 
++	if (!ev->status) {
++		pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
++		if (pa_sync)
++			/* Also mark the BIG sync established event on the
++			 * associated PA sync hcon
++			 */
++			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
++	}
++
+ 	for (i = 0; i < ev->num_bis; i++) {
+ 		u16 handle = le16_to_cpu(ev->bis[i]);
+ 		__le32 interval;
+@@ -7068,12 +7089,15 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		bis = hci_conn_hash_lookup_handle(hdev, handle);
+ 		if (!bis) {
+ 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+-					   HCI_ROLE_SLAVE);
++					   HCI_ROLE_SLAVE, handle);
+ 			if (!bis)
+ 				continue;
+-			bis->handle = handle;
+ 		}
+ 
++		if (ev->status != 0x42)
++			/* Mark PA sync as established */
++			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
++
+ 		bis->iso_qos.bcast.big = ev->handle;
+ 		memset(&interval, 0, sizeof(interval));
+ 		memcpy(&interval, ev->latency, sizeof(ev->latency));
+@@ -7110,15 +7134,42 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_evt_le_big_info_adv_report *ev = data;
+ 	int mask = hdev->link_mode;
+ 	__u8 flags = 0;
++	struct hci_conn *pa_sync;
+ 
+ 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
+ 
+ 	hci_dev_lock(hdev);
+ 
+ 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+-	if (!(mask & HCI_LM_ACCEPT))
++	if (!(mask & HCI_LM_ACCEPT)) {
+ 		hci_le_pa_term_sync(hdev, ev->sync_handle);
++		goto unlock;
++	}
++
++	if (!(flags & HCI_PROTO_DEFER))
++		goto unlock;
+ 
++	pa_sync = hci_conn_hash_lookup_pa_sync_handle
++			(hdev,
++			le16_to_cpu(ev->sync_handle));
++
++	if (pa_sync)
++		goto unlock;
++
++	/* Add connection to indicate the PA sync event */
++	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++				     HCI_ROLE_SLAVE);
++
++	if (!pa_sync)
++		goto unlock;
++
++	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
++	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
++
++	/* Notify iso layer */
++	hci_connect_cfm(pa_sync, 0x00);
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ }
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 360813ab0c4db..9b073bf2ba088 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -151,7 +151,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 	struct sk_buff *skb;
+ 	int err = 0;
+ 
+-	bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
++	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
+ 
+ 	hci_req_init(&req, hdev);
+ 
+@@ -247,7 +247,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ 	if (IS_ERR(skb)) {
+ 		if (!event)
+-			bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+ 				   PTR_ERR(skb));
+ 		return PTR_ERR(skb);
+ 	}
+@@ -5389,6 +5389,21 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ 		err = hci_reject_conn_sync(hdev, conn, reason);
+ 		break;
+ 	case BT_OPEN:
++		hci_dev_lock(hdev);
++
++		/* Cleanup bis or pa sync connections */
++		if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) ||
++		    test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) {
++			hci_conn_failed(conn, reason);
++		} else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) ||
++			   test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
++			conn->state = BT_CLOSED;
++			hci_disconn_cfm(conn, reason);
++			hci_conn_del(conn);
++		}
++
++		hci_dev_unlock(hdev);
++		return 0;
+ 	case BT_BOUND:
+ 		break;
+ 	default:
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index c8460eb7f5c0b..9433a273b4fc2 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -51,6 +51,7 @@ static void iso_sock_kill(struct sock *sk);
+ /* iso_pinfo flags values */
+ enum {
+ 	BT_SK_BIG_SYNC,
++	BT_SK_PA_SYNC,
+ };
+ 
+ struct iso_pinfo {
+@@ -75,6 +76,9 @@ static struct bt_iso_qos default_qos;
+ 
+ static bool check_ucast_qos(struct bt_iso_qos *qos);
+ static bool check_bcast_qos(struct bt_iso_qos *qos);
++static bool iso_match_sid(struct sock *sk, void *data);
++static bool iso_match_sync_handle(struct sock *sk, void *data);
++static void iso_sock_disconn(struct sock *sk);
+ 
+ /* ---- ISO timers ---- */
+ #define ISO_CONN_TIMEOUT	(HZ * 40)
+@@ -601,6 +605,15 @@ static void iso_sock_cleanup_listen(struct sock *parent)
+ 		iso_sock_kill(sk);
+ 	}
+ 
++	/* If the listening socket stands for a PA sync connection,
++	 * properly disconnect the hcon and socket.
++	 */
++	if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon &&
++	    test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) {
++		iso_sock_disconn(parent);
++		return;
++	}
++
+ 	parent->sk_state  = BT_CLOSED;
+ 	sock_set_flag(parent, SOCK_ZAPPED);
+ }
+@@ -622,6 +635,16 @@ static void iso_sock_kill(struct sock *sk)
+ 	sock_put(sk);
+ }
+ 
++static void iso_sock_disconn(struct sock *sk)
++{
++	sk->sk_state = BT_DISCONN;
++	iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT);
++	iso_conn_lock(iso_pi(sk)->conn);
++	hci_conn_drop(iso_pi(sk)->conn->hcon);
++	iso_pi(sk)->conn->hcon = NULL;
++	iso_conn_unlock(iso_pi(sk)->conn);
++}
++
+ static void __iso_sock_close(struct sock *sk)
+ {
+ 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+@@ -633,20 +656,19 @@ static void __iso_sock_close(struct sock *sk)
+ 
+ 	case BT_CONNECTED:
+ 	case BT_CONFIG:
+-		if (iso_pi(sk)->conn->hcon) {
+-			sk->sk_state = BT_DISCONN;
+-			iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT);
+-			iso_conn_lock(iso_pi(sk)->conn);
+-			hci_conn_drop(iso_pi(sk)->conn->hcon);
+-			iso_pi(sk)->conn->hcon = NULL;
+-			iso_conn_unlock(iso_pi(sk)->conn);
+-		} else {
++		if (iso_pi(sk)->conn->hcon)
++			iso_sock_disconn(sk);
++		else
+ 			iso_chan_del(sk, ECONNRESET);
+-		}
+ 		break;
+ 
+ 	case BT_CONNECT2:
+-		iso_chan_del(sk, ECONNRESET);
++		if (iso_pi(sk)->conn->hcon &&
++		    (test_bit(HCI_CONN_PA_SYNC, &iso_pi(sk)->conn->hcon->flags) ||
++		    test_bit(HCI_CONN_PA_SYNC_FAILED, &iso_pi(sk)->conn->hcon->flags)))
++			iso_sock_disconn(sk);
++		else
++			iso_chan_del(sk, ECONNRESET);
+ 		break;
+ 	case BT_CONNECT:
+ 		/* In case of DEFER_SETUP the hcon would be bound to CIG which
+@@ -1162,6 +1184,29 @@ static void iso_conn_defer_accept(struct hci_conn *conn)
+ 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
+ }
+ 
++static void iso_conn_big_sync(struct sock *sk)
++{
++	int err;
++	struct hci_dev *hdev;
++
++	hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src,
++			     iso_pi(sk)->src_type);
++
++	if (!hdev)
++		return;
++
++	if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
++		err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
++					     &iso_pi(sk)->qos,
++					     iso_pi(sk)->sync_handle,
++					     iso_pi(sk)->bc_num_bis,
++					     iso_pi(sk)->bc_bis);
++		if (err)
++			bt_dev_err(hdev, "hci_le_big_create_sync: %d",
++				   err);
++	}
++}
++
+ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			    size_t len, int flags)
+ {
+@@ -1174,8 +1219,14 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		lock_sock(sk);
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
+-			iso_conn_defer_accept(pi->conn->hcon);
+-			sk->sk_state = BT_CONFIG;
++			if (pi->conn->hcon &&
++			    test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
++				iso_conn_big_sync(sk);
++				sk->sk_state = BT_LISTEN;
++			} else {
++				iso_conn_defer_accept(pi->conn->hcon);
++				sk->sk_state = BT_CONFIG;
++			}
+ 			release_sock(sk);
+ 			return 0;
+ 		case BT_CONNECT:
+@@ -1518,11 +1569,18 @@ static bool iso_match_big(struct sock *sk, void *data)
+ 	return ev->handle == iso_pi(sk)->qos.bcast.big;
+ }
+ 
++static bool iso_match_pa_sync_flag(struct sock *sk, void *data)
++{
++	return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
++}
++
+ static void iso_conn_ready(struct iso_conn *conn)
+ {
+-	struct sock *parent;
++	struct sock *parent = NULL;
+ 	struct sock *sk = conn->sk;
+-	struct hci_ev_le_big_sync_estabilished *ev;
++	struct hci_ev_le_big_sync_estabilished *ev = NULL;
++	struct hci_ev_le_pa_sync_established *ev2 = NULL;
++	struct hci_evt_le_big_info_adv_report *ev3 = NULL;
+ 	struct hci_conn *hcon;
+ 
+ 	BT_DBG("conn %p", conn);
+@@ -1534,15 +1592,38 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 		if (!hcon)
+ 			return;
+ 
+-		ev = hci_recv_event_data(hcon->hdev,
+-					 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+-		if (ev)
++		if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) ||
++		    test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
++			ev = hci_recv_event_data(hcon->hdev,
++						 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
++
++			/* Get reference to PA sync parent socket, if it exists */
+ 			parent = iso_get_sock_listen(&hcon->src,
+ 						     &hcon->dst,
+-						     iso_match_big, ev);
+-		else
++						     iso_match_pa_sync_flag, NULL);
++			if (!parent && ev)
++				parent = iso_get_sock_listen(&hcon->src,
++							     &hcon->dst,
++							     iso_match_big, ev);
++		} else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
++			ev2 = hci_recv_event_data(hcon->hdev,
++						  HCI_EV_LE_PA_SYNC_ESTABLISHED);
++			if (ev2)
++				parent = iso_get_sock_listen(&hcon->src,
++							     &hcon->dst,
++							     iso_match_sid, ev2);
++		} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
++			ev3 = hci_recv_event_data(hcon->hdev,
++						  HCI_EVT_LE_BIG_INFO_ADV_REPORT);
++			if (ev3)
++				parent = iso_get_sock_listen(&hcon->src,
++							     &hcon->dst,
++							     iso_match_sync_handle, ev3);
++		}
++
++		if (!parent)
+ 			parent = iso_get_sock_listen(&hcon->src,
+-						     BDADDR_ANY, NULL, NULL);
++							BDADDR_ANY, NULL, NULL);
+ 
+ 		if (!parent)
+ 			return;
+@@ -1559,11 +1640,17 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 		iso_sock_init(sk, parent);
+ 
+ 		bacpy(&iso_pi(sk)->src, &hcon->src);
+-		iso_pi(sk)->src_type = hcon->src_type;
++
++		/* Convert from HCI to three-value type */
++		if (hcon->src_type == ADDR_LE_DEV_PUBLIC)
++			iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
++		else
++			iso_pi(sk)->src_type = BDADDR_LE_RANDOM;
+ 
+ 		/* If hcon has no destination address (BDADDR_ANY) it means it
+-		 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
+-		 * initialize using the parent socket destination address.
++		 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED or
++		 * HCI_EV_LE_PA_SYNC_ESTABLISHED so we need to initialize using
++		 * the parent socket destination address.
+ 		 */
+ 		if (!bacmp(&hcon->dst, BDADDR_ANY)) {
+ 			bacpy(&hcon->dst, &iso_pi(parent)->dst);
+@@ -1571,13 +1658,23 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 			hcon->sync_handle = iso_pi(parent)->sync_handle;
+ 		}
+ 
++		if (ev3) {
++			iso_pi(sk)->qos = iso_pi(parent)->qos;
++			iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
++			hcon->iso_qos = iso_pi(sk)->qos;
++			iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
++			memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
++			set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
++		}
++
+ 		bacpy(&iso_pi(sk)->dst, &hcon->dst);
+ 		iso_pi(sk)->dst_type = hcon->dst_type;
+ 
+ 		hci_conn_hold(hcon);
+ 		iso_chan_add(conn, sk, parent);
+ 
+-		if (ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) {
++		if ((ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) ||
++		    (ev2 && ev2->status)) {
+ 			/* Trigger error signal on child socket */
+ 			sk->sk_err = ECONNREFUSED;
+ 			sk->sk_error_report(sk);
+@@ -1635,7 +1732,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	if (ev1) {
+ 		sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid,
+ 					 ev1);
+-		if (sk)
++		if (sk && !ev1->status)
+ 			iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle);
+ 
+ 		goto done;
+@@ -1643,16 +1740,21 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 
+ 	ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ 	if (ev2) {
++		/* Try to get PA sync listening socket, if it exists */
+ 		sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+-					 iso_match_sync_handle, ev2);
++						iso_match_pa_sync_flag, NULL);
++		if (!sk)
++			sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
++						 iso_match_sync_handle, ev2);
+ 		if (sk) {
+ 			int err;
+ 
+ 			if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
+ 				iso_pi(sk)->bc_num_bis = ev2->num_bis;
+ 
+-			if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+-				err = hci_le_big_create_sync(hdev,
++			if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
++			    !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
++				err = hci_le_big_create_sync(hdev, NULL,
+ 							     &iso_pi(sk)->qos,
+ 							     iso_pi(sk)->sync_handle,
+ 							     iso_pi(sk)->bc_num_bis,
+@@ -1704,12 +1806,13 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ 
+ 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ 
+-	/* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED is set,
+-	 * queue the failed bis connection into the accept queue of the
+-	 * listening socket and wake up userspace, to inform the user about
+-	 * the BIG sync failed event.
++	/* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED or
++	 * HCI_CONN_PA_SYNC_FAILED is set, queue the failed connection
++	 * into the accept queue of the listening socket and wake up
++	 * userspace, to inform the user about the event.
+ 	 */
+-	if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
++	if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags) ||
++	    test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+ 		struct iso_conn *conn;
+ 
+ 		conn = iso_conn_add(hcon);
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index a3e12a61d456c..1ed4affd9149f 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -210,8 +210,12 @@ static int page_pool_init(struct page_pool *pool,
+ 		return -ENOMEM;
+ #endif
+ 
+-	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
++	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
++#ifdef CONFIG_PAGE_POOL_STATS
++		free_percpu(pool->recycle_stats);
++#endif
+ 		return -ENOMEM;
++	}
+ 
+ 	atomic_set(&pool->pages_state_release_cnt, 0);
+ 
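This page_pool hunk closes a leak on the failure path of page_pool_init(): when ptr_ring_init() fails, the percpu recycle_stats allocated earlier in the same function were previously dropped. A minimal sketch of the unwind idiom, with the earlier allocation assumed from the unmodified part of the function:

    #ifdef CONFIG_PAGE_POOL_STATS
            pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
            if (!pool->recycle_stats)
                    return -ENOMEM;
    #endif
            if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
    #ifdef CONFIG_PAGE_POOL_STATS
                    free_percpu(pool->recycle_stats); /* unwind earlier alloc */
    #endif
                    return -ENOMEM;
            }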
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 7dfae58055c2b..6c5b9ad800d20 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4185,6 +4185,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ 			   unsigned int to, struct ts_config *config)
+ {
++	unsigned int patlen = config->ops->get_pattern_len(config);
+ 	struct ts_state state;
+ 	unsigned int ret;
+ 
+@@ -4196,7 +4197,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
+ 
+ 	ret = textsearch_find(config, &state);
+-	return (ret <= to - from ? ret : UINT_MAX);
++	return (ret + patlen <= to - from ? ret : UINT_MAX);
+ }
+ EXPORT_SYMBOL(skb_find_text);
+ 
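The skb_find_text() fix above tightens the bounds check so a match only counts when the whole pattern fits inside the [from, to) window: textsearch_find() returns the match offset, and the old "ret <= to - from" test could accept an offset whose pattern ran past `to`. A self-contained demonstration of the corrected predicate, with hypothetical window and pattern sizes:

    #include <limits.h>
    #include <stdio.h>

    /* Mirror of the corrected check: accept offset `ret` only when
     * ret + patlen still fits in the searched window. */
    static unsigned int accept(unsigned int ret, unsigned int patlen,
                               unsigned int from, unsigned int to)
    {
            return ret + patlen <= to - from ? ret : UINT_MAX;
    }

    int main(void)
    {
            /* 10-byte window, 4-byte pattern */
            printf("%u\n", accept(5, 4, 0, 10)); /* 5: fits, 5 + 4 <= 10 */
            printf("%u\n", accept(7, 4, 0, 10)); /* UINT_MAX: spills past `to`,
                                                    old check accepted it */
            return 0;
    }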
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index fa14eef8f0688..dc874d7503f4a 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -630,9 +630,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_parse_options(sk, dreq, skb))
+ 		goto drop_and_free;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto drop_and_free;
+-
+ 	ireq = inet_rsk(req);
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+@@ -640,6 +637,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq->ireq_family = AF_INET;
+ 	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+ 
++	if (security_inet_conn_request(sk, skb, req))
++		goto drop_and_free;
++
+ 	/*
+ 	 * Step 3: Process LISTEN state
+ 	 *
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index c693a570682fb..6f5a556f4f6d7 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_parse_options(sk, dreq, skb))
+ 		goto drop_and_free;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto drop_and_free;
+-
+ 	ireq = inet_rsk(req);
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ 	ireq->ireq_family = AF_INET6;
+ 	ireq->ir_mark = inet_request_mark(sk, skb);
+ 
++	if (security_inet_conn_request(sk, skb, req))
++		goto drop_and_free;
++
+ 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b71dab630a873..80cdc6f6b34c9 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ 	skb = skb_copy_expand(frame->skb_std, 0,
+ 			      skb_tailroom(frame->skb_std) + HSR_HLEN,
+ 			      GFP_ATOMIC);
+-	prp_fill_rct(skb, frame, port);
+-
+-	return skb;
++	return prp_fill_rct(skb, frame, port);
+ }
+ 
+ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index dc478a0574cbe..3b4dafefb4b03 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
+  * requested/supported by the syn/synack exchange.
+  */
+ #define TSBITS	6
+-#define TSMASK	(((__u32)1 << TSBITS) - 1)
+ 
+ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ 		       u32 count, int c)
+@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+  */
+ u64 cookie_init_timestamp(struct request_sock *req, u64 now)
+ {
+-	struct inet_request_sock *ireq;
+-	u32 ts, ts_now = tcp_ns_to_ts(now);
++	const struct inet_request_sock *ireq = inet_rsk(req);
++	u64 ts, ts_now = tcp_ns_to_ts(now);
+ 	u32 options = 0;
+ 
+-	ireq = inet_rsk(req);
+-
+ 	options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ 	if (ireq->sack_ok)
+ 		options |= TS_OPT_SACK;
+ 	if (ireq->ecn_ok)
+ 		options |= TS_OPT_ECN;
+ 
+-	ts = ts_now & ~TSMASK;
++	ts = (ts_now >> TSBITS) << TSBITS;
+ 	ts |= options;
+-	if (ts > ts_now) {
+-		ts >>= TSBITS;
+-		ts--;
+-		ts <<= TSBITS;
+-		ts |= options;
+-	}
+-	return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
++	if (ts > ts_now)
++		ts -= (1UL << TSBITS);
++
++	return ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ }
+ 
+ 
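The cookie_init_timestamp() rewrite above performs the same rounding as before but in 64 bits and without the shift dance: round the current TCP timestamp down to a multiple of 2^TSBITS, stash the six option bits in the low bits, and step one whole interval back if that composite value would land in the future. Widening ts to u64 also avoids the wraparound the old u32 intermediate could hit. A small userspace demo of the arithmetic, with hypothetical timestamp values and TSBITS as in the file:

    #include <stdint.h>
    #include <stdio.h>

    #define TSBITS 6

    static uint64_t pack_ts(uint64_t ts_now, uint32_t options)
    {
            uint64_t ts = (ts_now >> TSBITS) << TSBITS; /* round down */

            ts |= options;            /* six low bits carry the options */
            if (ts > ts_now)          /* never claim a future timestamp */
                    ts -= (uint64_t)1 << TSBITS;
            return ts;
    }

    int main(void)
    {
            /* 1000 rounds to 960; 960|63 = 1023 > 1000, so back off to 959 */
            printf("%llu\n", (unsigned long long)pack_ts(1000, 63));
            /* 960|7 = 967 <= 1000, kept as-is */
            printf("%llu\n", (unsigned long long)pack_ts(1000, 7));
            return 0;
    }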
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 7d544f965b264..908db91f7b4cd 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6434,22 +6434,23 @@ reset_and_undo:
+ 
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct request_sock *req;
+ 
+ 	/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ 	 * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ 	 */
+-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+-		tcp_try_undo_loss(sk, false);
++	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
++		tcp_try_undo_recovery(sk);
+ 
+ 	/* Reset rtx states to prevent spurious retransmits_timed_out() */
+-	tcp_sk(sk)->retrans_stamp = 0;
++	tp->retrans_stamp = 0;
+ 	inet_csk(sk)->icsk_retransmits = 0;
+ 
+ 	/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ 	 * we no longer need req so release it.
+ 	 */
+-	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++	req = rcu_dereference_protected(tp->fastopen_rsk,
+ 					lockdep_sock_is_held(sk));
+ 	reqsk_fastopen_remove(sk, req, false);
+ 
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 99ac5efe244d3..a7364ff8b558d 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
+ 	u32 val, crtt = 0; /* cached RTT scaled by 8 */
+ 
+ 	sk_dst_confirm(sk);
++	/* ssthresh may have been reduced unnecessarily during
++	 * 3WHS. Restore it back to its initial default.
++	 */
++	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ 	if (!dst)
+ 		goto reset;
+ 
+ 	rcu_read_lock();
+-	tm = tcp_get_metrics(sk, dst, true);
++	tm = tcp_get_metrics(sk, dst, false);
+ 	if (!tm) {
+ 		rcu_read_unlock();
+ 		goto reset;
+@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
+ 		tp->snd_ssthresh = val;
+ 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ 			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+-	} else {
+-		/* ssthresh may have been reduced unnecessarily during.
+-		 * 3WHS. Restore it back to its initial default.
+-		 */
+-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ 	}
+ 	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ 	if (val && tp->reordering != val)
+@@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net)
+ 			match = net ? net_eq(tm_net(tm), net) :
+ 				!refcount_read(&tm_net(tm)->ns.count);
+ 			if (match) {
+-				*pp = tm->tcpm_next;
++				rcu_assign_pointer(*pp, tm->tcpm_next);
+ 				kfree_rcu(tm, rcu_head);
+ 			} else {
+ 				pp = &tm->tcpm_next;
+@@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+ 		if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ 		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ 		    net_eq(tm_net(tm), net)) {
+-			*pp = tm->tcpm_next;
++			rcu_assign_pointer(*pp, tm->tcpm_next);
+ 			kfree_rcu(tm, rcu_head);
+ 			found = true;
+ 		} else {
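The two tcp_metrics hunks above fix RCU list removal: entries chained through tcpm_next are walked locklessly by readers, so the unlink has to be published with rcu_assign_pointer() rather than a plain store, and the node freed only after a grace period. A minimal sketch of the idiom, using the names from the hunks:

    /* unlink tm from the RCU-protected chain at *pp */
    rcu_assign_pointer(*pp, tm->tcpm_next);
    /* defer the actual free until all current readers are done */
    kfree_rcu(tm, rcu_head);

The kfree_rcu() calls were already there; only the unlink side was missing its RCU-safe store.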
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 4c847baf52d1c..f712ff61beb8a 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -744,7 +744,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ 			       iph->saddr, uh->source, skb->dev->ifindex,
+ 			       inet_sdif(skb), udptable, NULL);
+ 
+-	if (!sk || udp_sk(sk)->encap_type) {
++	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ 		/* No socket for error: try tunnels before discarding */
+ 		if (static_branch_unlikely(&udp_encap_needed_key)) {
+ 			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
+@@ -1080,7 +1080,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	u8 tos, scope;
+ 	__be16 dport;
+ 	int err, is_udplite = IS_UDPLITE(sk);
+-	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ 	struct sk_buff *skb;
+ 	struct ip_options_data opt_copy;
+@@ -1344,11 +1344,11 @@ void udp_splice_eof(struct socket *sock)
+ 	struct sock *sk = sock->sk;
+ 	struct udp_sock *up = udp_sk(sk);
+ 
+-	if (!up->pending || READ_ONCE(up->corkflag))
++	if (!up->pending || udp_test_bit(CORK, sk))
+ 		return;
+ 
+ 	lock_sock(sk);
+-	if (up->pending && !READ_ONCE(up->corkflag))
++	if (up->pending && !udp_test_bit(CORK, sk))
+ 		udp_push_pending_frames(sk);
+ 	release_sock(sk);
+ }
+@@ -1897,7 +1897,7 @@ try_again:
+ 						      (struct sockaddr *)sin);
+ 	}
+ 
+-	if (udp_sk(sk)->gro_enabled)
++	if (udp_test_bit(GRO_ENABLED, sk))
+ 		udp_cmsg_recv(msg, sk, skb);
+ 
+ 	if (inet->cmsg_flags)
+@@ -2110,7 +2110,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 	nf_reset_ct(skb);
+ 
+-	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
++	if (static_branch_unlikely(&udp_encap_needed_key) &&
++	    READ_ONCE(up->encap_type)) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ 
+ 		/*
+@@ -2148,7 +2149,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	/*
+ 	 * 	UDP-Lite specific tests, ignored on UDP sockets
+ 	 */
+-	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
++	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++		u16 pcrlen = READ_ONCE(up->pcrlen);
+ 
+ 		/*
+ 		 * MIB statistics other than incrementing the error count are
+@@ -2161,7 +2163,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 		 * delivery of packets with coverage values less than a value
+ 		 * provided by the application."
+ 		 */
+-		if (up->pcrlen == 0) {          /* full coverage was set  */
++		if (pcrlen == 0) {          /* full coverage was set  */
+ 			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
+ 					    UDP_SKB_CB(skb)->cscov, skb->len);
+ 			goto drop;
+@@ -2172,9 +2174,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 		 * that it wants x while sender emits packets of smaller size y.
+ 		 * Therefore the above ...()->partial_cov statement is essential.
+ 		 */
+-		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
++		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ 			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
+-					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
++					    UDP_SKB_CB(skb)->cscov, pcrlen);
+ 			goto drop;
+ 		}
+ 	}
+@@ -2643,7 +2645,7 @@ void udp_destroy_sock(struct sock *sk)
+ 			if (encap_destroy)
+ 				encap_destroy(sk);
+ 		}
+-		if (up->encap_enabled)
++		if (udp_test_bit(ENCAP_ENABLED, sk))
+ 			static_branch_dec(&udp_encap_needed_key);
+ 	}
+ }
+@@ -2683,9 +2685,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 	switch (optname) {
+ 	case UDP_CORK:
+ 		if (val != 0) {
+-			WRITE_ONCE(up->corkflag, 1);
++			udp_set_bit(CORK, sk);
+ 		} else {
+-			WRITE_ONCE(up->corkflag, 0);
++			udp_clear_bit(CORK, sk);
+ 			lock_sock(sk);
+ 			push_pending_frames(sk);
+ 			release_sock(sk);
+@@ -2700,17 +2702,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		case UDP_ENCAP_ESPINUDP_NON_IKE:
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			if (sk->sk_family == AF_INET6)
+-				up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
++				WRITE_ONCE(up->encap_rcv,
++					   ipv6_stub->xfrm6_udp_encap_rcv);
+ 			else
+ #endif
+-				up->encap_rcv = xfrm4_udp_encap_rcv;
++				WRITE_ONCE(up->encap_rcv,
++					   xfrm4_udp_encap_rcv);
+ #endif
+ 			fallthrough;
+ 		case UDP_ENCAP_L2TPINUDP:
+-			up->encap_type = val;
+-			lock_sock(sk);
+-			udp_tunnel_encap_enable(sk->sk_socket);
+-			release_sock(sk);
++			WRITE_ONCE(up->encap_type, val);
++			udp_tunnel_encap_enable(sk);
+ 			break;
+ 		default:
+ 			err = -ENOPROTOOPT;
+@@ -2719,11 +2721,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_TX:
+-		up->no_check6_tx = valbool;
++		udp_set_no_check6_tx(sk, valbool);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_RX:
+-		up->no_check6_rx = valbool;
++		udp_set_no_check6_rx(sk, valbool);
+ 		break;
+ 
+ 	case UDP_SEGMENT:
+@@ -2733,14 +2735,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_GRO:
+-		lock_sock(sk);
+ 
+ 		/* when enabling GRO, accept the related GSO packet type */
+ 		if (valbool)
+-			udp_tunnel_encap_enable(sk->sk_socket);
+-		up->gro_enabled = valbool;
+-		up->accept_udp_l4 = valbool;
+-		release_sock(sk);
++			udp_tunnel_encap_enable(sk);
++		udp_assign_bit(GRO_ENABLED, sk, valbool);
++		udp_assign_bit(ACCEPT_L4, sk, valbool);
+ 		break;
+ 
+ 	/*
+@@ -2755,8 +2755,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 			val = 8;
+ 		else if (val > USHRT_MAX)
+ 			val = USHRT_MAX;
+-		up->pcslen = val;
+-		up->pcflag |= UDPLITE_SEND_CC;
++		WRITE_ONCE(up->pcslen, val);
++		udp_set_bit(UDPLITE_SEND_CC, sk);
+ 		break;
+ 
+ 	/* The receiver specifies a minimum checksum coverage value. To make
+@@ -2769,8 +2769,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 			val = 8;
+ 		else if (val > USHRT_MAX)
+ 			val = USHRT_MAX;
+-		up->pcrlen = val;
+-		up->pcflag |= UDPLITE_RECV_CC;
++		WRITE_ONCE(up->pcrlen, val);
++		udp_set_bit(UDPLITE_RECV_CC, sk);
+ 		break;
+ 
+ 	default:
+@@ -2808,19 +2808,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case UDP_CORK:
+-		val = READ_ONCE(up->corkflag);
++		val = udp_test_bit(CORK, sk);
+ 		break;
+ 
+ 	case UDP_ENCAP:
+-		val = up->encap_type;
++		val = READ_ONCE(up->encap_type);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_TX:
+-		val = up->no_check6_tx;
++		val = udp_get_no_check6_tx(sk);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_RX:
+-		val = up->no_check6_rx;
++		val = udp_get_no_check6_rx(sk);
+ 		break;
+ 
+ 	case UDP_SEGMENT:
+@@ -2828,17 +2828,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_GRO:
+-		val = up->gro_enabled;
++		val = udp_test_bit(GRO_ENABLED, sk);
+ 		break;
+ 
+ 	/* The following two cannot be changed on UDP sockets, the return is
+ 	 * always 0 (which corresponds to the full checksum coverage of UDP). */
+ 	case UDPLITE_SEND_CSCOV:
+-		val = up->pcslen;
++		val = READ_ONCE(up->pcslen);
+ 		break;
+ 
+ 	case UDPLITE_RECV_CSCOV:
+-		val = up->pcrlen;
++		val = READ_ONCE(up->pcrlen);
+ 		break;
+ 
+ 	default:
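The bulk of the udp.c changes, here and in the net/ipv6/udp.c hunks below, converts ad-hoc bitfields (corkflag, no_check6_tx, gro_enabled, pcflag, ...) into atomic bitops on a single flags word, so setsockopt() no longer needs lock_sock() to avoid corrupting neighbouring bits, while the remaining multi-bit fields (encap_type, pcrlen, pcslen) gain READ_ONCE()/WRITE_ONCE() annotations. A hedged sketch of how the udp_{test,set,clear,assign}_bit() helpers are presumably defined; the actual definitions live in include/linux/udp.h, not in this patch, and udp_flags would be an unsigned long in struct udp_sock:

    enum {
            UDP_FLAGS_CORK,
            UDP_FLAGS_NO_CHECK6_TX,
            UDP_FLAGS_NO_CHECK6_RX,
            UDP_FLAGS_GRO_ENABLED,
            UDP_FLAGS_ACCEPT_L4,
            UDP_FLAGS_ENCAP_ENABLED,
            UDP_FLAGS_UDPLITE_SEND_CC,
            UDP_FLAGS_UDPLITE_RECV_CC,
            /* plus further flags in the real header */
    };

    #define udp_test_bit(nr, sk) \
            test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
    #define udp_set_bit(nr, sk) \
            set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
    #define udp_clear_bit(nr, sk) \
            clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
    #define udp_assign_bit(nr, sk, val) \
            assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)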
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0f46b3c2e4ac5..6c95d28d0c4a7 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -557,10 +557,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 	NAPI_GRO_CB(skb)->is_flist = 0;
+ 	if (!sk || !udp_sk(sk)->gro_receive) {
+ 		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+-			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
++			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
+ 
+ 		if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+-		    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
++		    (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
+ 			return call_gro_receive(udp_gro_receive_segment, head, skb);
+ 
+ 		/* no GRO, be sure flush the current packet */
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 5f8104cf082d0..732e21b75ba28 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ 	udp_sk(sk)->gro_receive = cfg->gro_receive;
+ 	udp_sk(sk)->gro_complete = cfg->gro_complete;
+ 
+-	udp_tunnel_encap_enable(sock);
++	udp_tunnel_encap_enable(sk);
+ }
+ EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+ 
+diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
+index 39ecdad1b50ce..af37af3ab727b 100644
+--- a/net/ipv4/udplite.c
++++ b/net/ipv4/udplite.c
+@@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table);
+ static int udplite_sk_init(struct sock *sk)
+ {
+ 	udp_init_sock(sk);
+-	udp_sk(sk)->pcflag = UDPLITE_BIT;
+ 	pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ 		     "please contact the netdev mailing list\n");
+ 	return 0;
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index eac206a290d05..183f6dc372429 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ 	struct udphdr *uh;
+ 	struct iphdr *iph;
+ 	int iphlen, len;
+-
+ 	__u8 *udpdata;
+ 	__be32 *udpdata32;
+-	__u16 encap_type = up->encap_type;
++	u16 encap_type;
+ 
++	encap_type = READ_ONCE(up->encap_type);
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+ 		return 1;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 9270ef7f8e98b..0ac1d4595f0f0 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -162,7 +162,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ 		int err;
+ 
+ 		skb_mark_not_on_list(segs);
+-		err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++		/* Last GSO segment can be smaller than gso_size (and MTU).
++		 * Adding a fragment header would produce an "atomic fragment",
++		 * which is considered harmful (RFC-8021). Avoid that.
++		 */
++		err = segs->len > mtu ?
++			ip6_fragment(net, sk, segs, ip6_finish_output2) :
++			ip6_finish_output2(net, sk, segs);
+ 		if (err && ret == 0)
+ 			ret = err;
+ 	}
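Concretely, for the hunk above: in the GSO slow path only segments that actually exceed the MTU still take the ip6_fragment() route; a final segment that already fits is handed straight to ip6_finish_output2(). Forcing a fragment header onto that short tail would emit an "atomic fragment" (a packet carrying a fragment header despite never having been fragmented), which RFC 8021 deprecates as harmful.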
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5014aa6634527..8698b49dfc8de 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 	treq = tcp_rsk(req);
+ 	treq->tfo_listener = false;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto out_free;
+-
+ 	req->mss = mss;
+ 	ireq->ir_rmt_port = th->source;
+ 	ireq->ir_num = ntohs(th->dest);
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++	if (security_inet_conn_request(sk, skb, req))
++		goto out_free;
++
+ 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 24d3c5c791218..8d79642ae45dd 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -446,7 +446,7 @@ try_again:
+ 						      (struct sockaddr *)sin6);
+ 	}
+ 
+-	if (udp_sk(sk)->gro_enabled)
++	if (udp_test_bit(GRO_ENABLED, sk))
+ 		udp_cmsg_recv(msg, sk, skb);
+ 
+ 	if (np->rxopt.all)
+@@ -604,7 +604,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+ 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+ 
+-	if (!sk || udp_sk(sk)->encap_type) {
++	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ 		/* No socket for error: try tunnels before discarding */
+ 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+@@ -721,7 +721,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 	nf_reset_ct(skb);
+ 
+-	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
++	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
++	    READ_ONCE(up->encap_type)) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ 
+ 		/*
+@@ -759,16 +760,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	/*
+ 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ 	 */
+-	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
++	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++		u16 pcrlen = READ_ONCE(up->pcrlen);
+ 
+-		if (up->pcrlen == 0) {          /* full coverage was set  */
++		if (pcrlen == 0) {          /* full coverage was set  */
+ 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+ 					    UDP_SKB_CB(skb)->cscov, skb->len);
+ 			goto drop;
+ 		}
+-		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
++		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
+-					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
++					    UDP_SKB_CB(skb)->cscov, pcrlen);
+ 			goto drop;
+ 		}
+ 	}
+@@ -891,7 +893,7 @@ start_lookup:
+ 		/* If zero checksum and no_check is not on for
+ 		 * the socket then skip it.
+ 		 */
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx)
++		if (!uh->check && !udp_get_no_check6_rx(sk))
+ 			continue;
+ 		if (!first) {
+ 			first = sk;
+@@ -1009,7 +1011,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
+ 			udp6_sk_rx_dst_set(sk, dst);
+ 
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
++		if (!uh->check && !udp_get_no_check6_rx(sk)) {
+ 			if (refcounted)
+ 				sock_put(sk);
+ 			goto report_csum_error;
+@@ -1031,7 +1033,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 	/* Unicast */
+ 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ 	if (sk) {
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx)
++		if (!uh->check && !udp_get_no_check6_rx(sk))
+ 			goto report_csum_error;
+ 		return udp6_unicast_rcv_skb(sk, skb, uh);
+ 	}
+@@ -1270,7 +1272,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+-		if (udp_sk(sk)->no_check6_tx) {
++		if (udp_get_no_check6_tx(sk)) {
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+@@ -1291,7 +1293,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 
+ 	if (is_udplite)
+ 		csum = udplite_csum(skb);
+-	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
++	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 		goto send;
+ 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+@@ -1361,7 +1363,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	int addr_len = msg->msg_namelen;
+ 	bool connected = false;
+ 	int ulen = len;
+-	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
+ 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+@@ -1673,11 +1675,11 @@ static void udpv6_splice_eof(struct socket *sock)
+ 	struct sock *sk = sock->sk;
+ 	struct udp_sock *up = udp_sk(sk);
+ 
+-	if (!up->pending || READ_ONCE(up->corkflag))
++	if (!up->pending || udp_test_bit(CORK, sk))
+ 		return;
+ 
+ 	lock_sock(sk);
+-	if (up->pending && !READ_ONCE(up->corkflag))
++	if (up->pending && !udp_test_bit(CORK, sk))
+ 		udp_v6_push_pending_frames(sk);
+ 	release_sock(sk);
+ }
+@@ -1699,7 +1701,7 @@ void udpv6_destroy_sock(struct sock *sk)
+ 			if (encap_destroy)
+ 				encap_destroy(sk);
+ 		}
+-		if (up->encap_enabled) {
++		if (udp_test_bit(ENCAP_ENABLED, sk)) {
+ 			static_branch_dec(&udpv6_encap_needed_key);
+ 			udp_encap_disable();
+ 		}
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 267d491e97075..a60bec9b14f14 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -17,7 +17,6 @@
+ static int udplitev6_sk_init(struct sock *sk)
+ {
+ 	udpv6_init_sock(sk);
+-	udp_sk(sk)->pcflag = UDPLITE_BIT;
+ 	pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ 		     "please contact the netdev mailing list\n");
+ 	return 0;
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4907ab241d6be..4156387248e40 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ 	struct ipv6hdr *ip6h;
+ 	int len;
+ 	int ip6hlen = sizeof(struct ipv6hdr);
+-
+ 	__u8 *udpdata;
+ 	__be32 *udpdata32;
+-	__u16 encap_type = up->encap_type;
++	u16 encap_type;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return xfrm4_udp_encap_rcv(sk, skb);
+ 
++	encap_type = READ_ONCE(up->encap_type);
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+ 		return 1;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 03608d3ded4b8..8d21ff25f1602 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ 	switch (tunnel->encap) {
+ 	case L2TP_ENCAPTYPE_UDP:
+ 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
+-		(udp_sk(sk))->encap_type = 0;
+-		(udp_sk(sk))->encap_rcv = NULL;
+-		(udp_sk(sk))->encap_destroy = NULL;
++		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
++		udp_sk(sk)->encap_rcv = NULL;
++		udp_sk(sk)->encap_destroy = NULL;
+ 		break;
+ 	case L2TP_ENCAPTYPE_IP:
+ 		break;
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 7cac441862e21..51bccfb00a9cd 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ 	skb->transport_header += llc_len;
+ 	skb_pull(skb, llc_len);
+ 	if (skb->protocol == htons(ETH_P_802_2)) {
+-		__be16 pdulen = eth_hdr(skb)->h_proto;
+-		s32 data_size = ntohs(pdulen) - llc_len;
++		__be16 pdulen;
++		s32 data_size;
++
++		if (skb->mac_len < ETH_HLEN)
++			return 0;
++
++		pdulen = eth_hdr(skb)->h_proto;
++		data_size = ntohs(pdulen) - llc_len;
+ 
+ 		if (data_size < 0 ||
+ 		    !pskb_may_pull(skb, data_size))
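This LLC hunk, together with the llc_s_ac.c and llc_station.c hunks below, guards against frames whose recorded MAC header is shorter than a full Ethernet header before trusting eth_hdr(skb)->h_proto as an 802.3 length field:

    if (skb->mac_len < ETH_HLEN)
            return 0;   /* runt/injected frame: no valid 802.3 length */

Without the check, the h_proto read lands past the actual MAC header and data_size is computed from whatever bytes happen to follow it.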
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 79d1cef8f15a9..06fb8e6944b06 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+ 	int rc = 1;
+ 	u32 data_size;
+ 
++	if (skb->mac_len < ETH_HLEN)
++		return 1;
++
+ 	llc_pdu_decode_sa(skb, mac_da);
+ 	llc_pdu_decode_da(skb, mac_sa);
+ 	llc_pdu_decode_ssap(skb, &dsap);
+diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
+index 05c6ae0920534..f506542925109 100644
+--- a/net/llc/llc_station.c
++++ b/net/llc/llc_station.c
+@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
+ 	u32 data_size;
+ 	struct sk_buff *nskb;
+ 
++	if (skb->mac_len < ETH_HLEN)
++		goto out;
++
+ 	/* The test request command is type U (llc_len = 3) */
+ 	data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
+ 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index 30cd0c905a24f..aa37a1410f377 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -510,10 +510,13 @@ int drv_change_vif_links(struct ieee80211_local *local,
+ 	if (ret)
+ 		return ret;
+ 
+-	for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		link = rcu_access_pointer(sdata->link[link_id]);
++	if (!local->in_reconfig) {
++		for_each_set_bit(link_id, &links_to_add,
++				 IEEE80211_MLD_MAX_NUM_LINKS) {
++			link = rcu_access_pointer(sdata->link[link_id]);
+ 
+-		ieee80211_link_debugfs_drv_add(link);
++			ieee80211_link_debugfs_drv_add(link);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
+index 49dc809cab290..1570fac8411f4 100644
+--- a/net/mac80211/drop.h
++++ b/net/mac80211/drop.h
+@@ -53,4 +53,7 @@ enum mac80211_drop_reason {
+ #undef DEF
+ };
+ 
++#define RX_RES_IS_UNUSABLE(result)	\
++	(((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE)
++
+ #endif /* MAC80211_DROP_H */
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 2cce9eba6a120..8032167e15332 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1406,7 +1406,7 @@ struct ieee80211_local {
+ 	/* wowlan is enabled -- don't reconfig on resume */
+ 	bool wowlan;
+ 
+-	struct work_struct radar_detected_work;
++	struct wiphy_work radar_detected_work;
+ 
+ 	/* number of RX chains the hardware has */
+ 	u8 rx_chains;
+@@ -1483,14 +1483,14 @@ struct ieee80211_local {
+ 	int hw_scan_ies_bufsize;
+ 	struct cfg80211_scan_info scan_info;
+ 
+-	struct work_struct sched_scan_stopped_work;
++	struct wiphy_work sched_scan_stopped_work;
+ 	struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
+ 	struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ 	u8 scan_addr[ETH_ALEN];
+ 
+ 	unsigned long leave_oper_channel_time;
+ 	enum mac80211_scan_state next_scan_state;
+-	struct delayed_work scan_work;
++	struct wiphy_delayed_work scan_work;
+ 	struct ieee80211_sub_if_data __rcu *scan_sdata;
+ 	/* For backward compatibility only -- do not use */
+ 	struct cfg80211_chan_def _oper_chandef;
+@@ -1583,9 +1583,9 @@ struct ieee80211_local {
+ 	/*
+ 	 * Remain-on-channel support
+ 	 */
+-	struct delayed_work roc_work;
++	struct wiphy_delayed_work roc_work;
+ 	struct list_head roc_list;
+-	struct work_struct hw_roc_start, hw_roc_done;
++	struct wiphy_work hw_roc_start, hw_roc_done;
+ 	unsigned long hw_roc_start_time;
+ 	u64 roc_cookie_counter;
+ 
+@@ -1930,7 +1930,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata,
+ 			      u64 *changed);
+ 
+ /* scan/BSS handling */
+-void ieee80211_scan_work(struct work_struct *work);
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
+ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ 				const u8 *ssid, u8 ssid_len,
+ 				struct ieee80211_channel **channels,
+@@ -1963,7 +1963,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ 				       struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
+ void ieee80211_sched_scan_end(struct ieee80211_local *local);
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++				       struct wiphy_work *work);
+ 
+ /* off-channel/mgmt-tx */
+ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+@@ -2568,7 +2569,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+ void ieee80211_dfs_cac_timer(unsigned long data);
+ void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++				       struct wiphy_work *work);
+ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ 			      struct cfg80211_csa_settings *csa_settings);
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index be586bc0b5b7d..6e3bfb46af44d 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -691,7 +691,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 	ieee80211_recalc_ps(local);
+ 
+ 	if (cancel_scan)
+-		flush_delayed_work(&local->scan_work);
++		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ 
+ 	if (local->open_count == 0) {
+ 		ieee80211_stop_device(local);
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 6148208b320e3..16cbaea93fc32 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -195,7 +195,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+ 
+ 	memset(to_free, 0, sizeof(links));
+ 
+-	if (old_links == new_links)
++	if (old_links == new_links && dormant_links == sdata->vif.dormant_links)
+ 		return 0;
+ 
+ 	/* if there were no old links, need to clear the pointers to deflink */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 24315d7b31263..4548f84451095 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -335,10 +335,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	struct ieee80211_sub_if_data *sdata;
+ 	int ret;
+ 
+-	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+-	flush_work(&local->sched_scan_stopped_work);
+-	flush_work(&local->radar_detected_work);
+ 
+ 	rtnl_lock();
+ 	/* we might do interface manipulations, so need both */
+@@ -379,8 +376,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	ieee80211_scan_cancel(local);
+ 
+ 	/* make sure any new ROC will consider local->in_reconfig */
+-	flush_delayed_work(&local->roc_work);
+-	flush_work(&local->hw_roc_done);
++	wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
++	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+ 
+ 	/* wait for all packet processing to be done */
+ 	synchronize_net();
+@@ -809,12 +806,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 	INIT_LIST_HEAD(&local->chanctx_list);
+ 	mutex_init(&local->chanctx_mtx);
+ 
+-	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
++	wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
+ 
+ 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
+ 
+-	INIT_WORK(&local->radar_detected_work,
+-		  ieee80211_dfs_radar_detected_work);
++	wiphy_work_init(&local->radar_detected_work,
++			ieee80211_dfs_radar_detected_work);
+ 
+ 	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ 	local->smps_mode = IEEE80211_SMPS_OFF;
+@@ -825,8 +822,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 		  ieee80211_dynamic_ps_disable_work);
+ 	timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
+ 
+-	INIT_WORK(&local->sched_scan_stopped_work,
+-		  ieee80211_sched_scan_stopped_work);
++	wiphy_work_init(&local->sched_scan_stopped_work,
++			ieee80211_sched_scan_stopped_work);
+ 
+ 	spin_lock_init(&local->ack_status_lock);
+ 	idr_init(&local->ack_status_frames);
+@@ -1482,13 +1479,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ 	 */
+ 	ieee80211_remove_interfaces(local);
+ 
++	wiphy_lock(local->hw.wiphy);
++	wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
++	wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
++	wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
++	wiphy_unlock(local->hw.wiphy);
+ 	rtnl_unlock();
+ 
+-	cancel_delayed_work_sync(&local->roc_work);
+ 	cancel_work_sync(&local->restart_work);
+ 	cancel_work_sync(&local->reconfig_filter);
+-	flush_work(&local->sched_scan_stopped_work);
+-	flush_work(&local->radar_detected_work);
+ 
+ 	ieee80211_clear_tx_pending(local);
+ 	rate_control_deinitialize(local);
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index d32e304eeb4ba..3e52aaa57b1fc 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -648,7 +648,7 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
+ 
+ 	cache = &sdata->u.mesh.tx_cache;
+ 	spin_lock_bh(&cache->walk_lock);
+-	entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
++	entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
+ 	if (entry)
+ 		mesh_fast_tx_entry_free(cache, entry);
+ 	spin_unlock_bh(&cache->walk_lock);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index cdf991e74ab99..5bedd9cef414d 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ 	if (dur == LONG_MAX)
+ 		return false;
+ 
+-	mod_delayed_work(local->workqueue, &local->roc_work, dur);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
+ 	return true;
+ }
+ 
+@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ 	roc->notified = true;
+ }
+ 
+-static void ieee80211_hw_roc_start(struct work_struct *work)
++static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, hw_roc_start);
+@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+ 
+ 	trace_api_ready_on_channel(local);
+ 
+-	ieee80211_queue_work(hw, &local->hw_roc_start);
++	wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+ 
+@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ 				tmp->started = true;
+ 				tmp->abort = true;
+ 			}
+-			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++			wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
+ 			return;
+ 		}
+ 
+@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ 			ieee80211_hw_config(local, 0);
+ 		}
+ 
+-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+-					     msecs_to_jiffies(min_dur));
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++					 msecs_to_jiffies(min_dur));
+ 
+ 		/* tell userspace or send frame(s) */
+ 		list_for_each_entry(tmp, &local->roc_list, list) {
+@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
+ 		_ieee80211_start_next_roc(local);
+ 	} else {
+ 		/* delay it a bit */
+-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+-					     round_jiffies_relative(HZ/2));
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++					 round_jiffies_relative(HZ / 2));
+ 	}
+ }
+ 
+@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
+ 	}
+ }
+ 
+-static void ieee80211_roc_work(struct work_struct *work)
++static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, roc_work.work);
+@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
+ 	mutex_unlock(&local->mtx);
+ }
+ 
+-static void ieee80211_hw_roc_done(struct work_struct *work)
++static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, hw_roc_done);
+@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+ 
+ 	trace_api_remain_on_channel_expired(local);
+ 
+-	ieee80211_queue_work(hw, &local->hw_roc_done);
++	wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+ 
+@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ 		/* if not HW assist, just queue & schedule work */
+ 		if (!local->ops->remain_on_channel) {
+ 			list_add_tail(&roc->list, &local->roc_list);
+-			ieee80211_queue_delayed_work(&local->hw,
+-						     &local->roc_work, 0);
++			wiphy_delayed_work_queue(local->hw.wiphy,
++						 &local->roc_work, 0);
+ 		} else {
+ 			/* otherwise actually kick it off here
+ 			 * (for error handling)
+@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ 	if (!cookie)
+ 		return -ENOENT;
+ 
+-	flush_work(&local->hw_roc_start);
++	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+ 
+ 	mutex_lock(&local->mtx);
+ 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ 	} else {
+ 		/* go through work struct to return to the operating channel */
+ 		found->abort = true;
+-		mod_delayed_work(local->workqueue, &local->roc_work, 0);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
+ 	}
+ 
+  out_unlock:
+@@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ 
+ void ieee80211_roc_setup(struct ieee80211_local *local)
+ {
+-	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+-	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+-	INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
++	wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
++	wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
++	wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
+ 	INIT_LIST_HEAD(&local->roc_list);
+ }
+ 
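
[Note: the recurring change across these mac80211 hunks is a migration from the
generic workqueue API (INIT_WORK/schedule_work/flush_work) to the wiphy-bound
work API, whose handlers take a (wiphy, work) pair and run with the wiphy mutex
already held. A minimal sketch of the pattern, with my_state and my_work_fn as
illustrative names rather than anything from the patch:

	#include <net/cfg80211.h>

	struct my_state {
		struct wiphy_work work;
	};

	static void my_work_fn(struct wiphy *wiphy, struct wiphy_work *work)
	{
		struct my_state *st = container_of(work, struct my_state, work);

		/* runs with wiphy->mtx held; no extra wiphy_lock() needed */
		(void)st;
	}

	/*
	 * setup:   wiphy_work_init(&st->work, my_work_fn);
	 * queue:   wiphy_work_queue(wiphy, &st->work);
	 * cancel:  wiphy_work_cancel(wiphy, &st->work);   (wiphy mutex held)
	 */

This is why the converted handlers (ieee80211_scan_work, ieee80211_hw_roc_start,
and friends) gain the extra wiphy argument, and why the radar-detected work in
the util.c hunk below can drop its own wiphy_lock()/wiphy_unlock() pair.]
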
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8f6b6f56b65b4..26ca2f5dc52b2 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2112,7 +2112,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+ 	/* either the frame has been decrypted or will be dropped */
+ 	status->flag |= RX_FLAG_DECRYPTED;
+ 
+-	if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) &&
++	if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
+ 		     rx->sdata->dev))
+ 		cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+ 					     skb->data, skb->len);
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 0805aa8603c61..68ec2124c3db5 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -274,8 +274,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 		 * the beacon/proberesp rx gives us an opportunity to upgrade
+ 		 * to active scan
+ 		 */
+-		 set_bit(SCAN_BEACON_DONE, &local->scanning);
+-		 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++		set_bit(SCAN_BEACON_DONE, &local->scanning);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ 	}
+ 
+ 	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+@@ -505,7 +505,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
+ 
+ 	memcpy(&local->scan_info, info, sizeof(*info));
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+ EXPORT_SYMBOL(ieee80211_scan_completed);
+ 
+@@ -545,8 +545,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ 	/* We need to set power level at maximum rate for scanning. */
+ 	ieee80211_hw_config(local, 0);
+ 
+-	ieee80211_queue_delayed_work(&local->hw,
+-				     &local->scan_work, 0);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ 
+ 	return 0;
+ }
+@@ -603,8 +602,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+ 					lockdep_is_held(&local->mtx))))
+ 		return;
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+-				     round_jiffies_relative(0));
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++				 round_jiffies_relative(0));
+ }
+ 
+ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+@@ -795,8 +794,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ 		}
+ 
+ 		/* Now, just wait a bit and we are all done! */
+-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+-					     next_delay);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++					 next_delay);
+ 		return 0;
+ 	} else {
+ 		/* Do normal software scan */
+@@ -1043,7 +1042,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ 	local->next_scan_state = SCAN_SET_CHANNEL;
+ }
+ 
+-void ieee80211_scan_work(struct work_struct *work)
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, scan_work.work);
+@@ -1137,7 +1136,8 @@ void ieee80211_scan_work(struct work_struct *work)
+ 		}
+ 	} while (next_delay == 0);
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++				 next_delay);
+ 	goto out;
+ 
+ out_complete:
+@@ -1280,12 +1280,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * If the work is currently running, it must be blocked on
+-	 * the mutex, but we'll set scan_sdata = NULL and it'll
+-	 * simply exit once it acquires the mutex.
+-	 */
+-	cancel_delayed_work(&local->scan_work);
++	wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
+ 	/* and clean up */
+ 	memset(&local->scan_info, 0, sizeof(local->scan_info));
+ 	__ieee80211_scan_completed(&local->hw, true);
+@@ -1427,10 +1422,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
+ 
+ 	mutex_unlock(&local->mtx);
+ 
+-	cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
++	cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
+ }
+ 
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++				       struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local,
+@@ -1453,6 +1449,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+ 	if (local->in_reconfig)
+ 		return;
+ 
+-	schedule_work(&local->sched_scan_stopped_work);
++	wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 7751f8ba960ee..0c5cc75857e4f 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2990,7 +2990,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
+ 				   WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
+ 
+ 	if (val)
+-		sta->sta.max_amsdu_subframes = 4 << val;
++		sta->sta.max_amsdu_subframes = 4 << (4 - val);
+ }
+ 
+ #ifdef CONFIG_LOCKDEP
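
[Note: the one-line sta_info.c fix inverts the shift. Assuming the usual reading
of the 802.11 "Max Number Of MSDUs In A-MSDU" extended-capability field
(0 = no limit, 1 = 32, 2 = 16, 3 = 8), the old formula mapped the values
backwards:

	val = 1:  old 4 << 1 =  8   new 4 << (4 - 1) = 32
	val = 2:  old 4 << 2 = 16   new 4 << (4 - 2) = 16
	val = 3:  old 4 << 3 = 32   new 4 << (4 - 3) =  8
]
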
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 8a6917cf63cf9..172173b2a9eb8 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2340,8 +2340,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ 		 */
+ 		if (aborted)
+ 			set_bit(SCAN_ABORTED, &local->scanning);
+-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+-		flush_delayed_work(&local->scan_work);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
++		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ 	}
+ }
+ 
+@@ -4356,7 +4356,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+ 	mutex_unlock(&local->mtx);
+ }
+ 
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++				       struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, radar_detected_work);
+@@ -4374,9 +4375,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+ 	}
+ 	mutex_unlock(&local->chanctx_mtx);
+ 
+-	wiphy_lock(local->hw.wiphy);
+ 	ieee80211_dfs_cac_cancel(local);
+-	wiphy_unlock(local->hw.wiphy);
+ 
+ 	if (num_chanctx > 1)
+ 		/* XXX: multi-channel is not supported yet */
+@@ -4391,7 +4390,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
+ 
+ 	trace_api_radar_detected(local);
+ 
+-	schedule_work(&local->radar_detected_work);
++	wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
+ }
+ EXPORT_SYMBOL(ieee80211_radar_detected);
+ 
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index bceaab8dd8e46..74698582a2859 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -52,6 +52,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
+ 
+ 	mptcp_set_owner_r(skb, sk);
+ 	__skb_queue_tail(&sk->sk_receive_queue, skb);
++	mptcp_sk(sk)->bytes_received += skb->len;
+ 
+ 	sk->sk_data_ready(sk);
+ 
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index 6616ba5d0b049..5b37487d9d11f 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+ 
+ static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+ 
++static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
++{
++	unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
++
++	if (ifa_addr_type & IPV6_ADDR_MAPPED)
++		return false;
++
++	if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
++		return false;
++
++	if (scope) {
++		unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
++
++		if (!(scope & ifa_scope))
++			return false;
++	}
++
++	return true;
++}
++
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 		     unsigned int hooknum)
+@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 	if (hooknum == NF_INET_LOCAL_OUT) {
+ 		newdst.in6 = loopback_addr;
+ 	} else {
++		unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
+ 		struct inet6_dev *idev;
+-		struct inet6_ifaddr *ifa;
+ 		bool addr = false;
+ 
+ 		idev = __in6_dev_get(skb->dev);
+ 		if (idev != NULL) {
++			const struct inet6_ifaddr *ifa;
++
+ 			read_lock_bh(&idev->lock);
+ 			list_for_each_entry(ifa, &idev->addr_list, if_list) {
++				if (!nf_nat_redirect_ipv6_usable(ifa, scope))
++					continue;
++
+ 				newdst.in6 = ifa->addr;
+ 				addr = true;
+ 				break;
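
[Note: the new nf_nat_redirect_ipv6_usable() helper keeps REDIRECT from
rewriting the destination to an address that cannot actually receive the flow:
v4-mapped and tentative (non-optimistic) addresses are skipped, and when the
original destination has a non-global scope (global scope is encoded as 0, so
the `if (scope)` test means "non-global"), the chosen interface address must
share that scope. A worked case, assuming a packet sent to a link-local
destination on an interface holding one global and one link-local address:

	daddr  = fe80::1     ->  scope = IPV6_ADDR_LINKLOCAL
	ifa #1 = 2001:db8::5 ->  ifa_scope = 0 (global); scope & ifa_scope == 0, skipped
	ifa #2 = fe80::42    ->  ifa_scope = IPV6_ADDR_LINKLOCAL; match, used as newdst
]
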
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 6a05bed3cb46d..8776266ba1532 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3465,10 +3465,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ 			goto cont_skip;
+ 		if (*idx < s_idx)
+ 			goto cont;
+-		if (*idx > s_idx) {
+-			memset(&cb->args[1], 0,
+-					sizeof(cb->args) - sizeof(cb->args[0]));
+-		}
+ 		if (prule)
+ 			handle = prule->handle;
+ 		else
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 7ddb9a78e3fc8..ef93e0d3bee04 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
+ {
+ 	struct recent_table *t = pde_data(file_inode(file));
+ 	struct recent_entry *e;
+-	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
++	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
+ 	const char *c = buf;
+ 	union nf_inet_addr addr = {};
+ 	u_int16_t family;
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 331730fd35803..2669a1d1ad9f4 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1043,7 +1043,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+ 		if (err)
+ 			return err;
+ 
+-		nf_conn_act_ct_ext_add(ct);
++		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ 	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ 		   labels_nonzero(&info->labels.mask)) {
+ 		err = ovs_ct_set_labels(ct, key, &info->labels.value,
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index ac85d4644a3c3..df8a271948a1c 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
+ 		conn->idle_timestamp = jiffies;
+ 		if (atomic_dec_and_test(&conn->active))
+ 			rxrpc_set_service_reap_timer(conn->rxnet,
+-						     jiffies + rxrpc_connection_expiry);
++						     jiffies + rxrpc_connection_expiry * HZ);
+ 	}
+ 
+ 	rxrpc_put_call(call, rxrpc_call_put_io_thread);
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index 7d910aee4f8cb..c553a30e9c838 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+ 	struct rxrpc_local *local =
+ 		container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+ 
+-	if (local->kill_all_client_conns &&
++	if (!local->kill_all_client_conns &&
+ 	    test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+ 		rxrpc_wake_up_io_thread(local);
+ }
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index ad7c955453782..d131750663c3c 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+ 	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+ }
+ 
++static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
++{
++	struct nf_conn_act_ct_ext *act_ct_ext;
++
++	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
++	if (act_ct_ext) {
++		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
++		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
++	}
++}
++
+ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ 				  struct nf_conn *ct,
+ 				  bool tcp, bool bidirectional)
+@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ 	else
+ 		ctinfo = IP_CT_ESTABLISHED_REPLY;
+ 
++	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
++	tcf_ct_flow_ct_ext_ifidx_update(flow);
+ 	flow_offload_refresh(nf_ft, flow, force_refresh);
+ 	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ 		/* Process this flow in SW to allow promoting to ASSURED */
+@@ -1030,7 +1043,7 @@ do_nat:
+ 		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+ 
+ 		if (!nf_ct_is_confirmed(ct))
+-			nf_conn_act_ct_ext_add(ct);
++			nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ 
+ 		/* This will take care of sending queued events
+ 		 * even if the connection is already confirmed.
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index c0e4e587b4994..42d4211b6277e 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
+ 
+ 	if (!smc->use_fallback) {
+ 		rc = smc_close_active(smc);
+-		sock_set_flag(sk, SOCK_DEAD);
++		smc_sock_set_flag(sk, SOCK_DEAD);
+ 		sk->sk_shutdown |= SHUTDOWN_MASK;
+ 	} else {
+ 		if (sk->sk_state != SMC_CLOSED) {
+@@ -1722,7 +1722,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+ 		if (new_clcsock)
+ 			sock_release(new_clcsock);
+ 		new_sk->sk_state = SMC_CLOSED;
+-		sock_set_flag(new_sk, SOCK_DEAD);
++		smc_sock_set_flag(new_sk, SOCK_DEAD);
+ 		sock_put(new_sk); /* final */
+ 		*new_smc = NULL;
+ 		goto out;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 1f2b912c43d10..8358e96342e7b 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -374,4 +374,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+ int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ 
++static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
++{
++	set_bit(flag, &sk->sk_flags);
++}
++
+ #endif	/* __SMC_H */
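
[Note: the new smc_sock_set_flag() helper differs from the core sock_set_flag()
it replaces at these call sites in one respect: it uses the atomic set_bit()
where sock_set_flag() (as of this kernel) uses the non-atomic __set_bit(),
presumably so SOCK_DEAD/SOCK_DONE can be set safely while other contexts may
update sk->sk_flags concurrently. Paraphrased for comparison:

	/* include/net/sock.h (paraphrased): plain, non-atomic RMW */
	static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
	{
		__set_bit(flag, &sk->sk_flags);
	}
]
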
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 89105e95b4523..3c06625ceb200 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+ {
+ 	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ 	struct smc_connection *conn = cdcpend->conn;
++	struct smc_buf_desc *sndbuf_desc;
+ 	struct smc_sock *smc;
+ 	int diff;
+ 
++	sndbuf_desc = conn->sndbuf_desc;
+ 	smc = container_of(conn, struct smc_sock, conn);
+ 	bh_lock_sock(&smc->sk);
+-	if (!wc_status) {
+-		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
++	if (!wc_status && sndbuf_desc) {
++		diff = smc_curs_diff(sndbuf_desc->len,
+ 				     &cdcpend->conn->tx_curs_fin,
+ 				     &cdcpend->cursor);
+ 		/* sndbuf_space is decreased in smc_sendmsg */
+@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ 	union smc_host_cursor cfed;
+ 	int rc;
+ 
+-	if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+-		return -ENOBUFS;
+-
+ 	smc_cdc_add_pending_send(conn, pend);
+ 
+ 	conn->tx_cdc_seq++;
+@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+ 		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+ 		if (smc->clcsock && smc->clcsock->sk)
+ 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+-		sock_set_flag(&smc->sk, SOCK_DONE);
++		smc_sock_set_flag(&smc->sk, SOCK_DONE);
+ 		sock_hold(&smc->sk); /* sock_put in close_work */
+ 		if (!queue_work(smc_close_wq, &conn->close_work))
+ 			sock_put(&smc->sk);
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index dbdf03e8aa5b5..10219f55aad14 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
+ 	struct sock *sk = &smc->sk;
+ 
+ 	release_sock(sk);
+-	cancel_work_sync(&smc->conn.close_work);
++	if (cancel_work_sync(&smc->conn.close_work))
++		sock_put(sk);
+ 	cancel_delayed_work_sync(&smc->conn.tx_work);
+ 	lock_sock(sk);
+ }
+@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
+ 		break;
+ 	}
+ 
+-	sock_set_flag(sk, SOCK_DEAD);
++	smc_sock_set_flag(sk, SOCK_DEAD);
+ 	sk->sk_state_change(sk);
+ 
+ 	if (release_clcsock) {
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 2eff1c7949cbc..8715c9b05f90d 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ 		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ 		sz = ntohs(p->len);
+ 		/* Sanity check */
+-		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
++		if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ 			/* Good, check if the desired type exists */
+ 			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ 				goto ok;
+@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+ 			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+ 
+ 	/* Total len */
+-	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
++	len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ 	ga->len = htons(len);
+ 	return len;
+ }
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index e8fd257c0e688..1a9a5bdaccf4f 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_LINK_NAME]		= { .type = NLA_STRING,
++	[TIPC_NLA_LINK_NAME]		= { .type = NLA_NUL_STRING,
+ 					    .len = TIPC_MAX_LINK_NAME },
+ 	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
+ 	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
+@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1]	= {
+ 	[TIPC_NLA_BEARER_UNSPEC]	= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_BEARER_NAME]		= { .type = NLA_STRING,
++	[TIPC_NLA_BEARER_NAME]		= { .type = NLA_NUL_STRING,
+ 					    .len = TIPC_MAX_BEARER_NAME },
+ 	[TIPC_NLA_BEARER_PROP]		= { .type = NLA_NESTED },
+ 	[TIPC_NLA_BEARER_DOMAIN]	= { .type = NLA_U32 }
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index ce925f3a52492..57e4601eaaf50 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1488,7 +1488,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 	 */
+ 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
+-	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
++	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ 		      sk->sk_allocation);
+ 	if (!mem) {
+ 		err = -ENOMEM;
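
[Note: this tls_sw.c change, like the tipc and SOF ipc4-topology hunks in this
patch, routes allocation counts through size_add() from <linux/overflow.h>.
Both size_add() and struct_size() saturate at SIZE_MAX instead of wrapping, so
an overflowing count makes the allocation fail rather than silently producing
an undersized buffer. A minimal sketch, with ctx/elems as illustrative names:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* n saturates to SIZE_MAX if a + b would overflow ... */
	size_t n = size_add(a, b);

	/* ... and kmalloc(SIZE_MAX) fails, so p is never a short buffer */
	p = kmalloc(struct_size(ctx, elems, n), GFP_KERNEL);
]
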
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index b769fc2589315..f5ea654ca659b 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1150,11 +1150,17 @@ virtio_transport_recv_connected(struct sock *sk,
+ 			vsk->peer_shutdown |= RCV_SHUTDOWN;
+ 		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ 			vsk->peer_shutdown |= SEND_SHUTDOWN;
+-		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+-		    vsock_stream_has_data(vsk) <= 0 &&
+-		    !sock_flag(sk, SOCK_DONE)) {
+-			(void)virtio_transport_reset(vsk, NULL);
+-			virtio_transport_do_close(vsk, true);
++		if (vsk->peer_shutdown == SHUTDOWN_MASK) {
++			if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
++				(void)virtio_transport_reset(vsk, NULL);
++				virtio_transport_do_close(vsk, true);
++			}
++			/* Remove this socket anyway because the remote peer sent
++			 * the shutdown. This way a new connection will succeed
++			 * if the remote peer uses the same source port,
++			 * even if the old socket is still unreleased, but now disconnected.
++			 */
++			vsock_remove_sock(vsk);
+ 		}
+ 		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+ 			sk->sk_state_change(sk);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index acec41c1809a8..563cfbe3237c9 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1049,7 +1049,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+ 
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++				  struct wiphy_work *end)
+ {
+ 	unsigned int runaway_limit = 100;
+ 	unsigned long flags;
+@@ -1068,6 +1069,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
+ 		wk->func(&rdev->wiphy, wk);
+ 
+ 		spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++
++		if (wk == end)
++			break;
++
+ 		if (WARN_ON(--runaway_limit == 0))
+ 			INIT_LIST_HEAD(&rdev->wiphy_work_list);
+ 	}
+@@ -1118,7 +1123,7 @@ void wiphy_unregister(struct wiphy *wiphy)
+ #endif
+ 
+ 	/* surely nothing is reachable now, clean up work */
+-	cfg80211_process_wiphy_works(rdev);
++	cfg80211_process_wiphy_works(rdev, NULL);
+ 	wiphy_unlock(&rdev->wiphy);
+ 	rtnl_unlock();
+ 
+@@ -1640,6 +1645,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+ 
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	unsigned long flags;
++	bool run;
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	run = !work || !list_empty(&work->entry);
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++	if (run)
++		cfg80211_process_wiphy_works(rdev, work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_flush);
++
+ void wiphy_delayed_work_timer(struct timer_list *t)
+ {
+ 	struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+@@ -1672,6 +1692,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+ 
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork)
++{
++	lockdep_assert_held(&wiphy->mtx);
++
++	del_timer_sync(&dwork->timer);
++	wiphy_work_flush(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
++
+ static int __init cfg80211_init(void)
+ {
+ 	int err;
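
[Note: the two new cfg80211 primitives give mac80211 a synchronous flush:
wiphy_work_flush(wiphy, NULL) drains the whole wiphy work list, a non-NULL
argument runs pending items up to and including that work, and
wiphy_delayed_work_flush() first kills the timer (and asserts that wiphy->mtx
is held). A sketch of how the mac80211 hunks above use them:

	/* under wiphy_lock(local->hw.wiphy) */
	wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); /* timer + work */
	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);       /* one work item */
]
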
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 86f209abc06ab..ba1f2ce5f4ff3 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -464,7 +464,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ 			  struct net_device *dev, enum nl80211_iftype ntype,
+ 			  struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++				  struct wiphy_work *end);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+ 
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 8210a6090ac16..e4cc6209c7b9b 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2358,8 +2358,8 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+ 
+ 	/* elem might be invalid after the memmove */
+ 	next = (void *)(elem->data + elem->datalen);
+-
+ 	elem_datalen = elem->datalen;
++
+ 	if (elem->id == WLAN_EID_EXTENSION) {
+ 		copied = elem->datalen - 1;
+ 		if (copied > data_len)
+@@ -2380,7 +2380,7 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+ 
+ 	for (elem = next;
+ 	     elem->data < ies + ieslen &&
+-		elem->data + elem->datalen < ies + ieslen;
++		elem->data + elem->datalen <= ies + ieslen;
+ 	     elem = next) {
+ 		/* elem might be invalid after the memmove */
+ 		next = (void *)(elem->data + elem->datalen);
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index c629bac3f2983..565511a3f461e 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
+ 		}
+-		cfg80211_process_wiphy_works(rdev);
++		cfg80211_process_wiphy_works(rdev, NULL);
+ 		if (rdev->ops->suspend)
+ 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ 		if (ret == 1) {
+ 			/* Driver refuse to configure wowlan */
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
+-			cfg80211_process_wiphy_works(rdev);
++			cfg80211_process_wiphy_works(rdev, NULL);
+ 			ret = rdev_suspend(rdev, NULL);
+ 		}
+ 		if (ret == 0)
+diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
+index 0edfdb40364b8..25b3b587d37c0 100644
+--- a/scripts/Makefile.vmlinux_o
++++ b/scripts/Makefile.vmlinux_o
+@@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
+ 
+ vmlinux-objtool-args-$(delay-objtool)			+= $(objtool-args-y)
+ vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL)		+= --no-unreachable
+-vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION)	+= --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)
++vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION)	+= --noinstr \
++							   $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
+ 
+ objtool-args = $(vmlinux-objtool-args-y) --link
+ 
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index fab74ca9df6fc..d16099a2b1c68 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -63,10 +63,11 @@ LX_GDBPARSED(IRQD_LEVEL)
+ LX_GDBPARSED(IRQ_HIDDEN)
+ 
+ /* linux/module.h */
+-LX_GDBPARSED(MOD_TEXT)
+-LX_GDBPARSED(MOD_DATA)
+-LX_GDBPARSED(MOD_RODATA)
+-LX_GDBPARSED(MOD_RO_AFTER_INIT)
++if IS_BUILTIN(CONFIG_MODULES):
++    LX_GDBPARSED(MOD_TEXT)
++    LX_GDBPARSED(MOD_DATA)
++    LX_GDBPARSED(MOD_RODATA)
++    LX_GDBPARSED(MOD_RO_AFTER_INIT)
+ 
+ /* linux/mount.h */
+ LX_VALUE(MNT_NOSUID)
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 7056751c29b1f..6583b36dbe694 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: tee:uuid */
+ static int do_tee_entry(const char *filename, void *symval, char *alias)
+ {
+-	DEF_FIELD(symval, tee_client_device_id, uuid);
++	DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
+ 
+ 	sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+-		uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+-		uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+-		uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+-		uuid.b[15]);
++		uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
++		uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
++		uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
++		uuid->b[15]);
+ 
+ 	add_wildcard(alias);
+ 	return 1;
+@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: ishtp:{guid} */
+ static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+ {
+-	DEF_FIELD(symval, ishtp_device_id, guid);
++	DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
+ 
+ 	strcpy(alias, ISHTP_MODULE_PREFIX "{");
+-	add_guid(alias, guid);
++	add_guid(alias, *guid);
+ 	strcat(alias, "}");
+ 
+ 	return 1;
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index b38f7b2a5e1d5..ec695a6caac7d 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -255,6 +255,7 @@ void aa_free_profile(struct aa_profile *profile)
+ 
+ 	aa_put_ns(profile->ns);
+ 	kfree_sensitive(profile->rename);
++	kfree_sensitive(profile->disconnected);
+ 
+ 	free_attachment(&profile->attach);
+ 
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 694fb7a099624..74bcd0d79fb33 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -804,7 +804,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	const char *info = "failed to unpack profile";
+ 	size_t ns_len;
+ 	struct rhashtable_params params = { 0 };
+-	char *key = NULL;
++	char *key = NULL, *disconnected = NULL;
+ 	struct aa_data *data;
+ 	int error = -EPROTO;
+ 	kernel_cap_t tmpcap;
+@@ -870,7 +870,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	}
+ 
+ 	/* disconnected attachment string is optional */
+-	(void) aa_unpack_str(e, &profile->disconnected, "disconnected");
++	(void) aa_unpack_strdup(e, &disconnected, "disconnected");
++	profile->disconnected = disconnected;
+ 
+ 	/* per profile debug flags (complain, audit) */
+ 	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index 9ba77e685126a..297ba795c71b9 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -1539,8 +1539,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ 	ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
+ 	if (ret) {
+ 		dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+-		pm_runtime_disable(cs35l41->dev);
+-		goto err;
++		goto err_pm;
+ 	}
+ 
+ 	dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
+@@ -1548,6 +1547,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ 	return 0;
+ 
+ err_pm:
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 	pm_runtime_put_noidle(cs35l41->dev);
+ 
+@@ -1566,6 +1566,7 @@ void cs35l41_hda_remove(struct device *dev)
+ 	struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ 
+ 	pm_runtime_get_sync(cs35l41->dev);
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 
+ 	if (cs35l41->halo_initialized)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c2fbf484b1104..7f1d79f450a2a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7193,8 +7193,10 @@ enum {
+ 	ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ 	ALC299_FIXUP_PREDATOR_SPK,
+ 	ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
++	ALC289_FIXUP_DELL_SPK1,
+ 	ALC289_FIXUP_DELL_SPK2,
+ 	ALC289_FIXUP_DUAL_SPK,
++	ALC289_FIXUP_RTK_AMP_DUAL_SPK,
+ 	ALC294_FIXUP_SPK2_TO_DAC1,
+ 	ALC294_FIXUP_ASUS_DUAL_SPK,
+ 	ALC285_FIXUP_THINKPAD_X1_GEN7,
+@@ -7293,6 +7295,7 @@ enum {
+ 	ALC287_FIXUP_THINKPAD_I2S_SPK,
+ 	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ 	ALC2XX_FIXUP_HEADSET_MIC,
++	ALC289_FIXUP_DELL_CS35L41_SPI_2,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -8519,6 +8522,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ 	},
++	[ALC289_FIXUP_DELL_SPK1] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x14, 0x90170140 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
++	},
+ 	[ALC289_FIXUP_DELL_SPK2] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -8534,6 +8546,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC289_FIXUP_DELL_SPK2
+ 	},
++	[ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_speaker2_to_dac1,
++		.chained = true,
++		.chain_id = ALC289_FIXUP_DELL_SPK1
++	},
+ 	[ALC294_FIXUP_SPK2_TO_DAC1] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_speaker2_to_dac1,
+@@ -9395,6 +9413,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_headset_mic,
+ 	},
++	[ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs35l41_fixup_spi_two,
++		.chained = true,
++		.chain_id = ALC289_FIXUP_DUAL_SPK
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9505,13 +9529,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+-	SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
++	SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
+index 1e4205295a0de..74b9494ca83e9 100644
+--- a/sound/soc/codecs/cs35l41-lib.c
++++ b/sound/soc/codecs/cs35l41-lib.c
+@@ -1224,7 +1224,7 @@ int cs35l41_global_enable(struct regmap *regmap, enum cs35l41_boost_type b_type,
+ 		cs35l41_mdsync_down_seq[2].def = pwr_ctrl1;
+ 		ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_down_seq,
+ 					     ARRAY_SIZE(cs35l41_mdsync_down_seq));
+-		if (!enable)
++		if (ret || !enable)
+ 			break;
+ 
+ 		if (!pll_lock)
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index 8a879b6f48290..9e26e96f0776e 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -386,10 +386,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 	struct cs35l41_private *cs35l41 = data;
+ 	unsigned int status[4] = { 0, 0, 0, 0 };
+ 	unsigned int masks[4] = { 0, 0, 0, 0 };
+-	int ret = IRQ_NONE;
+ 	unsigned int i;
++	int ret;
+ 
+-	pm_runtime_get_sync(cs35l41->dev);
++	ret = pm_runtime_resume_and_get(cs35l41->dev);
++	if (ret < 0) {
++		dev_err(cs35l41->dev,
++			"pm_runtime_resume_and_get failed in %s: %d\n",
++			__func__, ret);
++		return IRQ_NONE;
++	}
++
++	ret = IRQ_NONE;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(status); i++) {
+ 		regmap_read(cs35l41->regmap,
+@@ -1283,6 +1291,8 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_IRQ1_MASK3, CS35L41_INT3_PLL_LOCK_MASK,
+ 				   0 << CS35L41_INT3_PLL_LOCK_SHIFT);
+ 
++	init_completion(&cs35l41->pll_lock);
++
+ 	ret = devm_request_threaded_irq(cs35l41->dev, cs35l41->irq, NULL, cs35l41_irq,
+ 					IRQF_ONESHOT | IRQF_SHARED | irq_pol,
+ 					"cs35l41", cs35l41);
+@@ -1305,8 +1315,6 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	init_completion(&cs35l41->pll_lock);
+-
+ 	pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000);
+ 	pm_runtime_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_mark_last_busy(cs35l41->dev);
+@@ -1330,6 +1338,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ 	return 0;
+ 
+ err_pm:
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 	pm_runtime_put_noidle(cs35l41->dev);
+ 
+@@ -1346,6 +1355,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
+ void cs35l41_remove(struct cs35l41_private *cs35l41)
+ {
+ 	pm_runtime_get_sync(cs35l41->dev);
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 
+ 	regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
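
[Note: the cs35l41 hunks (both the HDA and ASoC variants) repair the
runtime-PM bring-up so every pm_runtime_use_autosuspend() is balanced by
pm_runtime_dont_use_autosuspend() on the error and remove paths, as the
runtime-PM API expects. The resulting shape, sketched:

	pm_runtime_set_autosuspend_delay(dev, 3000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	/* ... */
	err_pm:
		pm_runtime_dont_use_autosuspend(dev); /* balances use_autosuspend */
		pm_runtime_disable(dev);
		pm_runtime_put_noidle(dev);
]
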
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index d661bc9255f92..91e0b635fb82c 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -895,18 +895,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ 			       void *data)
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+-	int ret = -ENOTSUPP;
+ 
+ 	if (hcp->hcd.ops->hook_plugged_cb) {
+ 		hcp->jack = jack;
+-		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+-						    hcp->hcd.data,
+-						    plugged_cb,
+-						    component->dev);
+-		if (ret)
+-			hcp->jack = NULL;
++		return 0;
+ 	}
+-	return ret;
++
++	return -ENOTSUPP;
+ }
+ 
+ static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
+@@ -982,6 +977,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ 	return ret;
+ }
+ 
++static int hdmi_probe(struct snd_soc_component *component)
++{
++	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
++	int ret = 0;
++
++	if (hcp->hcd.ops->hook_plugged_cb) {
++		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
++						    hcp->hcd.data,
++						    plugged_cb,
++						    component->dev);
++	}
++
++	return ret;
++}
++
+ static void hdmi_remove(struct snd_soc_component *component)
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+@@ -992,6 +1002,7 @@ static void hdmi_remove(struct snd_soc_component *component)
+ }
+ 
+ static const struct snd_soc_component_driver hdmi_driver = {
++	.probe			= hdmi_probe,
+ 	.remove			= hdmi_remove,
+ 	.dapm_widgets		= hdmi_widgets,
+ 	.num_dapm_widgets	= ARRAY_SIZE(hdmi_widgets),
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index bab7d34cf585b..5f181b89838ac 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -41,6 +41,7 @@
+ 
+ /**
+  * struct codec_priv - CODEC private data
++ * @mclk: Main clock of the CODEC
+  * @mclk_freq: Clock rate of MCLK
+  * @free_freq: Clock rate of MCLK for hw_free()
+  * @mclk_id: MCLK (or main clock) id for set_sysclk()
+diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
+index 670cbdb361b6c..3c79650efac11 100644
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
+ 					      &fsl_easrc_dai, 1);
+ 	if (ret) {
+ 		dev_err(dev, "failed to register ASoC DAI\n");
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ 					      NULL, 0);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register ASoC platform\n");
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	return 0;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++	return ret;
+ }
+ 
+ static void fsl_easrc_remove(struct platform_device *pdev)
+diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
+index 9014978100207..3f7ccae3f6b1a 100644
+--- a/sound/soc/fsl/mpc5200_dma.c
++++ b/sound/soc/fsl/mpc5200_dma.c
+@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
+ 
+ /**
+  * psc_dma_trigger: start and stop the DMA transfer.
++ * @component: triggered component
++ * @substream: triggered substream
++ * @cmd: triggered command
+  *
+  * This function is called by ALSA to start, stop, pause, and resume the DMA
+  * transfer of data.
+diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+index 623e3bebb8884..4360b9f5ff2c7 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
++++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+@@ -58,6 +58,11 @@ static const struct snd_soc_dapm_route rt712_sdca_map[] = {
+ 	{ "rt712 MIC2", NULL, "Headset Mic" },
+ };
+ 
++static const struct snd_soc_dapm_route rt713_sdca_map[] = {
++	{ "Headphone", NULL, "rt713 HP" },
++	{ "rt713 MIC2", NULL, "Headset Mic" },
++};
++
+ static const struct snd_kcontrol_new rt_sdca_jack_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Headphone"),
+ 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+@@ -109,6 +114,9 @@ static int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd)
+ 	} else if (strstr(component->name_prefix, "rt712")) {
+ 		ret = snd_soc_dapm_add_routes(&card->dapm, rt712_sdca_map,
+ 					      ARRAY_SIZE(rt712_sdca_map));
++	} else if (strstr(component->name_prefix, "rt713")) {
++		ret = snd_soc_dapm_add_routes(&card->dapm, rt713_sdca_map,
++					      ARRAY_SIZE(rt713_sdca_map));
+ 	} else {
+ 		dev_err(card->dev, "%s is not supported\n", component->name_prefix);
+ 		return -EINVAL;
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index 57ea815d3f041..b776c58dcf47a 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ 		module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ 		if (!module->instance_id) {
+ 			ret = -ENOMEM;
++			kfree(module);
+ 			goto free_uuid_list;
+ 		}
+ 
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 9c11016f032c2..9777ba89e956c 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -1179,7 +1179,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ 	playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ 	if (!playback_codec) {
+ 		ret = -EINVAL;
+-		dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
++		dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
+ 		goto err_playback_codec;
+ 	}
+ 
+@@ -1193,7 +1193,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ 	for_each_card_prelinks(card, i, dai_link) {
+ 		ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ 		if (ret) {
+-			dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
++			dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
+ 				      dai_link->name);
+ 			goto err_probe;
+ 		}
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 5fd32185fe63d..de279e51dc571 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3668,7 +3668,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ 		dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
+ 		break;
+ 	case snd_soc_dapm_clock_supply:
+-		w->clk = devm_clk_get(dapm->dev, w->name);
++		w->clk = devm_clk_get(dapm->dev, widget->name);
+ 		if (IS_ERR(w->clk)) {
+ 			ret = PTR_ERR(w->clk);
+ 			goto request_failed;
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 3aa6b988cb4b4..6cf4cd667d036 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -698,14 +698,12 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
+ 
+ 	if (!rollback) {
+ 		snd_soc_runtime_deactivate(rtd, substream->stream);
+-		/* clear the corresponding DAIs parameters when going to be inactive */
+-		for_each_rtd_dais(rtd, i, dai) {
+-			if (snd_soc_dai_active(dai) == 0)
+-				soc_pcm_set_dai_params(dai, NULL);
+ 
+-			if (snd_soc_dai_stream_active(dai, substream->stream) == 0)
+-				snd_soc_dai_digital_mute(dai, 1, substream->stream);
+-		}
++		/* Make sure DAI parameters cleared if the DAI becomes inactive */
++		for_each_rtd_dais(rtd, i, dai)
++			if (snd_soc_dai_active(dai) == 0 &&
++			    (dai->rate || dai->channels || dai->sample_bits))
++				soc_pcm_set_dai_params(dai, NULL);
+ 	}
+ 
+ 	for_each_rtd_dais(rtd, i, dai)
+@@ -936,6 +934,15 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+ 
+ 	snd_soc_dpcm_mutex_assert_held(rtd);
+ 
++	/* clear the corresponding DAIs parameters when going to be inactive */
++	for_each_rtd_dais(rtd, i, dai) {
++		if (snd_soc_dai_active(dai) == 1)
++			soc_pcm_set_dai_params(dai, NULL);
++
++		if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
++			snd_soc_dai_digital_mute(dai, 1, substream->stream);
++	}
++
+ 	/* run the stream event */
+ 	snd_soc_dapm_stream_stop(rtd, substream->stream);
+ 
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 2d1616b81485c..0938b259f7034 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -459,9 +459,10 @@ int snd_sof_device_remove(struct device *dev)
+ 	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ 	struct snd_sof_pdata *pdata = sdev->pdata;
+ 	int ret;
++	bool aborted = false;
+ 
+ 	if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+-		cancel_work_sync(&sdev->probe_work);
++		aborted = cancel_work_sync(&sdev->probe_work);
+ 
+ 	/*
+ 	 * Unregister any registered client device first before IPC and debugfs
+@@ -487,6 +488,9 @@ int snd_sof_device_remove(struct device *dev)
+ 		snd_sof_free_debug(sdev);
+ 		snd_sof_remove(sdev);
+ 		sof_ops_free(sdev);
++	} else if (aborted) {
++		/* probe_work never ran */
++		sof_ops_free(sdev);
+ 	}
+ 
+ 	/* release firmware */
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 8fb6582e568e7..98ed20cafb573 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -882,7 +882,8 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+ 	if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
+ 		struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
+ 		u32 ext_size = struct_size(base_cfg_ext, pin_formats,
+-						swidget->num_input_pins + swidget->num_output_pins);
++					   size_add(swidget->num_input_pins,
++						    swidget->num_output_pins));
+ 
+ 		base_cfg_ext = kzalloc(ext_size, GFP_KERNEL);
+ 		if (!base_cfg_ext) {
+diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
+index 1028b5efcfff8..a93de8b64b251 100644
+--- a/sound/soc/ti/ams-delta.c
++++ b/sound/soc/ti/ams-delta.c
+@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
+ static void cx81801_close(struct tty_struct *tty)
+ {
+ 	struct snd_soc_component *component = tty->disc_data;
+-	struct snd_soc_dapm_context *dapm = &component->card->dapm;
++	struct snd_soc_dapm_context *dapm;
+ 
+ 	del_timer_sync(&cx81801_timer);
+ 
+@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
+ 
+ 	v253_ops.close(tty);
+ 
++	dapm = &component->card->dapm;
++
+ 	/* Revert back to default audio input/output constellation */
+ 	snd_soc_dapm_mutex_lock(dapm);
+ 
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index 44bbf80f0cfdd..0d0a7a19d6f95 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -54,9 +54,12 @@ enum autochan {
+ static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+ 	unsigned int bytes = 0;
+-	int i = 0;
++	int i = 0, max = 0;
++	unsigned int misalignment;
+ 
+ 	while (i < num_channels) {
++		if (channels[i].bytes > max)
++			max = channels[i].bytes;
+ 		if (bytes % channels[i].bytes == 0)
+ 			channels[i].location = bytes;
+ 		else
+@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
+ 		bytes = channels[i].location + channels[i].bytes;
+ 		i++;
+ 	}
++	/*
++	 * We want the data in next sample to also be properly aligned so
++	 * we'll add padding at the end if needed. Adding padding only
++	 * works for channel data which size is 2^n bytes.
++	 */
++	misalignment = bytes % max;
++	if (misalignment)
++		bytes += max - misalignment;
+ 
+ 	return bytes;
+ }
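
[Note: the iio_generic_buffer change pads each scan record out to the size of
its largest channel so the next sample starts aligned. Worked through for a
4-byte channel followed by a 2-byte one:

	ch0: 4 bytes at location 0, ch1: 2 bytes at location 4  ->  bytes = 6, max = 4
	misalignment = 6 % 4 = 2  ->  bytes += 4 - 2  ->  record size 8

As the in-code comment notes, this only works when channel sizes are powers of
two.]
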
+diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
+index d5d771ccdc7b4..e88a6d8a0b0f9 100644
+--- a/tools/lib/perf/include/internal/rc_check.h
++++ b/tools/lib/perf/include/internal/rc_check.h
+@@ -9,8 +9,12 @@
+  * Enable reference count checking implicitly with leak checking, which is
+  * integrated into address sanitizer.
+  */
+-#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
++#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ #define REFCNT_CHECKING 1
++#elif defined(__has_feature)
++#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
++#define REFCNT_CHECKING 1
++#endif
+ #endif
+ 
+ /*
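
For background on the detection order this hunk sets up: GCC defines __SANITIZE_ADDRESS__ under -fsanitize=address, while Clang exposes the same information through __has_feature(); the build-system macros stay as a fallback for older toolchains. A compilable sketch (REFCNT_CHECKING_DEMO is a stand-in name):

#include <stdio.h>

#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
#define REFCNT_CHECKING_DEMO 1
#elif defined(__has_feature)
#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
#define REFCNT_CHECKING_DEMO 1
#endif
#endif

int main(void)
{
#ifdef REFCNT_CHECKING_DEMO
	puts("reference-count checking enabled");
#else
	puts("reference-count checking disabled");
#endif
	return 0;
}

Building it with and without -fsanitize=address flips the message on both compilers.
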
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index c54f7235c5d94..f40febdd6e36a 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
+ 	exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
+ 	pager_init(UNUSED);
+ 
+-	objtool_run(argc, argv);
+-
+-	return 0;
++	return objtool_run(argc, argv);
+ }
+diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
+index 3c36324712b6e..482d6c52e2edf 100644
+--- a/tools/perf/Documentation/perf-kwork.txt
++++ b/tools/perf/Documentation/perf-kwork.txt
+@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
+ SYNOPSIS
+ --------
+ [verse]
+-'perf kwork' {record}
++'perf kwork' {record|report|latency|timehist}
+ 
+ DESCRIPTION
+ -----------
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index f178b36c69402..997b9387ab273 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -69,6 +69,10 @@ include ../scripts/utilities.mak
+ # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
+ # for dwarf backtrace post unwind.
+ #
++# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked;
++# this will remove multiple features and tools, such as 'perf trace',
++# that need it to read tracefs event format files, etc.
++#
+ # Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
+ # for reading the 32-bit compatibility VDSO in 64-bit mode
+ #
+diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
+index df817d1f9f3eb..3bcc5c7035c21 100644
+--- a/tools/perf/arch/arm64/util/mem-events.c
++++ b/tools/perf/arch/arm64/util/mem-events.c
+@@ -20,7 +20,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i)
+ 	return &perf_mem_events[i];
+ }
+ 
+-char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
++const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
+ {
+ 	struct perf_mem_event *e = perf_mem_events__ptr(i);
+ 
+diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
+index 4120fafe0be41..78b986e5268d9 100644
+--- a/tools/perf/arch/powerpc/util/mem-events.c
++++ b/tools/perf/arch/powerpc/util/mem-events.c
+@@ -3,10 +3,10 @@
+ #include "mem-events.h"
+ 
+ /* PowerPC does not support 'ldlat' parameter. */
+-char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
++const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
+ {
+ 	if (i == PERF_MEM_EVENTS__LOAD)
+-		return (char *) "cpu/mem-loads/";
++		return "cpu/mem-loads/";
+ 
+-	return (char *) "cpu/mem-stores/";
++	return "cpu/mem-stores/";
+ }
+diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
+index a8a782bcb1213..191b372f9a2d3 100644
+--- a/tools/perf/arch/x86/util/mem-events.c
++++ b/tools/perf/arch/x86/util/mem-events.c
+@@ -52,7 +52,7 @@ bool is_mem_loads_aux_event(struct evsel *leader)
+ 	return leader->core.attr.config == MEM_LOADS_AUX;
+ }
+ 
+-char *perf_mem_events__name(int i, char *pmu_name)
++const char *perf_mem_events__name(int i, const char *pmu_name)
+ {
+ 	struct perf_mem_event *e = perf_mem_events__ptr(i);
+ 
+@@ -65,7 +65,7 @@ char *perf_mem_events__name(int i, char *pmu_name)
+ 
+ 		if (!pmu_name) {
+ 			mem_loads_name__init = true;
+-			pmu_name = (char *)"cpu";
++			pmu_name = "cpu";
+ 		}
+ 
+ 		if (perf_pmus__have_event(pmu_name, "mem-loads-aux")) {
+@@ -82,12 +82,12 @@ char *perf_mem_events__name(int i, char *pmu_name)
+ 
+ 	if (i == PERF_MEM_EVENTS__STORE) {
+ 		if (!pmu_name)
+-			pmu_name = (char *)"cpu";
++			pmu_name = "cpu";
+ 
+ 		scnprintf(mem_stores_name, sizeof(mem_stores_name),
+ 			  e->name, pmu_name);
+ 		return mem_stores_name;
+ 	}
+ 
+-	return (char *)e->name;
++	return e->name;
+ }
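
This and the surrounding mem-events/pmu hunks share one theme: return const char * and delete the (char *) casts, so callers can no longer mutate or free string literals and cached names they do not own. A tiny sketch of the contract (hypothetical lookup, not the perf API):

#include <stdio.h>

/* Returning const char * instead of casting const away: the caller may
 * read the string but the compiler rejects writes to storage the caller
 * does not own. */
static const char *event_name(int is_load)
{
	return is_load ? "cpu/mem-loads/" : "cpu/mem-stores/";
}

int main(void)
{
	const char *name = event_name(1);

	printf("%s\n", name);
	/* name[0] = 'x'; -- now a compile error instead of UB at runtime */
	return 0;
}
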
+diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
+index 65d8cdff4d5f4..f428cffb03781 100644
+--- a/tools/perf/arch/x86/util/pmu.c
++++ b/tools/perf/arch/x86/util/pmu.c
+@@ -126,7 +126,7 @@ close_dir:
+ 	return ret;
+ }
+ 
+-static char *__pmu_find_real_name(const char *name)
++static const char *__pmu_find_real_name(const char *name)
+ {
+ 	struct pmu_alias *pmu_alias;
+ 
+@@ -135,10 +135,10 @@ static char *__pmu_find_real_name(const char *name)
+ 			return pmu_alias->name;
+ 	}
+ 
+-	return (char *)name;
++	return name;
+ }
+ 
+-char *pmu_find_real_name(const char *name)
++const char *pmu_find_real_name(const char *name)
+ {
+ 	if (cached_list)
+ 		return __pmu_find_real_name(name);
+@@ -149,7 +149,7 @@ char *pmu_find_real_name(const char *name)
+ 	return __pmu_find_real_name(name);
+ }
+ 
+-static char *__pmu_find_alias_name(const char *name)
++static const char *__pmu_find_alias_name(const char *name)
+ {
+ 	struct pmu_alias *pmu_alias;
+ 
+@@ -160,7 +160,7 @@ static char *__pmu_find_alias_name(const char *name)
+ 	return NULL;
+ }
+ 
+-char *pmu_find_alias_name(const char *name)
++const char *pmu_find_alias_name(const char *name)
+ {
+ 	if (cached_list)
+ 		return __pmu_find_alias_name(name);
+diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
+index 14bf7a8429e76..de2fbb7c56c32 100644
+--- a/tools/perf/builtin-kwork.c
++++ b/tools/perf/builtin-kwork.c
+@@ -406,12 +406,14 @@ static int work_push_atom(struct perf_kwork *kwork,
+ 
+ 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ 	if (work == NULL) {
+-		free(atom);
++		atom_free(atom);
+ 		return -1;
+ 	}
+ 
+-	if (!profile_event_match(kwork, work, sample))
++	if (!profile_event_match(kwork, work, sample)) {
++		atom_free(atom);
+ 		return 0;
++	}
+ 
+ 	if (dst_type < KWORK_TRACE_MAX) {
+ 		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+@@ -1692,9 +1694,10 @@ int cmd_kwork(int argc, const char **argv)
+ 	static struct perf_kwork kwork = {
+ 		.class_list          = LIST_HEAD_INIT(kwork.class_list),
+ 		.tool = {
+-			.mmap    = perf_event__process_mmap,
+-			.mmap2   = perf_event__process_mmap2,
+-			.sample  = perf_kwork__process_tracepoint_sample,
++			.mmap		= perf_event__process_mmap,
++			.mmap2		= perf_event__process_mmap2,
++			.sample		= perf_kwork__process_tracepoint_sample,
++			.ordered_events = true,
+ 		},
+ 		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
+ 		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
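
The kwork changes close two leaks in work_push_atom(): the freshly allocated atom must be released through its real destructor on the lookup-failure path, and also on the early "no match, not an error" return. A distilled sketch of that bug shape (hypothetical helpers):

#include <stdlib.h>

struct atom { int data; };

static struct atom *atom_new(void) { return calloc(1, sizeof(struct atom)); }
static void atom_free(struct atom *a) { free(a); }

/* Every early return after the allocation must release the atom; before
 * the fix the "no match" return (a non-error) leaked it. */
static int push_atom(int lookup_ok, int match_ok, struct atom **out)
{
	struct atom *atom = atom_new();

	if (!atom)
		return -1;
	if (!lookup_ok) {
		atom_free(atom);
		return -1;
	}
	if (!match_ok) {
		atom_free(atom);   /* leaked before the fix */
		return 0;
	}
	*out = atom;               /* ownership handed to the caller */
	return 0;
}

int main(void)
{
	struct atom *a = NULL;
	int ret = push_atom(1, 1, &a);

	atom_free(a);
	return ret ? 1 : 0;
}
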
+diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
+index 7fec2cca759f6..e74d0223deb7f 100644
+--- a/tools/perf/builtin-list.c
++++ b/tools/perf/builtin-list.c
+@@ -502,7 +502,7 @@ int cmd_list(int argc, const char **argv)
+ 				ret = -1;
+ 				goto out;
+ 			}
+-			default_ps.pmu_glob = pmu->name;
++			default_ps.pmu_glob = strdup(pmu->name);
+ 		}
+ 	}
+ 	print_cb.print_start(ps);
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 0cf1c5a2e0323..17255a0982e7d 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -524,6 +524,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ 	struct map *kmap;
+ 	struct symbol *sym;
+ 	u64 ip;
++	const char *arch = perf_env__arch(machine->env);
+ 
+ 	if (list_empty(&callstack_filters))
+ 		return true;
+@@ -531,7 +532,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ 	for (int i = 0; i < max_stack_depth; i++) {
+ 		struct callstack_filter *filter;
+ 
+-		if (!callstack || !callstack[i])
++		/*
++		 * In powerpc, the callchain saved by kernel always includes
++		 * first three entries as the NIP (next instruction pointer),
++		 * LR (link register), and the contents of LR save area in the
++		 * second stack frame. In certain scenarios it's possible to have
++		 * invalid kernel instruction addresses in either LR or the second
++		 * stack frame's LR. In that case, kernel will store that address as
++		 * zero.
++		 *
++		 * The check below continues to walk the callstack in
++		 * case the first or second callstack entry holds a 0
++		 * address on powerpc.
++		 */
++		if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
++						(i != 1 && i != 2))))
+ 			break;
+ 
+ 		ip = callstack[i];
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 07b48f6df48eb..a3af805a1d572 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
+ 	 * taking the highest cpu number to be the size of
+ 	 * the aggregation translate cpumap.
+ 	 */
+-	if (evsel_list->core.user_requested_cpus)
++	if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ 		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ 	else
+ 		nr = 0;
+diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
+index 9a03189d33d38..74fa9e642b424 100644
+--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
++++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
+@@ -147,7 +147,7 @@ static inline
+ unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
+ {
+ 	unsigned int augmented_len = sizeof(*augmented_arg);
+-	int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
++	int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
+ 
+ 	augmented_arg->size = augmented_arg->err = 0;
+ 	/*
+@@ -196,7 +196,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
+ 	if (socklen > sizeof(augmented_args->saddr))
+ 		socklen = sizeof(augmented_args->saddr);
+ 
+-	bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++	bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+ 
+ 	return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -215,7 +215,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
+ 	if (socklen > sizeof(augmented_args->saddr))
+ 		socklen = sizeof(augmented_args->saddr);
+ 
+-	bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++	bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+ 
+ 	return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -305,7 +305,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+         if (augmented_args == NULL)
+ 		goto failure;
+ 
+-	if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
++	if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ 		goto failure;
+ 
+ 	attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+@@ -319,7 +319,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+                 goto failure;
+ 
+ 	// Now that we read attr->size and tested it against the size limits, read it completely
+-	if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
++	if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
+ 		goto failure;
+ 
+ 	return augmented__output(args, augmented_args, len + size);
+@@ -341,7 +341,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
+ 	if (size > sizeof(augmented_args->__data))
+                 goto failure;
+ 
+-	bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
++	bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
+ 
+ 	return augmented__output(args, augmented_args, len + size);
+ failure:
+@@ -380,7 +380,7 @@ int sys_enter(struct syscall_enter_args *args)
+ 	if (augmented_args == NULL)
+ 		return 1;
+ 
+-	bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
++	bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
+ 
+ 	/*
+ 	 * Jump to syscall specific augmenter, even if the default one,
+@@ -401,7 +401,7 @@ int sys_exit(struct syscall_exit_args *args)
+ 	if (pid_filter__has(&pids_filtered, getpid()))
+ 		return 0;
+ 
+-	bpf_probe_read(&exit_args, sizeof(exit_args), args);
++	bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
+ 	/*
+ 	 * Jump to syscall specific return augmenter, even if the default one,
+ 	 * "!raw_syscalls:unaugmented" that will just return 1 to return the
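
The helper swaps above follow the split introduced in kernel 5.5: bpf_probe_read() was deprecated in favour of address-space-specific variants, and each call site now states whether the pointer it chases is a user-space syscall argument or kernel memory. A BPF-side fragment illustrating the distinction (builds with clang -target bpf against libbpf headers; not a complete loadable program):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* hypothetical context: user_ptr arrived as a syscall argument */
struct demo_args {
	const void *user_ptr;
};

static __always_inline int copy_both(struct demo_args *args,
				     char *ubuf, unsigned int ulen,
				     struct demo_args *kcopy)
{
	/* chases a *user-space* address */
	if (bpf_probe_read_user(ubuf, ulen, args->user_ptr) < 0)
		return -1;
	/* the args struct itself lives in *kernel* memory */
	if (bpf_probe_read_kernel(kcopy, sizeof(*kcopy), args) < 0)
		return -1;
	return 0;
}
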
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+index 0a2bf56ee7c10..1c44f0120505e 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+@@ -190,7 +190,7 @@
+     "BriefDescription": "Threshold counter exceeded a value of 128."
+   },
+   {
+-    "EventCode": "0x400FA",
++    "EventCode": "0x500FA",
+     "EventName": "PM_RUN_INST_CMPL",
+     "BriefDescription": "PowerPC instruction completed while the run latch is set."
+   }
+diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+index 8fc62b8f667d8..e1f55fcfa0d02 100644
+--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
++++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+@@ -48,6 +48,12 @@
+         "MetricName": "C7_Pkg_Residency",
+         "ScaleUnit": "100%"
+     },
++    {
++        "BriefDescription": "Uncore frequency per die [GHZ]",
++        "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
++        "MetricGroup": "SoC",
++        "MetricName": "UNCORE_FREQ"
++    },
+     {
+         "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+         "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+@@ -652,7 +658,7 @@
+     },
+     {
+         "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+-        "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
++        "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+         "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+         "MetricName": "tma_info_system_dram_bw_use",
+         "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+@@ -690,6 +696,12 @@
+         "MetricGroup": "SMT",
+         "MetricName": "tma_info_system_smt_2t_utilization"
+     },
++    {
++        "BriefDescription": "Socket actual clocks when any core is active on that socket",
++        "MetricExpr": "cbox_0@event\\=0x0@",
++        "MetricGroup": "SoC",
++        "MetricName": "tma_info_system_socket_clks"
++    },
+     {
+         "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+         "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
+diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
+index 64383fc34ef1b..5c6f930db4387 100644
+--- a/tools/perf/tests/pmu-events.c
++++ b/tools/perf/tests/pmu-events.c
+@@ -508,7 +508,7 @@ static struct perf_pmu_alias *find_alias(const char *test_event, struct list_hea
+ }
+ 
+ /* Verify aliases are as expected */
+-static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
++static int __test_core_pmu_event_aliases(const char *pmu_name, int *count)
+ {
+ 	struct perf_pmu_test_event const **test_event_table;
+ 	struct perf_pmu *pmu;
+@@ -635,7 +635,7 @@ out:
+ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	{
+ 		.pmu = {
+-			.name = (char *)"hisi_sccl1_ddrc2",
++			.name = "hisi_sccl1_ddrc2",
+ 			.is_uncore = 1,
+ 		},
+ 		.aliases = {
+@@ -644,7 +644,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"uncore_cbox_0",
++			.name = "uncore_cbox_0",
+ 			.is_uncore = 1,
+ 		},
+ 		.aliases = {
+@@ -655,7 +655,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"hisi_sccl3_l3c7",
++			.name = "hisi_sccl3_l3c7",
+ 			.is_uncore = 1,
+ 		},
+ 		.aliases = {
+@@ -664,7 +664,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"uncore_imc_free_running_0",
++			.name = "uncore_imc_free_running_0",
+ 			.is_uncore = 1,
+ 		},
+ 		.aliases = {
+@@ -673,7 +673,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"uncore_imc_0",
++			.name = "uncore_imc_0",
+ 			.is_uncore = 1,
+ 		},
+ 		.aliases = {
+@@ -682,7 +682,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"uncore_sys_ddr_pmu0",
++			.name = "uncore_sys_ddr_pmu0",
+ 			.is_uncore = 1,
+ 			.id = (char *)"v8",
+ 		},
+@@ -692,7 +692,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
+ 	},
+ 	{
+ 		.pmu = {
+-			.name = (char *)"uncore_sys_ccn_pmu4",
++			.name = "uncore_sys_ccn_pmu4",
+ 			.is_uncore = 1,
+ 			.id = (char *)"0x01",
+ 		},
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index 01f70b8e705a8..21f4d9ba023d9 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
+ /* v5.18 kernel added prev_state arg, so it needs to check the signature */
+ static void check_sched_switch_args(void)
+ {
+-	const struct btf *btf = bpf_object__btf(skel->obj);
++	const struct btf *btf = btf__load_vmlinux_btf();
+ 	const struct btf_type *t1, *t2, *t3;
+ 	u32 type_id;
+ 
+@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
+ 		return;
+ 
+ 	t3 = btf__type_by_id(btf, t2->type);
+-	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
++	/* btf_trace func proto has one more argument for the context */
++	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+ 		/* new format: pass prev_state as 4th arg */
+ 		skel->rodata->has_prev_state = true;
+ 	}
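
The probe now loads the full vmlinux BTF rather than the skeleton's own BTF, and it counts five prototype arguments because a btf_trace_* typedef's function prototype carries one extra context parameter ahead of the tracepoint arguments. A userspace sketch using the same libbpf BTF calls (error handling trimmed; link with -lbpf and expect /sys/kernel/btf/vmlinux at run time):

#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t;
	__s32 id;

	if (!btf)
		return 1;
	id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
				    BTF_KIND_TYPEDEF);
	if (id < 0)
		return 1;
	t = btf__type_by_id(btf, id);        /* the typedef itself */
	t = btf__type_by_id(btf, t->type);   /* its target: a pointer */
	t = btf__type_by_id(btf, t->type);   /* pointee: the func proto */
	printf("sched_switch proto has %u args (5 => has prev_state)\n",
	       btf_vlen(t));
	btf__free(btf);
	return 0;
}
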
+diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+new file mode 100644
+index 0000000000000..49502c04183a2
+--- /dev/null
++++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+@@ -0,0 +1 @@
++!vmlinux.h
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 9eabf3ec56e97..a164164001fb5 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -324,11 +324,9 @@ int perf_env__read_pmu_mappings(struct perf_env *env)
+ 	u32 pmu_num = 0;
+ 	struct strbuf sb;
+ 
+-	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (!pmu->name)
+-			continue;
++	while ((pmu = perf_pmus__scan(pmu)))
+ 		pmu_num++;
+-	}
++
+ 	if (!pmu_num) {
+ 		pr_debug("pmu mappings not available\n");
+ 		return -ENOENT;
+@@ -339,8 +337,6 @@ int perf_env__read_pmu_mappings(struct perf_env *env)
+ 		return -ENOMEM;
+ 
+ 	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (!pmu->name)
+-			continue;
+ 		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
+ 			goto error;
+ 		/* include a NULL character at the end */
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 7af85a479786b..083919e3b8d88 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -746,20 +746,14 @@ static int write_pmu_mappings(struct feat_fd *ff,
+ 	 * Do a first pass to count number of pmu to avoid lseek so this
+ 	 * works in pipe mode as well.
+ 	 */
+-	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (!pmu->name)
+-			continue;
++	while ((pmu = perf_pmus__scan(pmu)))
+ 		pmu_num++;
+-	}
+ 
+ 	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (!pmu->name)
+-			continue;
+-
+ 		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 3dc8a4968beb9..ac8c0ef48a7f3 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -2676,8 +2676,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 
+ 	/* If we have branch cycles always annotate them. */
+ 	if (bs && bs->nr && entries[0].flags.cycles) {
+-		int i;
+-
+ 		bi = sample__resolve_bstack(sample, al);
+ 		if (bi) {
+ 			struct addr_map_symbol *prev = NULL;
+@@ -2692,7 +2690,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 			 * Note that perf stores branches reversed from
+ 			 * program order!
+ 			 */
+-			for (i = bs->nr - 1; i >= 0; i--) {
++			for (int i = bs->nr - 1; i >= 0; i--) {
+ 				addr_map_symbol__account_cycles(&bi[i].from,
+ 					nonany_branch_mode ? NULL : prev,
+ 					bi[i].flags.cycles);
+@@ -2701,6 +2699,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 				if (total_cycles)
+ 					*total_cycles += bi[i].flags.cycles;
+ 			}
++			for (unsigned int i = 0; i < bs->nr; i++) {
++				map__put(bi[i].to.ms.map);
++				maps__put(bi[i].to.ms.maps);
++				map__put(bi[i].from.ms.map);
++				maps__put(bi[i].from.ms.maps);
++			}
+ 			free(bi);
+ 		}
+ 	}
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index f4cb41ee23cdb..fdab969e44b12 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2622,16 +2622,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ 		save_lbr_cursor_node(thread, cursor, i);
+ 	}
+ 
+-	/* Add LBR ip from first entries.to */
+-	ip = entries[0].to;
+-	flags = &entries[0].flags;
+-	*branch_from = entries[0].from;
+-	err = add_callchain_ip(thread, cursor, parent,
+-			       root_al, &cpumode, ip,
+-			       true, flags, NULL,
+-			       *branch_from);
+-	if (err)
+-		return err;
++	if (lbr_nr > 0) {
++		/* Add LBR ip from first entries.to */
++		ip = entries[0].to;
++		flags = &entries[0].flags;
++		*branch_from = entries[0].from;
++		err = add_callchain_ip(thread, cursor, parent,
++				root_al, &cpumode, ip,
++				true, flags, NULL,
++				*branch_from);
++		if (err)
++			return err;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
+index c07fe3a907220..954b235e12e51 100644
+--- a/tools/perf/util/mem-events.c
++++ b/tools/perf/util/mem-events.c
+@@ -37,7 +37,7 @@ struct perf_mem_event * __weak perf_mem_events__ptr(int i)
+ 	return &perf_mem_events[i];
+ }
+ 
+-char * __weak perf_mem_events__name(int i, char *pmu_name  __maybe_unused)
++const char * __weak perf_mem_events__name(int i, const char *pmu_name  __maybe_unused)
+ {
+ 	struct perf_mem_event *e = perf_mem_events__ptr(i);
+ 
+@@ -53,7 +53,7 @@ char * __weak perf_mem_events__name(int i, char *pmu_name  __maybe_unused)
+ 		return mem_loads_name;
+ 	}
+ 
+-	return (char *)e->name;
++	return e->name;
+ }
+ 
+ __weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
+@@ -185,8 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ {
+ 	int i = *argv_nr, k = 0;
+ 	struct perf_mem_event *e;
+-	struct perf_pmu *pmu;
+-	char *s;
+ 
+ 	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ 		e = perf_mem_events__ptr(j);
+@@ -203,21 +201,24 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ 			rec_argv[i++] = "-e";
+ 			rec_argv[i++] = perf_mem_events__name(j, NULL);
+ 		} else {
++			struct perf_pmu *pmu = NULL;
++
+ 			if (!e->supported) {
+ 				perf_mem_events__print_unsupport_hybrid(e, j);
+ 				return -1;
+ 			}
+ 
+ 			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
++				const char *s = perf_mem_events__name(j, pmu->name);
++
+ 				rec_argv[i++] = "-e";
+-				s = perf_mem_events__name(j, pmu->name);
+ 				if (s) {
+-					s = strdup(s);
+-					if (!s)
++					char *copy = strdup(s);
++					if (!copy)
+ 						return -1;
+ 
+-					rec_argv[i++] = s;
+-					rec_tmp[k++] = s;
++					rec_argv[i++] = copy;
++					rec_tmp[k++] = copy;
+ 				}
+ 			}
+ 		}
+diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
+index 12372309d60ed..b40ad6ea93fcd 100644
+--- a/tools/perf/util/mem-events.h
++++ b/tools/perf/util/mem-events.h
+@@ -38,7 +38,7 @@ extern unsigned int perf_mem_events__loads_ldlat;
+ int perf_mem_events__parse(const char *str);
+ int perf_mem_events__init(void);
+ 
+-char *perf_mem_events__name(int i, char *pmu_name);
++const char *perf_mem_events__name(int i, const char *pmu_name);
+ struct perf_mem_event *perf_mem_events__ptr(int i);
+ bool is_mem_loads_aux_event(struct evsel *leader);
+ 
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 30311844eea7b..596ba10129870 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -271,7 +271,7 @@ __add_event(struct list_head *list, int *idx,
+ 	evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
+ 	evsel->auto_merge_stats = auto_merge_stats;
+ 	evsel->pmu = pmu;
+-	evsel->pmu_name = pmu && pmu->name ? strdup(pmu->name) : NULL;
++	evsel->pmu_name = pmu ? strdup(pmu->name) : NULL;
+ 
+ 	if (name)
+ 		evsel->name = strdup(name);
+@@ -446,9 +446,6 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
+ 	if (parse_state->pmu_filter == NULL)
+ 		return false;
+ 
+-	if (pmu->name == NULL)
+-		return true;
+-
+ 	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
+ }
+ 
+@@ -793,13 +790,7 @@ parse_events_config_bpf(struct parse_events_state *parse_state,
+ 
+ 			parse_events_error__handle(parse_state->error, idx,
+ 						strdup(errbuf),
+-						strdup(
+-"Hint:\tValid config terms:\n"
+-"     \tmap:[<arraymap>].value<indices>=[value]\n"
+-"     \tmap:[<eventmap>].event<indices>=[event]\n"
+-"\n"
+-"     \twhere <indices> is something like [0,3...5] or [all]\n"
+-"     \t(add -v to see detail)"));
++						NULL);
+ 			return err;
+ 		}
+ 	}
+@@ -1557,7 +1548,7 @@ static bool config_term_percore(struct list_head *config_terms)
+ }
+ 
+ int parse_events_add_pmu(struct parse_events_state *parse_state,
+-			 struct list_head *list, char *name,
++			 struct list_head *list, const char *name,
+ 			 struct list_head *head_config,
+ 			 bool auto_merge_stats)
+ {
+diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
+index b0eb95f93e9c3..8e8682ca84654 100644
+--- a/tools/perf/util/parse-events.h
++++ b/tools/perf/util/parse-events.h
+@@ -190,7 +190,7 @@ int parse_events_add_breakpoint(struct parse_events_state *parse_state,
+ 				u64 addr, char *type, u64 len,
+ 				struct list_head *head_config);
+ int parse_events_add_pmu(struct parse_events_state *parse_state,
+-			 struct list_head *list, char *name,
++			 struct list_head *list, const char *name,
+ 			 struct list_head *head_config,
+ 			 bool auto_merge_stats);
+ 
+diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
+index 99335ec586ae8..d7d084cc4140d 100644
+--- a/tools/perf/util/parse-events.l
++++ b/tools/perf/util/parse-events.l
+@@ -175,7 +175,6 @@ do {							\
+ %x mem
+ %s config
+ %x event
+-%x array
+ 
+ group		[^,{}/]*[{][^}]*[}][^,{}/]*
+ event_pmu	[^,{}/]+[/][^/]*[/][^,{}/]*
+@@ -251,14 +250,6 @@ non_digit	[^0-9]
+ 		}
+ }
+ 
+-<array>{
+-"]"			{ BEGIN(config); return ']'; }
+-{num_dec}		{ return value(yyscanner, 10); }
+-{num_hex}		{ return value(yyscanner, 16); }
+-,			{ return ','; }
+-"\.\.\."		{ return PE_ARRAY_RANGE; }
+-}
+-
+ <config>{
+ 	/*
+ 	 * Please update config_term_names when new static term is added.
+@@ -302,8 +293,6 @@ r0x{num_raw_hex}	{ return str(yyscanner, PE_RAW); }
+ {lc_type}-{lc_op_result}	{ return lc_str(yyscanner, _parse_state); }
+ {lc_type}-{lc_op_result}-{lc_op_result}	{ return lc_str(yyscanner, _parse_state); }
+ {name_minus}		{ return str(yyscanner, PE_NAME); }
+-\[all\]			{ return PE_ARRAY_ALL; }
+-"["			{ BEGIN(array); return '['; }
+ @{drv_cfg_term}		{ return drv_str(yyscanner, PE_DRV_CFG_TERM); }
+ }
+ 
+diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
+index c590cf7f02a45..9cbbc36f01b59 100644
+--- a/tools/perf/util/parse-events.y
++++ b/tools/perf/util/parse-events.y
+@@ -22,12 +22,6 @@
+ 
+ void parse_events_error(YYLTYPE *loc, void *parse_state, void *scanner, char const *msg);
+ 
+-#define ABORT_ON(val) \
+-do { \
+-	if (val) \
+-		YYABORT; \
+-} while (0)
+-
+ #define PE_ABORT(val) \
+ do { \
+ 	if (val == -ENOMEM) \
+@@ -70,8 +64,6 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %token PE_LEGACY_CACHE
+ %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
+ %token PE_ERROR
+-%token PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+-%token PE_ARRAY_ALL PE_ARRAY_RANGE
+ %token PE_DRV_CFG_TERM
+ %token PE_TERM_HW
+ %type <num> PE_VALUE
+@@ -88,9 +80,8 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %type <str> PE_MODIFIER_EVENT
+ %type <str> PE_MODIFIER_BP
+ %type <str> PE_EVENT_NAME
+-%type <str> PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+ %type <str> PE_DRV_CFG_TERM
+-%type <str> name_or_raw name_or_legacy
++%type <str> name_or_raw
+ %destructor { free ($$); } <str>
+ %type <term> event_term
+ %destructor { parse_events_term__delete ($$); } <term>
+@@ -117,10 +108,6 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %destructor { free_list_evsel ($$); } <list_evsel>
+ %type <tracepoint_name> tracepoint_name
+ %destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
+-%type <array> array
+-%type <array> array_term
+-%type <array> array_terms
+-%destructor { free ($$.ranges); } <array>
+ %type <hardware_term> PE_TERM_HW
+ %destructor { free ($$.str); } <hardware_term>
+ 
+@@ -135,7 +122,6 @@ static void free_list_evsel(struct list_head* list_evsel)
+ 		char *sys;
+ 		char *event;
+ 	} tracepoint_name;
+-	struct parse_events_array array;
+ 	struct hardware_term {
+ 		char *str;
+ 		u64 num;
+@@ -326,7 +312,7 @@ PE_NAME opt_pmu_config
+ 		}
+ 
+ 		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+-			char *name = pmu->name;
++			const char *name = pmu->name;
+ 
+ 			if (parse_events__filter_pmu(parse_state, pmu))
+ 				continue;
+@@ -376,18 +362,6 @@ PE_NAME opt_pmu_config
+ #undef CLEANUP
+ }
+ |
+-PE_KERNEL_PMU_EVENT sep_dc
+-{
+-	struct list_head *list;
+-	int err;
+-
+-	err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list);
+-	free($1);
+-	if (err < 0)
+-		YYABORT;
+-	$$ = list;
+-}
+-|
+ PE_NAME sep_dc
+ {
+ 	struct list_head *list;
+@@ -408,57 +382,6 @@ PE_NAME sep_dc
+ 	free($1);
+ 	$$ = list;
+ }
+-|
+-PE_KERNEL_PMU_EVENT opt_pmu_config
+-{
+-	struct list_head *list;
+-	int err;
+-
+-	/* frees $2 */
+-	err = parse_events_multi_pmu_add(_parse_state, $1, $2, &list);
+-	free($1);
+-	if (err < 0)
+-		YYABORT;
+-	$$ = list;
+-}
+-|
+-PE_PMU_EVENT_FAKE sep_dc
+-{
+-	struct list_head *list;
+-	int err;
+-
+-	list = alloc_list();
+-	if (!list)
+-		YYABORT;
+-
+-	err = parse_events_add_pmu(_parse_state, list, $1, /*head_config=*/NULL,
+-				   /*auto_merge_stats=*/false);
+-	free($1);
+-	if (err < 0) {
+-		free(list);
+-		YYABORT;
+-	}
+-	$$ = list;
+-}
+-|
+-PE_PMU_EVENT_FAKE opt_pmu_config
+-{
+-	struct list_head *list;
+-	int err;
+-
+-	list = alloc_list();
+-	if (!list)
+-		YYABORT;
+-
+-	err = parse_events_add_pmu(_parse_state, list, $1, $2, /*auto_merge_stats=*/false);
+-	free($1);
+-	parse_events_terms__delete($2);
+-	if (err < 0) {
+-		free(list);
+-		YYABORT;
+-	}
+-	$$ = list;
+-}
+ 
+ value_sym:
+ PE_VALUE_SYM_HW
+@@ -683,7 +606,9 @@ PE_RAW opt_event_config
+ 		YYNOMEM;
+ 	errno = 0;
+ 	num = strtoull($1 + 1, NULL, 16);
+-	ABORT_ON(errno);
++	/* Given the lexer will only give [a-fA-F0-9]+ a failure here should be impossible. */
++	if (errno)
++		YYABORT;
+ 	free($1);
+ 	err = parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, num, $2,
+ 				       /*wildcard=*/false);
+@@ -796,8 +721,6 @@ event_term
+ 
+ name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
+ 
+-name_or_legacy: PE_NAME | PE_LEGACY_CACHE
+-
+ event_term:
+ PE_RAW
+ {
+@@ -812,7 +735,7 @@ PE_RAW
+ 	$$ = term;
+ }
+ |
+-name_or_raw '=' name_or_legacy
++name_or_raw '=' name_or_raw
+ {
+ 	struct parse_events_term *term;
+ 	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
+@@ -891,7 +814,7 @@ PE_TERM_HW
+ 	$$ = term;
+ }
+ |
+-PE_TERM '=' name_or_legacy
++PE_TERM '=' name_or_raw
+ {
+ 	struct parse_events_term *term;
+ 	int err = parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3);
+@@ -948,35 +871,6 @@ PE_TERM
+ 	$$ = term;
+ }
+ |
+-name_or_raw array '=' name_or_legacy
+-{
+-	struct parse_events_term *term;
+-	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $4, &@1, &@4);
+-
+-	if (err) {
+-		free($1);
+-		free($4);
+-		free($2.ranges);
+-		PE_ABORT(err);
+-	}
+-	term->array = $2;
+-	$$ = term;
+-}
+-|
+-name_or_raw array '=' PE_VALUE
+-{
+-	struct parse_events_term *term;
+-	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $4, false, &@1, &@4);
+-
+-	if (err) {
+-		free($1);
+-		free($2.ranges);
+-		PE_ABORT(err);
+-	}
+-	term->array = $2;
+-	$$ = term;
+-}
+-|
+ PE_DRV_CFG_TERM
+ {
+ 	struct parse_events_term *term;
+@@ -994,65 +888,6 @@ PE_DRV_CFG_TERM
+ 	$$ = term;
+ }
+ 
+-array:
+-'[' array_terms ']'
+-{
+-	$$ = $2;
+-}
+-|
+-PE_ARRAY_ALL
+-{
+-	$$.nr_ranges = 0;
+-	$$.ranges = NULL;
+-}
+-
+-array_terms:
+-array_terms ',' array_term
+-{
+-	struct parse_events_array new_array;
+-
+-	new_array.nr_ranges = $1.nr_ranges + $3.nr_ranges;
+-	new_array.ranges = realloc($1.ranges,
+-				sizeof(new_array.ranges[0]) *
+-				new_array.nr_ranges);
+-	if (!new_array.ranges)
+-		YYNOMEM;
+-	memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
+-	       $3.nr_ranges * sizeof(new_array.ranges[0]));
+-	free($3.ranges);
+-	$$ = new_array;
+-}
+-|
+-array_term
+-
+-array_term:
+-PE_VALUE
+-{
+-	struct parse_events_array array;
+-
+-	array.nr_ranges = 1;
+-	array.ranges = malloc(sizeof(array.ranges[0]));
+-	if (!array.ranges)
+-		YYNOMEM;
+-	array.ranges[0].start = $1;
+-	array.ranges[0].length = 1;
+-	$$ = array;
+-}
+-|
+-PE_VALUE PE_ARRAY_RANGE PE_VALUE
+-{
+-	struct parse_events_array array;
+-
+-	ABORT_ON($3 < $1);
+-	array.nr_ranges = 1;
+-	array.ranges = malloc(sizeof(array.ranges[0]));
+-	if (!array.ranges)
+-		YYNOMEM;
+-	array.ranges[0].start = $1;
+-	array.ranges[0].length = $3 - $1 + 1;
+-	$$ = array;
+-}
+-
+ sep_dc: ':' |
+ 
+ sep_slash_slash_dc: '/' '/' | ':' |
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index d5406effc1695..f20fcea215e4b 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -29,7 +29,9 @@
+ #include "fncache.h"
+ #include "util/evsel_config.h"
+ 
+-struct perf_pmu perf_pmu__fake;
++struct perf_pmu perf_pmu__fake = {
++	.name = "fake",
++};
+ 
+ /**
+  * struct perf_pmu_format - Values from a format file read from
+@@ -840,13 +842,13 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+ 	return NULL;
+ }
+ 
+-char * __weak
++const char * __weak
+ pmu_find_real_name(const char *name)
+ {
+-	return (char *)name;
++	return name;
+ }
+ 
+-char * __weak
++const char * __weak
+ pmu_find_alias_name(const char *name __maybe_unused)
+ {
+ 	return NULL;
+@@ -866,8 +868,8 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
+ 	LIST_HEAD(format);
+ 	LIST_HEAD(aliases);
+ 	__u32 type;
+-	char *name = pmu_find_real_name(lookup_name);
+-	char *alias_name;
++	const char *name = pmu_find_real_name(lookup_name);
++	const char *alias_name;
+ 
+ 	/*
+ 	 * The pmu data we store & need consists of the pmu
+@@ -1710,7 +1712,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+ 		   name ?: "N/A", buf, config_name, config);
+ }
+ 
+-int perf_pmu__match(char *pattern, char *name, char *tok)
++int perf_pmu__match(const char *pattern, const char *name, const char *tok)
+ {
+ 	if (!name)
+ 		return -1;
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index 6b414cecbad22..461af638399c9 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -39,7 +39,7 @@ struct perf_pmu_caps {
+  */
+ struct perf_pmu {
+ 	/** @name: The name of the PMU such as "cpu". */
+-	char *name;
++	const char *name;
+ 	/**
+ 	 * @alias_name: Optional alternate name for the PMU determined in
+ 	 * architecture specific code.
+@@ -275,10 +275,10 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+ 				   const char *config_name);
+ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
+ 
+-int perf_pmu__match(char *pattern, char *name, char *tok);
++int perf_pmu__match(const char *pattern, const char *name, const char *tok);
+ 
+-char *pmu_find_real_name(const char *name);
+-char *pmu_find_alias_name(const char *name);
++const char *pmu_find_real_name(const char *name);
++const char *pmu_find_alias_name(const char *name);
+ double perf_pmu__cpu_slots_per_cycle(void);
+ int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
+ int perf_pmu__pathname_scnprintf(char *buf, size_t size,
+diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1
+index df3087000efb8..145d6f06fa72d 100644
+--- a/tools/power/cpupower/man/cpupower-powercap-info.1
++++ b/tools/power/cpupower/man/cpupower-powercap-info.1
+@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
+ .SH "DOCUMENTATION"
+ 
+ kernel sources:
+-Documentation/power/powercap/powercap.txt
++Documentation/power/powercap/powercap.rst
+ 
+ 
+ .SH "SEE ALSO"
+diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
+index 464fc39ed2776..68118c37f0b56 100644
+--- a/tools/testing/cxl/test/mem.c
++++ b/tools/testing/cxl/test/mem.c
+@@ -1450,11 +1450,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
+ 	mdata->mes.mds = mds;
+ 	cxl_mock_add_event_logs(&mdata->mes);
+ 
+-	cxlmd = devm_cxl_add_memdev(cxlds);
++	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ 	if (IS_ERR(cxlmd))
+ 		return PTR_ERR(cxlmd);
+ 
+-	rc = cxl_memdev_setup_fw_upload(mds);
++	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+index c7636e18b1ebd..aa9f67eb1c95b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
++++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
+ 	int link_fd[2] = {};
+ 	__s32 btf_id[2] = {};
+ 
++	if (!env.has_testmod) {
++		test__skip();
++		return;
++	}
++
+ 	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ 		.expected_attach_type = BPF_TRACE_FENTRY,
+ 	);
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 58fe2c586ed76..09c189761926c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	val.noise = noise;
+@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
+ 	ASSERT_EQ(topts.retval, 0, "tailcall retval");
+ 
+ 	data_fd = bpf_map__fd(obj->maps.bss);
+-	if (!ASSERT_GE(map_fd, 0, "bss map fd"))
++	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ 		goto out;
+ 
+ 	i = 0;
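
All of the tailcall hunks fix the same two mistakes: the test kept validating map_fd where it had just produced data_fd, and it bailed out with a bare return instead of jumping to the cleanup label. A distilled sketch of the corrected shape (hypothetical paths):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int map_fd = open("/dev/null", O_RDONLY);
	int data_fd = open("/nonexistent", O_RDONLY); /* will fail */
	int ret = 1;

	if (map_fd < 0)
		goto out;
	if (data_fd < 0)   /* checking map_fd here would wrongly pass */
		goto out;
	ret = 0;
out:
	/* single cleanup label: nothing obtained above is leaked */
	if (map_fd >= 0)
		close(map_fd);
	if (data_fd >= 0)
		close(data_fd);
	printf("ret=%d\n", ret);
	return ret;
}
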
+diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
+index bc91bef5d254e..0c5e469ae38fa 100644
+--- a/tools/testing/selftests/mm/mdwe_test.c
++++ b/tools/testing/selftests/mm/mdwe_test.c
+@@ -168,13 +168,10 @@ TEST_F(mdwe, mmap_FIXED)
+ 	self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
+ 	ASSERT_NE(self->p, MAP_FAILED);
+ 
+-	p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC,
++	/* MAP_FIXED unmaps the existing page before mapping which is allowed */
++	p = mmap(self->p, self->size, PROT_READ | PROT_EXEC,
+ 		 self->flags | MAP_FIXED, 0, 0);
+-	if (variant->enabled) {
+-		EXPECT_EQ(p, MAP_FAILED);
+-	} else {
+-		EXPECT_EQ(p, self->p);
+-	}
++	EXPECT_EQ(p, self->p);
+ }
+ 
+ TEST_F(mdwe, arm64_BTI)
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 621b1964ea6f3..5a02fef4b070c 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3259,6 +3259,7 @@ userspace_pm_rm_sf_addr_ns1()
+ 	local addr=$1
+ 	local id=$2
+ 	local tk sp da dp
++	local cnt_addr cnt_sf
+ 
+ 	tk=$(grep "type:1," "$evts_ns1" |
+ 	     sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+@@ -3268,11 +3269,13 @@ userspace_pm_rm_sf_addr_ns1()
+ 	     sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ 	dp=$(grep "type:10" "$evts_ns1" |
+ 	     sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
++	cnt_addr=$(rm_addr_count ${ns1})
++	cnt_sf=$(rm_sf_count ${ns1})
+ 	ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
+ 	ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
+ 				lport $sp rip $da rport $dp token $tk
+-	wait_rm_addr $ns1 1
+-	wait_rm_sf $ns1 1
++	wait_rm_addr $ns1 "${cnt_addr}"
++	wait_rm_sf $ns1 "${cnt_sf}"
+ }
+ 
+ userspace_pm_add_sf()
+@@ -3294,17 +3297,20 @@ userspace_pm_rm_sf_addr_ns2()
+ 	local addr=$1
+ 	local id=$2
+ 	local tk da dp sp
++	local cnt_addr cnt_sf
+ 
+ 	tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ 	da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+ 	dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ 	sp=$(grep "type:10" "$evts_ns2" |
+ 	     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++	cnt_addr=$(rm_addr_count ${ns2})
++	cnt_sf=$(rm_sf_count ${ns2})
+ 	ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
+ 	ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
+ 				rip $da rport $dp token $tk
+-	wait_rm_addr $ns2 1
+-	wait_rm_sf $ns2 1
++	wait_rm_addr $ns2 "${cnt_addr}"
++	wait_rm_sf $ns2 "${cnt_sf}"
+ }
+ 
+ userspace_tests()
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index f838dd370f6af..b3b2dc5a630cf 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -2048,7 +2048,7 @@ run_test() {
+ 	case $ret in
+ 		0)
+ 			all_skipped=false
+-			[ $exitcode=$ksft_skip ] && exitcode=0
++			[ $exitcode -eq $ksft_skip ] && exitcode=0
+ 		;;
+ 		$ksft_skip)
+ 			[ $all_skipped = true ] && exitcode=$ksft_skip
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 321db8850da00..bced422b78f72 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -6,13 +6,14 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	nft_concat_range.sh nft_conntrack_helper.sh \
+ 	nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+-	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
++	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
++	conntrack_sctp_collision.sh xt_string.sh
+ 
+ HOSTPKG_CONFIG := pkg-config
+ 
+ CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
+ LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+ 
+-TEST_GEN_FILES =  nf-queue connect_close audit_logread
++TEST_GEN_FILES =  nf-queue connect_close audit_logread sctp_collision
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+new file mode 100755
+index 0000000000000..a924e595cfd8b
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+@@ -0,0 +1,89 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Test for the SCTP collision scenario below:
++#
++#   14:35:47.655279 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT] [init tag: 2017837359]
++#   14:35:48.353250 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT] [init tag: 1187206187]
++#   14:35:48.353275 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT ACK] [init tag: 2017837359]
++#   14:35:48.353283 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [COOKIE ECHO]
++#   14:35:48.353977 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [COOKIE ACK]
++#   14:35:48.855335 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT ACK] [init tag: 164579970]
++#
++# TOPO: SERVER_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) CLIENT_NS
++
++CLIENT_NS=$(mktemp -u client-XXXXXXXX)
++CLIENT_IP="198.51.200.1"
++CLIENT_PORT=1234
++
++SERVER_NS=$(mktemp -u server-XXXXXXXX)
++SERVER_IP="198.51.100.1"
++SERVER_PORT=1234
++
++ROUTER_NS=$(mktemp -u router-XXXXXXXX)
++CLIENT_GW="198.51.200.2"
++SERVER_GW="198.51.100.2"
++
++# setup the topo
++setup() {
++	ip net add $CLIENT_NS
++	ip net add $SERVER_NS
++	ip net add $ROUTER_NS
++	ip -n $SERVER_NS link add link0 type veth peer name link1 netns $ROUTER_NS
++	ip -n $CLIENT_NS link add link3 type veth peer name link2 netns $ROUTER_NS
++
++	ip -n $SERVER_NS link set link0 up
++	ip -n $SERVER_NS addr add $SERVER_IP/24 dev link0
++	ip -n $SERVER_NS route add $CLIENT_IP dev link0 via $SERVER_GW
++
++	ip -n $ROUTER_NS link set link1 up
++	ip -n $ROUTER_NS link set link2 up
++	ip -n $ROUTER_NS addr add $SERVER_GW/24 dev link1
++	ip -n $ROUTER_NS addr add $CLIENT_GW/24 dev link2
++	ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1
++
++	ip -n $CLIENT_NS link set link3 up
++	ip -n $CLIENT_NS addr add $CLIENT_IP/24 dev link3
++	ip -n $CLIENT_NS route add $SERVER_IP dev link3 via $CLIENT_GW
++
++	# simulate the delay on OVS upcall by setting up a delay for INIT_ACK with
++	# tc on $SERVER_NS side
++	tc -n $SERVER_NS qdisc add dev link0 root handle 1: htb
++	tc -n $SERVER_NS class add dev link0 parent 1: classid 1:1 htb rate 100mbit
++	tc -n $SERVER_NS filter add dev link0 parent 1: protocol ip u32 match ip protocol 132 \
++		0xff match u8 2 0xff at 32 flowid 1:1
++	tc -n $SERVER_NS qdisc add dev link0 parent 1:1 handle 10: netem delay 1200ms
++
++	# simulate the ctstate check on OVS nf_conntrack
++	ip net exec $ROUTER_NS iptables -A FORWARD -m state --state INVALID,UNTRACKED -j DROP
++	ip net exec $ROUTER_NS iptables -A INPUT -p sctp -j DROP
++
++	# use a smaller number for assoc's max_retrans to reproduce the issue
++	modprobe sctp
++	ip net exec $CLIENT_NS sysctl -wq net.sctp.association_max_retrans=3
++}
++
++cleanup() {
++	ip net exec $CLIENT_NS pkill sctp_collision >/dev/null 2>&1
++	ip net exec $SERVER_NS pkill sctp_collision >/dev/null 2>&1
++	ip net del "$CLIENT_NS"
++	ip net del "$SERVER_NS"
++	ip net del "$ROUTER_NS"
++}
++
++do_test() {
++	ip net exec $SERVER_NS ./sctp_collision server \
++		$SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT &
++	ip net exec $CLIENT_NS ./sctp_collision client \
++		$CLIENT_IP $CLIENT_PORT $SERVER_IP $SERVER_PORT
++}
++
++# NOTE: one way to work around the issue is set a smaller hb_interval
++# ip net exec $CLIENT_NS sysctl -wq net.sctp.hb_interval=3500
++
++# run the test case
++trap cleanup EXIT
++setup && \
++echo "Test for SCTP Collision in nf_conntrack:" && \
++do_test && echo "PASS!"
++exit $?
+diff --git a/tools/testing/selftests/netfilter/sctp_collision.c b/tools/testing/selftests/netfilter/sctp_collision.c
+new file mode 100644
+index 0000000000000..21bb1cfd8a856
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/sctp_collision.c
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <arpa/inet.h>
++
++int main(int argc, char *argv[])
++{
++	struct sockaddr_in saddr = {}, daddr = {};
++	int sd, ret, len = sizeof(daddr);
++	struct timeval tv = {25, 0};
++	char buf[] = "hello";
++
++	if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
++		printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n",
++		       argv[0]);
++		return -1;
++	}
++
++	sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
++	if (sd < 0) {
++		printf("Failed to create sd\n");
++		return -1;
++	}
++
++	saddr.sin_family = AF_INET;
++	saddr.sin_addr.s_addr = inet_addr(argv[2]);
++	saddr.sin_port = htons(atoi(argv[3]));
++
++	ret = bind(sd, (struct sockaddr *)&saddr, sizeof(saddr));
++	if (ret < 0) {
++		printf("Failed to bind to address\n");
++		goto out;
++	}
++
++	ret = listen(sd, 5);
++	if (ret < 0) {
++		printf("Failed to listen on port\n");
++		goto out;
++	}
++
++	daddr.sin_family = AF_INET;
++	daddr.sin_addr.s_addr = inet_addr(argv[4]);
++	daddr.sin_port = htons(atoi(argv[5]));
++
++	/* make test shorter than 25s */
++	ret = setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
++	if (ret < 0) {
++		printf("Failed to setsockopt SO_RCVTIMEO\n");
++		goto out;
++	}
++
++	if (!strcmp(argv[1], "server")) {
++		sleep(1); /* wait a bit for client's INIT */
++		ret = connect(sd, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to connect to peer\n");
++			goto out;
++		}
++		ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++		if (ret < 0) {
++			printf("Failed to recv msg %d\n", ret);
++			goto out;
++		}
++		ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to send msg %d\n", ret);
++			goto out;
++		}
++		printf("Server: sent! %d\n", ret);
++	}
++
++	if (!strcmp(argv[1], "client")) {
++		usleep(300000); /* wait a bit for server's listening */
++		ret = connect(sd, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to connect to peer\n");
++			goto out;
++		}
++		sleep(1); /* wait a bit for server's delayed INIT_ACK to reproduce the issue */
++		ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to send msg %d\n", ret);
++			goto out;
++		}
++		ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++		if (ret < 0) {
++			printf("Failed to recv msg %d\n", ret);
++			goto out;
++		}
++		printf("Client: rcvd! %d\n", ret);
++	}
++	ret = 0;
++out:
++	close(sd);
++	return ret;
++}
+diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
+new file mode 100755
+index 0000000000000..1802653a47287
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/xt_string.sh
+@@ -0,0 +1,128 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# return code to signal skipped test
++ksft_skip=4
++rc=0
++
++if ! iptables --version >/dev/null 2>&1; then
++	echo "SKIP: Test needs iptables"
++	exit $ksft_skip
++fi
++if ! ip -V >/dev/null 2>&1; then
++	echo "SKIP: Test needs iproute2"
++	exit $ksft_skip
++fi
++if ! nc -h >/dev/null 2>&1; then
++	echo "SKIP: Test needs netcat"
++	exit $ksft_skip
++fi
++
++pattern="foo bar baz"
++patlen=11
++hdrlen=$((20 + 8)) # IPv4 + UDP
++ns="ns-$(mktemp -u XXXXXXXX)"
++trap 'ip netns del $ns' EXIT
++ip netns add "$ns"
++ip -net "$ns" link add d0 type dummy
++ip -net "$ns" link set d0 up
++ip -net "$ns" addr add 10.1.2.1/24 dev d0
++
++#ip netns exec "$ns" tcpdump -npXi d0 &
++#tcpdump_pid=$!
++#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
++
++add_rule() { # (alg, from, to)
++	ip netns exec "$ns" \
++		iptables -A OUTPUT -o d0 -m string \
++			--string "$pattern" --algo $1 --from $2 --to $3
++}
++showrules() { # ()
++	ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
++}
++zerorules() {
++	ip netns exec "$ns" iptables -Z OUTPUT
++}
++countrule() { # (pattern)
++	showrules | grep -c -- "$*"
++}
++send() { # (offset)
++	( for ((i = 0; i < $1 - $hdrlen; i++)); do
++		printf " "
++	  done
++	  printf "$pattern"
++	) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
++}
++
++add_rule bm 1000 1500
++add_rule bm 1400 1600
++add_rule kmp 1000 1500
++add_rule kmp 1400 1600
++
++zerorules
++send 0
++send $((1000 - $patlen))
++if [ $(countrule -c 0 0) -ne 4 ]; then
++	echo "FAIL: rules match data before --from"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1000
++send $((1400 - $patlen))
++if [ $(countrule -c 2) -ne 2 ]; then
++	echo "FAIL: only two rules should match at low offset"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1500 - $patlen))
++if [ $(countrule -c 1) -ne 4 ]; then
++	echo "FAIL: all rules should match at end of packet"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1495
++if [ $(countrule -c 1) -ne 1 ]; then
++	echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1500
++if [ $(countrule -c 1) -ne 2 ]; then
++	echo "FAIL: two rules should match pattern at start of second fragment"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen))
++if [ $(countrule -c 1) -ne 2 ]; then
++	echo "FAIL: two rules should match pattern at end of largest --to"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen + 1))
++if [ $(countrule -c 1) -ne 0 ]; then
++	echo "FAIL: no rules should match pattern extending largest --to"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1600
++if [ $(countrule -c 1) -ne 0 ]; then
++	echo "FAIL: no rule should match pattern past largest --to"
++	showrules
++	((rc--))
++fi
++
++exit $rc
+diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+index 4e86f927880c3..01cc37bf611c3 100644
+--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
+ 		break;
+ 
+ 	case PIDFD_PASS:
+-		ksft_test_result_pass("%s test: Passed\n");
++		ksft_test_result_pass("%s test: Passed\n", test_name);
+ 		break;
+ 
+ 	default:
+diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
+index 00a07e7c571cd..c081ae91313aa 100644
+--- a/tools/testing/selftests/pidfd/pidfd_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_test.c
+@@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
+ 
+ static void *test_pidfd_poll_exec_thread(void *priv)
+ {
+-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	ksft_print_msg("Child Thread: doing exec of sleep\n");
+ 
+ 	execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
+ 
+-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
++	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	return NULL;
+ }
+@@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
+ {
+ 	pthread_t t1;
+ 
+-	ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
++	ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
+ 			syscall(SYS_gettid));
+ 	pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
+ 	/*
+@@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
+ 
+ static void *test_pidfd_poll_leader_exit_thread(void *priv)
+ {
+-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	sleep(CHILD_THREAD_MIN_WAIT);
+-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ 	return NULL;
+ }
+ 
+@@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
+ {
+ 	pthread_t t1, t2;
+ 
+-	ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++	ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ 	pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ 	pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ 
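All of the %d to %ld conversions above exist because syscall(2) returns long; on LP64 targets printing that value with %d reads only 32 of the 64 bits and misaligns any arguments that follow. A self-contained sketch of the correct form:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long tid = syscall(SYS_gettid); /* syscall() returns long */

        printf("pid %d tid %ld\n", getpid(), tid);
        return 0;
}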
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index 9b9751206e1c1..deddd26c4fbf8 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -224,9 +224,14 @@ int main(int argc, char **argv)
+ 		return ksft_exit_skip("Not running as root. Skipping...\n");
+ 
+ 	if (has_ben) {
++		if (argc - ben_ind >= BENCHMARK_ARGS)
++			ksft_exit_fail_msg("Too long benchmark command.\n");
++
+ 		/* Extract benchmark command from command line. */
+ 		for (i = ben_ind; i < argc; i++) {
+ 			benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
++			if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
++				ksft_exit_fail_msg("Too long benchmark command argument.\n");
+ 			sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+ 		}
+ 		benchmark_cmd[ben_count] = NULL;
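The two new checks reject input before the sprintf() copy into the fixed-size benchmark_cmd_area buffers can overflow. A hedged userspace sketch of the same guard pattern; BENCH_ARGS and BENCH_ARG_SIZE are illustrative constants, not the selftest's real values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BENCH_ARGS     64       /* illustrative bound */
#define BENCH_ARG_SIZE 256      /* illustrative bound */

static char cmd_area[BENCH_ARGS][BENCH_ARG_SIZE];

static void copy_args(int argc, char **argv, int start)
{
        if (argc - start >= BENCH_ARGS) {
                fprintf(stderr, "too many benchmark arguments\n");
                exit(1);
        }
        for (int i = start; i < argc; i++) {
                if (strlen(argv[i]) >= BENCH_ARG_SIZE) {
                        fprintf(stderr, "benchmark argument too long\n");
                        exit(1);
                }
                strcpy(cmd_area[i - start], argv[i]); /* length already checked */
        }
}

int main(int argc, char **argv)
{
        copy_args(argc, argv, 1);
        if (argc > 1)
                printf("first copied argument: %s\n", cmd_area[0]);
        return 0;
}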
+diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
+index eb0e46905bf9d..8f9b06d9ce039 100644
+--- a/tools/testing/selftests/x86/lam.c
++++ b/tools/testing/selftests/x86/lam.c
+@@ -573,7 +573,7 @@ int do_uring(unsigned long lam)
+ 	char path[PATH_MAX] = {0};
+ 
+ 	/* get current process path */
+-	if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++	if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ 		return 1;
+ 
+ 	int file_fd = open(path, O_RDONLY);
+@@ -680,14 +680,14 @@ static int handle_execve(struct testcases *test)
+ 		perror("Fork failed.");
+ 		ret = 1;
+ 	} else if (pid == 0) {
+-		char path[PATH_MAX];
++		char path[PATH_MAX] = {0};
+ 
+ 		/* Set LAM mode in parent process */
+ 		if (set_lam(lam) != 0)
+ 			return 1;
+ 
+ 		/* Get current binary's path and the binary was run by execve */
+-		if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++		if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ 			exit(-1);
+ 
+ 		/* run binary to get LAM mode and return to parent process */
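Both hunks shave one byte off the readlink() length because readlink(2) never NUL-terminates: a full PATH_MAX read could overwrite the slot reserved for the terminator, which the added "= {0}" initialization otherwise provides. A general-purpose sketch that terminates explicitly instead of relying on zero initialization:

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char path[PATH_MAX];
        ssize_t n = readlink("/proc/self/exe", path, sizeof(path) - 1);

        if (n <= 0)
                return 1;
        path[n] = '\0';         /* safe: n <= sizeof(path) - 1 */
        printf("%s\n", path);
        return 0;
}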
+diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
+index 623a38908ed5b..c769d7b3842c0 100644
+--- a/tools/tracing/rtla/src/utils.c
++++ b/tools/tracing/rtla/src/utils.c
+@@ -538,7 +538,7 @@ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
+ {
+ 	char mount_point[MAX_PATH];
+ 	char type[100];
+-	int found;
++	int found = 0;
+ 	FILE *fp;
+ 
+ 	fp = fopen("/proc/mounts", "r");
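Initializing found to 0 matters because the loop scanning /proc/mounts (not shown in the hunk) may match nothing, in which case the old code returned an indeterminate value. A hedged userspace sketch of the fixed pattern, with hypothetical buffer sizes:

#include <stdio.h>
#include <string.h>

static int find_mount_sketch(const char *fstype, char *mp, size_t len)
{
        char dev[256], mount_point[256], type[256];
        int found = 0;          /* defined result on every path */
        FILE *fp = fopen("/proc/mounts", "r");

        if (!fp)
                return 0;
        while (fscanf(fp, "%255s %255s %255s%*[^\n]",
                      dev, mount_point, type) == 3) {
                if (!strcmp(type, fstype)) {
                        found = 1;
                        snprintf(mp, len, "%s", mount_point);
                        break;
                }
        }
        fclose(fp);
        return found;
}

int main(void)
{
        char mp[256];

        if (find_mount_sketch("tracefs", mp, sizeof(mp)))
                printf("tracefs at %s\n", mp);
        return 0;
}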


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-11-09 18:00 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-11-09 18:00 UTC (permalink / raw
  To: gentoo-commits

commit:     4f42445d6ec6dc05fb74d5547df42572cef288e3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  9 17:59:40 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  9 17:59:40 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4f42445d

Remove patch, as the issue is fixed

Removed:
1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                      |  4 ---
 1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch | 35 ------------------------
 2 files changed, 39 deletions(-)

diff --git a/0000_README b/0000_README
index 2dc5ce55..28310de0 100644
--- a/0000_README
+++ b/0000_README
@@ -99,10 +99,6 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
-Patch:  1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch
-From:   https://lore.kernel.org/all/20230703182150.2193578-1-surenb@google.com/
-Desc:   mm: disable CONFIG_PER_VMA_LOCK by default until its fixed
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch b/1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch
deleted file mode 100644
index c98255a6..00000000
--- a/1805_mm-disable-CONFIG-PER-VMA-LOCK-by-def.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Subject: [PATCH 1/1] mm: disable CONFIG_PER_VMA_LOCK by default until its fixed
-Date: Mon,  3 Jul 2023 11:21:50 -0700	[thread overview]
-Message-ID: <20230703182150.2193578-1-surenb@google.com> (raw)
-
-A memory corruption was reported in [1] with bisection pointing to the
-patch [2] enabling per-VMA locks for x86.
-Disable per-VMA locks config to prevent this issue while the problem is
-being investigated. This is expected to be a temporary measure.
-
-[1] https://bugzilla.kernel.org/show_bug.cgi?id=217624
-[2] https://lore.kernel.org/all/20230227173632.3292573-30-surenb@google.com
-
-Reported-by: Jiri Slaby <jirislaby@kernel.org>
-Reported-by: Jacob Young <jacobly.alt@gmail.com>
-Fixes: 0bff0aaea03e ("x86/mm: try VMA lock-based page fault handling first")
-Signed-off-by: Suren Baghdasaryan <surenb@google.com>
----
- mm/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/mm/Kconfig b/mm/Kconfig
-index 09130434e30d..de94b2497600 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -1224,7 +1224,7 @@ config ARCH_SUPPORTS_PER_VMA_LOCK
-        def_bool n
- 
- config PER_VMA_LOCK
--	def_bool y
-+	bool "Enable per-vma locking during page fault handling."
- 	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
- 	help
- 	  Allow per-vma locking during page fault handling.
--- 
-2.41.0.255.g8b1d071c50-goog


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-11-08 14:01 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-11-08 14:01 UTC (permalink / raw
  To: gentoo-commits

commit:     24837d3d782dd30d999114233051802c7df60801
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov  8 14:01:14 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov  8 14:01:14 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=24837d3d

Linux patch 6.5.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1010_linux-6.5.11.patch | 3169 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3173 insertions(+)

diff --git a/0000_README b/0000_README
index 5af72671..2dc5ce55 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-6.5.10.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.10
 
+Patch:  1010_linux-6.5.11.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-6.5.11.patch b/1010_linux-6.5.11.patch
new file mode 100644
index 00000000..48d04e35
--- /dev/null
+++ b/1010_linux-6.5.11.patch
@@ -0,0 +1,3169 @@
+diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
+index 303a443d9e29b..9418fd66a8e95 100644
+--- a/Documentation/devicetree/bindings/serial/rs485.yaml
++++ b/Documentation/devicetree/bindings/serial/rs485.yaml
+@@ -29,6 +29,10 @@ properties:
+           default: 0
+           maximum: 100
+ 
++  rs485-rts-active-high:
++    description: drive RTS high when sending (this is the default).
++    $ref: /schemas/types.yaml#/definitions/flag
++
+   rs485-rts-active-low:
+     description: drive RTS low when sending (default is high).
+     $ref: /schemas/types.yaml#/definitions/flag
+diff --git a/Makefile b/Makefile
+index ab9f291c1d3f7..555cc34f47301 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 1d8dd14b65cfa..2a9b89bf52698 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -146,7 +146,7 @@
+ 			#size-cells = <1>;
+ 			ranges;
+ 
+-			anomix_ns_gpr: syscon@44210000 {
++			aonmix_ns_gpr: syscon@44210000 {
+ 				compatible = "fsl,imx93-aonmix-ns-syscfg", "syscon";
+ 				reg = <0x44210000 0x1000>;
+ 			};
+@@ -280,6 +280,7 @@
+ 				assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ 				assigned-clock-rates = <40000000>;
+ 				fsl,clk-source = /bits/ 8 <0>;
++				fsl,stop-mode = <&aonmix_ns_gpr 0x14 0>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -532,6 +533,7 @@
+ 				assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ 				assigned-clock-rates = <40000000>;
+ 				fsl,clk-source = /bits/ 8 <0>;
++				fsl,stop-mode = <&wakeupmix_gpr 0x0c 2>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
+index 1c94102200407..0355b64e90ed0 100644
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -54,10 +54,9 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+  * @offset:    bus address of the memory
+  * @size:      size of the resource to map
+  */
+-extern pgprot_t pgprot_wc;
+-
+ #define ioremap_wc(offset, size)	\
+-	ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
++	ioremap_prot((offset), (size),	\
++		pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
+ 
+ #define ioremap_cache(offset, size)	\
+ 	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
+index 81b0c4cfbf4f2..e2eca1a25b4ef 100644
+--- a/arch/loongarch/include/asm/linkage.h
++++ b/arch/loongarch/include/asm/linkage.h
+@@ -33,4 +33,12 @@
+ 	.cfi_endproc;					\
+ 	SYM_END(name, SYM_T_FUNC)
+ 
++#define SYM_CODE_START(name)				\
++	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)	\
++	.cfi_startproc;
++
++#define SYM_CODE_END(name)				\
++	.cfi_endproc;					\
++	SYM_END(name, SYM_T_NONE)
++
+ #endif
+diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
+index de46a6b1e9f11..7b9ac012cd090 100644
+--- a/arch/loongarch/include/asm/pgtable-bits.h
++++ b/arch/loongarch/include/asm/pgtable-bits.h
+@@ -105,13 +105,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+ 	return __pgprot(prot);
+ }
+ 
++extern bool wc_enabled;
++
+ #define pgprot_writecombine pgprot_writecombine
+ 
+ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+ {
+ 	unsigned long prot = pgprot_val(_prot);
+ 
+-	prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
++	prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC);
+ 
+ 	return __pgprot(prot);
+ }
+diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
+index d737e3cf42d3f..1781c6a5befa2 100644
+--- a/arch/loongarch/kernel/entry.S
++++ b/arch/loongarch/kernel/entry.S
+@@ -18,7 +18,7 @@
+ 	.text
+ 	.cfi_sections	.debug_frame
+ 	.align	5
+-SYM_FUNC_START(handle_syscall)
++SYM_CODE_START(handle_syscall)
+ 	csrrd		t0, PERCPU_BASE_KS
+ 	la.pcrel	t1, kernelsp
+ 	add.d		t1, t1, t0
+@@ -66,7 +66,7 @@ SYM_FUNC_START(handle_syscall)
+ 	bl		do_syscall
+ 
+ 	RESTORE_ALL_AND_RET
+-SYM_FUNC_END(handle_syscall)
++SYM_CODE_END(handle_syscall)
+ _ASM_NOKPROBE(handle_syscall)
+ 
+ SYM_CODE_START(ret_from_fork)
+diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
+index 78f0663846575..2bb3aa2dcfcb2 100644
+--- a/arch/loongarch/kernel/genex.S
++++ b/arch/loongarch/kernel/genex.S
+@@ -31,7 +31,7 @@ SYM_FUNC_START(__arch_cpu_idle)
+ 1:	jr	ra
+ SYM_FUNC_END(__arch_cpu_idle)
+ 
+-SYM_FUNC_START(handle_vint)
++SYM_CODE_START(handle_vint)
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+ 	la_abs	t1, __arch_cpu_idle
+@@ -46,11 +46,11 @@ SYM_FUNC_START(handle_vint)
+ 	la_abs	t0, do_vint
+ 	jirl	ra, t0, 0
+ 	RESTORE_ALL_AND_RET
+-SYM_FUNC_END(handle_vint)
++SYM_CODE_END(handle_vint)
+ 
+-SYM_FUNC_START(except_vec_cex)
++SYM_CODE_START(except_vec_cex)
+ 	b	cache_parity_error
+-SYM_FUNC_END(except_vec_cex)
++SYM_CODE_END(except_vec_cex)
+ 
+ 	.macro	build_prep_badv
+ 	csrrd	t0, LOONGARCH_CSR_BADV
+@@ -66,7 +66,7 @@ SYM_FUNC_END(except_vec_cex)
+ 
+ 	.macro	BUILD_HANDLER exception handler prep
+ 	.align	5
+-	SYM_FUNC_START(handle_\exception)
++	SYM_CODE_START(handle_\exception)
+ 	666:
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+@@ -76,7 +76,7 @@ SYM_FUNC_END(except_vec_cex)
+ 	jirl	ra, t0, 0
+ 	668:
+ 	RESTORE_ALL_AND_RET
+-	SYM_FUNC_END(handle_\exception)
++	SYM_CODE_END(handle_\exception)
+ 	SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
+ 	.endm
+ 
+@@ -93,7 +93,7 @@ SYM_FUNC_END(except_vec_cex)
+ 	BUILD_HANDLER watch watch none
+ 	BUILD_HANDLER reserved reserved none	/* others */
+ 
+-SYM_FUNC_START(handle_sys)
++SYM_CODE_START(handle_sys)
+ 	la_abs	t0, handle_syscall
+ 	jr	t0
+-SYM_FUNC_END(handle_sys)
++SYM_CODE_END(handle_sys)
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 9d830ab4e3025..1351614042d4e 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -161,19 +161,19 @@ static void __init smbios_parse(void)
+ }
+ 
+ #ifdef CONFIG_ARCH_WRITECOMBINE
+-pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
++bool wc_enabled = true;
+ #else
+-pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
++bool wc_enabled = false;
+ #endif
+ 
+-EXPORT_SYMBOL(pgprot_wc);
++EXPORT_SYMBOL(wc_enabled);
+ 
+ static int __init setup_writecombine(char *p)
+ {
+ 	if (!strcmp(p, "on"))
+-		pgprot_wc = PAGE_KERNEL_WUC;
++		wc_enabled = true;
+ 	else if (!strcmp(p, "off"))
+-		pgprot_wc = PAGE_KERNEL_SUC;
++		wc_enabled = false;
+ 	else
+ 		pr_warn("Unknown writecombine setting \"%s\".\n", p);
+ 
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index 3b7d8129570b8..d967d881c3fef 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -68,11 +68,11 @@ void copy_user_highpage(struct page *to, struct page *from,
+ {
+ 	void *vfrom, *vto;
+ 
+-	vto = kmap_atomic(to);
+-	vfrom = kmap_atomic(from);
++	vfrom = kmap_local_page(from);
++	vto = kmap_local_page(to);
+ 	copy_page(vto, vfrom);
+-	kunmap_atomic(vfrom);
+-	kunmap_atomic(vto);
++	kunmap_local(vfrom);
++	kunmap_local(vto);
+ 	/* Make sure this page is cleared on other CPU's too before using it */
+ 	smp_wmb();
+ }
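kmap_atomic() disables preemption and page faults for the lifetime of the mapping, while kmap_local_page() mappings are thread-local and leave preemption enabled, which is why it is the preferred replacement. A minimal sketch of the idiom (kernel context, not the LoongArch file itself):

#include <linux/highmem.h>
#include <linux/mm.h>

static void copy_page_pair(struct page *to, struct page *from)
{
        void *vfrom = kmap_local_page(from);
        void *vto   = kmap_local_page(to);

        copy_page(vto, vfrom);

        kunmap_local(vto);      /* unmap in reverse of mapping order */
        kunmap_local(vfrom);
}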
+@@ -267,6 +267,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+ pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
+ #ifndef __PAGETABLE_PUD_FOLDED
+ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
++EXPORT_SYMBOL(invalid_pud_table);
+ #endif
+ #ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
+index ca17dd3a19153..d5d682f3d29f3 100644
+--- a/arch/loongarch/mm/tlbex.S
++++ b/arch/loongarch/mm/tlbex.S
+@@ -17,7 +17,7 @@
+ #define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
+ 
+ 	.macro tlb_do_page_fault, write
+-	SYM_FUNC_START(tlb_do_page_fault_\write)
++	SYM_CODE_START(tlb_do_page_fault_\write)
+ 	SAVE_ALL
+ 	csrrd		a2, LOONGARCH_CSR_BADV
+ 	move		a0, sp
+@@ -25,13 +25,13 @@
+ 	li.w		a1, \write
+ 	bl		do_page_fault
+ 	RESTORE_ALL_AND_RET
+-	SYM_FUNC_END(tlb_do_page_fault_\write)
++	SYM_CODE_END(tlb_do_page_fault_\write)
+ 	.endm
+ 
+ 	tlb_do_page_fault 0
+ 	tlb_do_page_fault 1
+ 
+-SYM_FUNC_START(handle_tlb_protect)
++SYM_CODE_START(handle_tlb_protect)
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+ 	move		a0, sp
+@@ -41,9 +41,9 @@ SYM_FUNC_START(handle_tlb_protect)
+ 	la_abs		t0, do_page_fault
+ 	jirl		ra, t0, 0
+ 	RESTORE_ALL_AND_RET
+-SYM_FUNC_END(handle_tlb_protect)
++SYM_CODE_END(handle_tlb_protect)
+ 
+-SYM_FUNC_START(handle_tlb_load)
++SYM_CODE_START(handle_tlb_load)
+ 	csrwr		t0, EXCEPTION_KS0
+ 	csrwr		t1, EXCEPTION_KS1
+ 	csrwr		ra, EXCEPTION_KS2
+@@ -187,16 +187,16 @@ nopage_tlb_load:
+ 	csrrd		ra, EXCEPTION_KS2
+ 	la_abs		t0, tlb_do_page_fault_0
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_load)
++SYM_CODE_END(handle_tlb_load)
+ 
+-SYM_FUNC_START(handle_tlb_load_ptw)
++SYM_CODE_START(handle_tlb_load_ptw)
+ 	csrwr		t0, LOONGARCH_CSR_KS0
+ 	csrwr		t1, LOONGARCH_CSR_KS1
+ 	la_abs		t0, tlb_do_page_fault_0
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_load_ptw)
++SYM_CODE_END(handle_tlb_load_ptw)
+ 
+-SYM_FUNC_START(handle_tlb_store)
++SYM_CODE_START(handle_tlb_store)
+ 	csrwr		t0, EXCEPTION_KS0
+ 	csrwr		t1, EXCEPTION_KS1
+ 	csrwr		ra, EXCEPTION_KS2
+@@ -343,16 +343,16 @@ nopage_tlb_store:
+ 	csrrd		ra, EXCEPTION_KS2
+ 	la_abs		t0, tlb_do_page_fault_1
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_store)
++SYM_CODE_END(handle_tlb_store)
+ 
+-SYM_FUNC_START(handle_tlb_store_ptw)
++SYM_CODE_START(handle_tlb_store_ptw)
+ 	csrwr		t0, LOONGARCH_CSR_KS0
+ 	csrwr		t1, LOONGARCH_CSR_KS1
+ 	la_abs		t0, tlb_do_page_fault_1
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_store_ptw)
++SYM_CODE_END(handle_tlb_store_ptw)
+ 
+-SYM_FUNC_START(handle_tlb_modify)
++SYM_CODE_START(handle_tlb_modify)
+ 	csrwr		t0, EXCEPTION_KS0
+ 	csrwr		t1, EXCEPTION_KS1
+ 	csrwr		ra, EXCEPTION_KS2
+@@ -497,16 +497,16 @@ nopage_tlb_modify:
+ 	csrrd		ra, EXCEPTION_KS2
+ 	la_abs		t0, tlb_do_page_fault_1
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_modify)
++SYM_CODE_END(handle_tlb_modify)
+ 
+-SYM_FUNC_START(handle_tlb_modify_ptw)
++SYM_CODE_START(handle_tlb_modify_ptw)
+ 	csrwr		t0, LOONGARCH_CSR_KS0
+ 	csrwr		t1, LOONGARCH_CSR_KS1
+ 	la_abs		t0, tlb_do_page_fault_1
+ 	jr		t0
+-SYM_FUNC_END(handle_tlb_modify_ptw)
++SYM_CODE_END(handle_tlb_modify_ptw)
+ 
+-SYM_FUNC_START(handle_tlb_refill)
++SYM_CODE_START(handle_tlb_refill)
+ 	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
+ 	csrrd		t0, LOONGARCH_CSR_PGD
+ 	lddir		t0, t0, 3
+@@ -521,4 +521,4 @@ SYM_FUNC_START(handle_tlb_refill)
+ 	tlbfill
+ 	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
+ 	ertn
+-SYM_FUNC_END(handle_tlb_refill)
++SYM_CODE_END(handle_tlb_refill)
+diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
+index fdbee1093e2ba..f9634111e82ed 100644
+--- a/arch/powerpc/kernel/head_85xx.S
++++ b/arch/powerpc/kernel/head_85xx.S
+@@ -396,7 +396,7 @@ interrupt_base:
+ #ifdef CONFIG_PPC_FPU
+ 	FP_UNAVAILABLE_EXCEPTION
+ #else
+-	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
++	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
+ #endif
+ 
+ 	/* System Call Interrupt */
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index d2a446216444f..d35ba3ac218bf 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	/* Parse memory topology */
+ 	mem_topology_setup();
++	/* Set max_mapnr before paging_init() */
++	set_max_mapnr(max_pfn);
+ 
+ 	/*
+ 	 * Release secondary cpus out of their spinloops at 0x60 now that
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 8b121df7b08f8..07e8f4f1e07f8 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -288,7 +288,6 @@ void __init mem_init(void)
+ #endif
+ 
+ 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-	set_max_mapnr(max_pfn);
+ 
+ 	kasan_late_init();
+ 
+diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
+index 56a73134b49e6..58108f0eb3fdc 100644
+--- a/arch/riscv/boot/dts/thead/th1520.dtsi
++++ b/arch/riscv/boot/dts/thead/th1520.dtsi
+@@ -139,6 +139,7 @@
+ 		interrupt-parent = <&plic>;
+ 		#address-cells = <2>;
+ 		#size-cells = <2>;
++		dma-noncoherent;
+ 		ranges;
+ 
+ 		plic: interrupt-controller@ffd8000000 {
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index c67f59db7a512..f66d642251fe8 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -57,6 +57,7 @@ static void kasan_populate_shadow(void)
+ 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+ 	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+ 	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
++	unsigned long memgap_start = 0;
+ 	unsigned long untracked_end;
+ 	unsigned long start, end;
+ 	int i;
+@@ -101,8 +102,12 @@ static void kasan_populate_shadow(void)
+ 	 * +- shadow end ----+---------+- shadow end ---+
+ 	 */
+ 
+-	for_each_physmem_usable_range(i, &start, &end)
++	for_each_physmem_usable_range(i, &start, &end) {
+ 		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
++		if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
++			kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
++		memgap_start = end;
++	}
+ 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ 		untracked_end = VMALLOC_START;
+ 		/* shallowly populate kasan shadow for vmalloc and modules */
+diff --git a/drivers/ata/pata_parport/fit3.c b/drivers/ata/pata_parport/fit3.c
+index bad7aa920cdca..d2b81cf2e16d2 100644
+--- a/drivers/ata/pata_parport/fit3.c
++++ b/drivers/ata/pata_parport/fit3.c
+@@ -9,11 +9,6 @@
+  *
+  * The TD-2000 and certain older devices use a different protocol.
+  * Try the fit2 protocol module with them.
+- *
+- * NB:  The FIT adapters do not appear to support the control
+- * registers.  So, we map ALT_STATUS to STATUS and NO-OP writes
+- * to the device control register - this means that IDE reset
+- * will not work on these devices.
+  */
+ 
+ #include <linux/module.h>
+@@ -37,8 +32,7 @@
+ 
+ static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
+ {
+-	if (cont == 1)
+-		return;
++	regr += cont << 3;
+ 
+ 	switch (pi->mode) {
+ 	case 0:
+@@ -59,11 +53,7 @@ static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr)
+ {
+ 	int  a, b;
+ 
+-	if (cont) {
+-		if (regr != 6)
+-			return 0xff;
+-		regr = 7;
+-	}
++	regr += cont << 3;
+ 
+ 	switch (pi->mode) {
+ 	case 0:
+diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
+index cf87bbb52f1ff..a7adfdcb5e27c 100644
+--- a/drivers/ata/pata_parport/pata_parport.c
++++ b/drivers/ata/pata_parport/pata_parport.c
+@@ -80,6 +80,72 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
+ 	return (nsect == 0x55) && (lbal == 0xaa);
+ }
+ 
++static int pata_parport_wait_after_reset(struct ata_link *link,
++					 unsigned int devmask,
++					 unsigned long deadline)
++{
++	struct ata_port *ap = link->ap;
++	struct pi_adapter *pi = ap->host->private_data;
++	unsigned int dev0 = devmask & (1 << 0);
++	unsigned int dev1 = devmask & (1 << 1);
++	int rc, ret = 0;
++
++	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
++
++	/* always check readiness of the master device */
++	rc = ata_sff_wait_ready(link, deadline);
++	if (rc) {
++		/*
++		 * some adapters return bogus values if master device is not
++		 * present, so don't abort now if a slave device is present
++		 */
++		if (!dev1)
++			return rc;
++		ret = -ENODEV;
++	}
++
++	/*
++	 * if device 1 was found in ata_devchk, wait for register
++	 * access briefly, then wait for BSY to clear.
++	 */
++	if (dev1) {
++		int i;
++
++		pata_parport_dev_select(ap, 1);
++
++		/*
++		 * Wait for register access.  Some ATAPI devices fail
++		 * to set nsect/lbal after reset, so don't waste too
++		 * much time on it.  We're going to wait for !BSY anyway.
++		 */
++		for (i = 0; i < 2; i++) {
++			u8 nsect, lbal;
++
++			nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
++			lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
++			if (nsect == 1 && lbal == 1)
++				break;
++			/* give drive a breather */
++			ata_msleep(ap, 50);
++		}
++
++		rc = ata_sff_wait_ready(link, deadline);
++		if (rc) {
++			if (rc != -ENODEV)
++				return rc;
++			ret = rc;
++		}
++	}
++
++	pata_parport_dev_select(ap, 0);
++	if (dev1)
++		pata_parport_dev_select(ap, 1);
++	if (dev0)
++		pata_parport_dev_select(ap, 0);
++
++	return ret;
++}
++
+ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ 				      unsigned long deadline)
+ {
+@@ -94,7 +160,7 @@ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ 	ap->last_ctl = ap->ctl;
+ 
+ 	/* wait the port to become ready */
+-	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
++	return pata_parport_wait_after_reset(&ap->link, devmask, deadline);
+ }
+ 
+ static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 19ad0e7886462..a617578356953 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -512,6 +512,7 @@ struct bcm4377_hw {
+ 	unsigned long disable_aspm : 1;
+ 	unsigned long broken_ext_scan : 1;
+ 	unsigned long broken_mws_transport_config : 1;
++	unsigned long broken_le_coded : 1;
+ 
+ 	int (*send_calibration)(struct bcm4377_data *bcm4377);
+ 	int (*send_ptb)(struct bcm4377_data *bcm4377,
+@@ -2372,6 +2373,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ 	if (bcm4377->hw->broken_ext_scan)
+ 		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
++	if (bcm4377->hw->broken_le_coded)
++		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ 
+ 	pci_set_drvdata(pdev, bcm4377);
+ 	hci_set_drvdata(hdev, bcm4377);
+@@ -2461,6 +2464,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ 		.bar0_core2_window2 = 0x18107000,
+ 		.has_bar0_core2_window2 = true,
+ 		.broken_mws_transport_config = true,
++		.broken_le_coded = true,
+ 		.send_calibration = bcm4378_send_calibration,
+ 		.send_ptb = bcm4378_send_ptb,
+ 	},
+@@ -2474,6 +2478,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ 		.has_bar0_core2_window2 = true,
+ 		.clear_pciecfg_subsystem_ctrl_bit19 = true,
+ 		.broken_mws_transport_config = true,
++		.broken_le_coded = true,
+ 		.send_calibration = bcm4387_send_calibration,
+ 		.send_ptb = bcm4378_send_ptb,
+ 	},
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 89e82508c1339..002833fb1fa04 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev)
+ 		regulator_disable(base->lcpa_regulator);
+ 		regulator_put(base->lcpa_regulator);
+ 	}
++	pm_runtime_disable(base->dev);
+ 
+  report_failure:
+ 	d40_err(dev, "probe failed\n");
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 1599f11768426..9cfac61812f68 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -273,9 +273,13 @@ static __init int efivar_ssdt_load(void)
+ 		if (status == EFI_NOT_FOUND) {
+ 			break;
+ 		} else if (status == EFI_BUFFER_TOO_SMALL) {
+-			name = krealloc(name, name_size, GFP_KERNEL);
+-			if (!name)
++			efi_char16_t *name_tmp =
++				krealloc(name, name_size, GFP_KERNEL);
++			if (!name_tmp) {
++				kfree(name);
+ 				return -ENOMEM;
++			}
++			name = name_tmp;
+ 			continue;
+ 		}
+ 
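The krealloc() change above fixes the classic realloc leak: on allocation failure, NULL overwrites the only pointer to the still-allocated old buffer. A userspace analogue of the corrected pattern:

#include <stdlib.h>
#include <string.h>

static int grow(char **buf, size_t new_size)
{
        char *tmp = realloc(*buf, new_size);

        if (!tmp)
                return -1;      /* *buf is still valid and owned by caller */
        *buf = tmp;
        return 0;
}

int main(void)
{
        char *name = malloc(8);

        if (!name)
                return 1;
        strcpy(name, "ssdt");
        if (grow(&name, 64) < 0) {
                free(name);     /* old buffer is not leaked */
                return 1;
        }
        free(name);
        return 0;
}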
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 146477da2b98c..a5a856a7639e1 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -648,11 +648,8 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
+ 			break;
+ 
+ 		case EFI_UNACCEPTED_MEMORY:
+-			if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) {
+-				efi_warn_once(
+-"The system has unaccepted memory,  but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n");
++			if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ 				continue;
+-			}
+ 			e820_type = E820_TYPE_RAM;
+ 			process_unaccepted_memory(d->phys_addr,
+ 						  d->phys_addr + PAGE_SIZE * d->num_pages);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 0dc9c655c4fbd..aac52d9754e6d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+ {
+ 	switch (ctx_prio) {
+-	case AMDGPU_CTX_PRIORITY_UNSET:
+ 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ 	case AMDGPU_CTX_PRIORITY_LOW:
+ 	case AMDGPU_CTX_PRIORITY_NORMAL:
+@@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+ 	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ 		return true;
+ 	default:
++	case AMDGPU_CTX_PRIORITY_UNSET:
+ 		return false;
+ 	}
+ }
+@@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+ {
+ 	switch (ctx_prio) {
+ 	case AMDGPU_CTX_PRIORITY_UNSET:
+-		return DRM_SCHED_PRIORITY_UNSET;
++		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
++		return DRM_SCHED_PRIORITY_NORMAL;
+ 
+ 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ 		return DRM_SCHED_PRIORITY_MIN;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+index 12210598e5b8e..ba3a87cb88ccc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+ 				continue;
+ 		}
+ 
+-		r = amdgpu_vm_clear_freed(adev, vm, NULL);
++		/* Reserve fences for two SDMA page table updates */
++		r = dma_resv_reserve_fences(resv, 2);
++		if (!r)
++			r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ 		if (!r)
+ 			r = amdgpu_vm_handle_moved(adev, vm);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index 63009db8b5a72..d156eeef466f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -586,7 +586,8 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+ 				if (state == PSR_STATE0)
+ 					break;
+ 			}
+-			fsleep(500);
++			/* must *not* be fsleep - this can be called from high irq levels */
++			udelay(500);
+ 		}
+ 
+ 		/* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 0f24b6fbd2201..4704c9c85ee6f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -216,7 +216,8 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
+ 					break;
+ 			}
+ 
+-			fsleep(500);
++			/* must *not* be fsleep - this can be called from high irq levels */
++			udelay(500);
+ 		}
+ 
+ 		/* assert if max retry hit */
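Both PSR hunks make the same point: fsleep() may map to usleep_range() or msleep() and schedule, which is illegal in atomic context, whereas udelay() busy-waits. A hedged sketch of the rule, using a hypothetical lock:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */

static void poll_in_atomic_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        udelay(500);            /* OK: busy-waits, never schedules */
        /* fsleep(500) would be a bug here: for this duration it maps
         * to usleep_range(), which may sleep while atomic. */
        spin_unlock_irqrestore(&demo_lock, flags);
}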
+diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
+index 7726a72befc54..d48b39132b324 100644
+--- a/drivers/gpu/drm/ttm/ttm_device.c
++++ b/drivers/gpu/drm/ttm/ttm_device.c
+@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
+ 	struct ttm_resource_manager *man;
+ 	unsigned i;
+ 
+-	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+-	ttm_resource_manager_set_used(man, false);
+-	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+-
+ 	mutex_lock(&ttm_global_mutex);
+ 	list_del(&bdev->device_list);
+ 	mutex_unlock(&ttm_global_mutex);
+@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
+ 	drain_workqueue(bdev->wq);
+ 	destroy_workqueue(bdev->wq);
+ 
++	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
++	ttm_resource_manager_set_used(man, false);
++	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
++
+ 	spin_lock(&bdev->lru_lock);
+ 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ 		if (list_empty(&man->lru[0]))
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 6132c5b3db9c7..8311e1028ddb0 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ 
+ 	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+ 						&flat_buf->daddr,
+-						DMA_FROM_DEVICE, GFP_KERNEL);
++						DMA_FROM_DEVICE,
++						GFP_KERNEL | __GFP_NOWARN);
+ 	if (!flat_buf->vaddr) {
+ 		kfree(flat_buf);
+ 		return -ENOMEM;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index cefc74b3b34b1..22d16d80efb93 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1753,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
+ 		psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
+ 		!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
+ 	const struct rmi_device_platform_data pdata = {
++		.reset_delay_ms = 30,
+ 		.sensor_pdata = {
+ 			.sensor_type = rmi_sensor_touchpad,
+ 			.axis_align.flip_y = true,
+diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
+index 7059a2762aebc..b0b099b5528a8 100644
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
+ 
+ static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
+ {
+-	int retval;
++	struct i2c_client *client = rmi_smb->client;
++	int smbus_version;
++
++	/*
++	 * The psmouse driver resets the controller; we only need to wait
++	 * to give the firmware a chance to fully reinitialize.
++	 */
++	if (rmi_smb->xport.pdata.reset_delay_ms)
++		msleep(rmi_smb->xport.pdata.reset_delay_ms);
+ 
+ 	/* we need to get the smbus version to activate the touchpad */
+-	retval = rmi_smb_get_version(rmi_smb);
+-	if (retval < 0)
+-		return retval;
++	smbus_version = rmi_smb_get_version(rmi_smb);
++	if (smbus_version < 0)
++		return smbus_version;
++
++	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
++		smbus_version);
++
++	if (smbus_version != 2 && smbus_version != 3) {
++		dev_err(&client->dev, "Unrecognized SMB version %d\n",
++				smbus_version);
++		return -ENODEV;
++	}
+ 
+ 	return 0;
+ }
+@@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
+ 	rmi_smb_clear_state(rmi_smb);
+ 
+ 	/*
+-	 * we do not call the actual reset command, it has to be handled in
+-	 * PS/2 or there will be races between PS/2 and SMBus.
+-	 * PS/2 should ensure that a psmouse_reset is called before
+-	 * intializing the device and after it has been removed to be in a known
+-	 * state.
++	 * We do not call the actual reset command, it has to be handled in
++	 * PS/2 or there will be races between PS/2 and SMBus. PS/2 should
++	 * ensure that a psmouse_reset is called before initializing the
++	 * device and after it has been removed to be in a known state.
+ 	 */
+ 	return rmi_smb_enable_smbus_mode(rmi_smb);
+ }
+@@ -272,7 +288,6 @@ static int rmi_smb_probe(struct i2c_client *client)
+ {
+ 	struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
+ 	struct rmi_smb_xport *rmi_smb;
+-	int smbus_version;
+ 	int error;
+ 
+ 	if (!pdata) {
+@@ -311,18 +326,9 @@ static int rmi_smb_probe(struct i2c_client *client)
+ 	rmi_smb->xport.proto_name = "smb";
+ 	rmi_smb->xport.ops = &rmi_smb_ops;
+ 
+-	smbus_version = rmi_smb_get_version(rmi_smb);
+-	if (smbus_version < 0)
+-		return smbus_version;
+-
+-	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+-		smbus_version);
+-
+-	if (smbus_version != 2 && smbus_version != 3) {
+-		dev_err(&client->dev, "Unrecognized SMB version %d\n",
+-				smbus_version);
+-		return -ENODEV;
+-	}
++	error = rmi_smb_enable_smbus_mode(rmi_smb);
++	if (error)
++		return error;
+ 
+ 	i2c_set_clientdata(client, rmi_smb);
+ 
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index 4adeee1bc391f..e8d01b14ccdde 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node,
+ 	 * for each INTC DT node. We only need to do INTC initialization
+ 	 * for the INTC DT node belonging to boot CPU (or boot HART).
+ 	 */
+-	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
++	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
++		/*
++		 * The INTC nodes of each CPU are suppliers for downstream
++		 * interrupt controllers (such as PLIC, IMSIC and APLIC
++		 * direct-mode) so we should mark an INTC node as initialized
++		 * if we are not creating IRQ domain for it.
++		 */
++		fwnode_dev_initialized(of_fwnode_handle(node), true);
+ 		return 0;
++	}
+ 
+ 	return riscv_intc_init_common(of_node_to_fwnode(node));
+ }
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index b5fa76ce5046a..cb4b195bc849a 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -459,6 +459,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
+ 	.map	= irq_map_generic_chip,
+ 	.alloc  = stm32_exti_alloc,
+ 	.free	= stm32_exti_free,
++	.xlate	= irq_domain_xlate_twocell,
+ };
+ 
+ static void stm32_irq_ack(struct irq_data *d)
+diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
+index 3af6125a2eee8..4d9fd76e2f60f 100644
+--- a/drivers/media/i2c/ov8858.c
++++ b/drivers/media/i2c/ov8858.c
+@@ -1850,9 +1850,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
+ 	}
+ 
+ 	ret = v4l2_fwnode_endpoint_parse(endpoint, &vep);
++	fwnode_handle_put(endpoint);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to parse endpoint: %d\n", ret);
+-		fwnode_handle_put(endpoint);
+ 		return ret;
+ 	}
+ 
+@@ -1864,12 +1864,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
+ 	default:
+ 		dev_err(dev, "Unsupported number of data lanes %u\n",
+ 			ov8858->num_lanes);
+-		fwnode_handle_put(endpoint);
+ 		return -EINVAL;
+ 	}
+ 
+-	ov8858->subdev.fwnode = endpoint;
+-
+ 	return 0;
+ }
+ 
+@@ -1913,7 +1910,7 @@ static int ov8858_probe(struct i2c_client *client)
+ 
+ 	ret = ov8858_init_ctrls(ov8858);
+ 	if (ret)
+-		goto err_put_fwnode;
++		return ret;
+ 
+ 	sd = &ov8858->subdev;
+ 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+@@ -1964,8 +1961,6 @@ err_clean_entity:
+ 	media_entity_cleanup(&sd->entity);
+ err_free_handler:
+ 	v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
+-err_put_fwnode:
+-	fwnode_handle_put(ov8858->subdev.fwnode);
+ 
+ 	return ret;
+ }
+@@ -1978,7 +1973,6 @@ static void ov8858_remove(struct i2c_client *client)
+ 	v4l2_async_unregister_subdev(sd);
+ 	media_entity_cleanup(&sd->entity);
+ 	v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
+-	fwnode_handle_put(ov8858->subdev.fwnode);
+ 
+ 	pm_runtime_disable(&client->dev);
+ 	if (!pm_runtime_status_suspended(&client->dev))
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index ed4d0ef5e5c31..7e1acc68d4359 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -71,6 +71,7 @@
+ #define PCI_DEVICE_ID_TI_AM654			0xb00c
+ #define PCI_DEVICE_ID_TI_J7200			0xb00f
+ #define PCI_DEVICE_ID_TI_AM64			0xb010
++#define PCI_DEVICE_ID_TI_J721S2		0xb013
+ #define PCI_DEVICE_ID_LS1088A			0x80c0
+ #define PCI_DEVICE_ID_IMX8			0x0808
+ 
+@@ -999,6 +1000,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ 	  .driver_data = (kernel_ulong_t)&j721e_data,
+ 	},
++	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
++	  .driver_data = (kernel_ulong_t)&j721e_data,
++	},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
+index ff0fc18baf133..d8be69f4a0c3f 100644
+--- a/drivers/net/can/flexcan/flexcan-core.c
++++ b/drivers/net/can/flexcan/flexcan-core.c
+@@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
+ static struct flexcan_devtype_data fsl_imx93_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ 		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+-		FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE |
++		FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
+ 		FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ 		FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ 		FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+@@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ 	} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
+ 		regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 				   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+-	} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) {
+-		/* For the auto stop mode, software do nothing, hardware will cover
+-		 * all the operation automatically after system go into low power mode.
+-		 */
+-		return 0;
+ 	}
+ 
+ 	return flexcan_low_power_enter_ack(priv);
+@@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
+ 	reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+ 	priv->write(reg_mcr, &regs->mcr);
+ 
+-	/* For the auto stop mode, hardware will exist stop mode
+-	 * automatically after system go out of low power mode.
+-	 */
+-	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+-		return 0;
+-
+ 	return flexcan_low_power_exit_ack(priv);
+ }
+ 
+@@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+ 		ret = flexcan_setup_stop_mode_scfw(pdev);
+ 	else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ 		ret = flexcan_setup_stop_mode_gpr(pdev);
+-	else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+-		ret = 0;
+ 	else
+ 		/* return 0 directly if doesn't support stop mode feature */
+ 		return 0;
+ 
+-	if (ret)
++	/* If ret is -EINVAL, the SoC claims to support stop mode but the
++	 * dts file lacks the stop mode property definition. In this case,
++	 * return 0 directly; this skips the wakeup-capable setting and
++	 * does not block the driver probe.
++	 */
++	if (ret == -EINVAL)
++		return 0;
++	else if (ret)
+ 		return ret;
+ 
+ 	device_set_wakeup_capable(&pdev->dev, true);
+@@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
+ 	if (netif_running(dev)) {
+ 		int err;
+ 
+-		if (device_may_wakeup(device)) {
++		if (device_may_wakeup(device))
+ 			flexcan_enable_wakeup_irq(priv, true);
+-			/* For auto stop mode, need to keep the clock on before
+-			 * system go into low power mode. After system go into
+-			 * low power mode, hardware will config the flexcan into
+-			 * stop mode, and gate off the clock automatically.
+-			 */
+-			if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+-				return 0;
+-		}
+ 
+ 		err = pm_runtime_force_suspend(device);
+ 		if (err)
+@@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ 	if (netif_running(dev)) {
+ 		int err;
+ 
+-		/* For the wakeup in auto stop mode, no need to gate on the
+-		 * clock here, hardware will do this automatically.
+-		 */
+-		if (!(device_may_wakeup(device) &&
+-		      priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) {
+-			err = pm_runtime_force_resume(device);
+-			if (err)
+-				return err;
+-		}
++		err = pm_runtime_force_resume(device);
++		if (err)
++			return err;
+ 
+ 		if (device_may_wakeup(device))
+ 			flexcan_enable_wakeup_irq(priv, false);
+diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
+index 91402977780b2..025c3417031f4 100644
+--- a/drivers/net/can/flexcan/flexcan.h
++++ b/drivers/net/can/flexcan/flexcan.h
+@@ -68,8 +68,6 @@
+ #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
+ /* Device supports RX via FIFO */
+ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+-/* auto enter stop mode to support wakeup */
+-#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17)
+ 
+ struct flexcan_devtype_data {
+ 	u32 quirks;		/* quirks needed for different IP cores */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 8d719f82854a9..76de55306c4d0 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
+ 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ 				    &param, &val, 30000);
++	if (ret)
++		return ret;
+ 
+ 	/* If we have version number support, then check to see that the new
+ 	 * firmware got loaded properly.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+index 5608002465734..285c13edc09f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+@@ -463,6 +463,17 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
+ 		/* only handle the event on peers */
+ 		if (mlx5_esw_bridge_is_local(dev, rep, esw))
+ 			break;
++
++		fdb_info = container_of(info,
++					struct switchdev_notifier_fdb_info,
++					info);
++		/* Mark for deletion to prevent the update wq task from
++		 * spuriously refreshing the entry, which would mark it again
++		 * as offloaded in the SW bridge. After this, fall through to
++		 * the regular async delete code.
++		 */
++		mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
++						 fdb_info);
+ 		fallthrough;
+ 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+index f4fe1daa4afd5..de1ed59239da8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+@@ -1748,6 +1748,28 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
+ 	entry->lastuse = jiffies;
+ }
+ 
++void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
++				      struct mlx5_esw_bridge_offloads *br_offloads,
++				      struct switchdev_notifier_fdb_info *fdb_info)
++{
++	struct mlx5_esw_bridge_fdb_entry *entry;
++	struct mlx5_esw_bridge *bridge;
++
++	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
++	if (!bridge)
++		return;
++
++	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
++	if (!entry) {
++		esw_debug(br_offloads->esw->dev,
++			  "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
++			  fdb_info->addr, fdb_info->vid, vport_num);
++		return;
++	}
++
++	entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
++}
++
+ void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ 				struct mlx5_esw_bridge_offloads *br_offloads,
+ 				struct switchdev_notifier_fdb_info *fdb_info)
+@@ -1810,7 +1832,8 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
+ 			unsigned long lastuse =
+ 				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
+ 
+-			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
++			if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
++					    MLX5_ESW_BRIDGE_FLAG_DELETED))
+ 				continue;
+ 
+ 			if (time_after(lastuse, entry->lastuse))
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+index c2c7c70d99eb7..d6f5391619930 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+@@ -62,6 +62,9 @@ int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_nu
+ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ 				     struct mlx5_esw_bridge_offloads *br_offloads,
+ 				     struct switchdev_notifier_fdb_info *fdb_info);
++void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
++				      struct mlx5_esw_bridge_offloads *br_offloads,
++				      struct switchdev_notifier_fdb_info *fdb_info);
+ void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ 				struct mlx5_esw_bridge_offloads *br_offloads,
+ 				struct switchdev_notifier_fdb_info *fdb_info);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+index 4911cc32161b4..7c251af566c6f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+@@ -133,6 +133,7 @@ struct mlx5_esw_bridge_mdb_key {
+ enum {
+ 	MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
+ 	MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
++	MLX5_ESW_BRIDGE_FLAG_DELETED = BIT(2),
+ };
+ 
+ enum {
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 14497e5558bf9..b64df36fbb115 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3656,6 +3656,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
++				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+ 					break;
+@@ -3696,6 +3698,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
++				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+ 					break;
+@@ -4059,6 +4063,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
+ 	for (i = 0; wait && i < 5000; i++) {
+ 		u32 ocp_data;
+ 
++		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++			break;
++
+ 		usleep_range(1000, 2000);
+ 		ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
+ 		if ((ocp_data & PATCH_READY) ^ check)
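Each added test_bit(RTL8152_UNPLUG, ...) check stops a bounded polling loop from spinning on a register that can no longer change once the USB device is gone. A hedged sketch of the pattern, with hypothetical callbacks:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_ready(unsigned long *flags, int unplug_bit,
                      u32 (*read_status)(void), u32 ready_mask)
{
        int i;

        for (i = 0; i < 500; i++) {
                if (test_bit(unplug_bit, flags))
                        return -ENODEV; /* device gone: stop waiting */
                if (read_status() & ready_mask)
                        return 0;
                usleep_range(1000, 2000);
        }
        return -ETIMEDOUT;
}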
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 321156ca273d5..eb65170b97ff0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -688,7 +688,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_
+ /*
+  * In the AMD NL platform, this device ([1022:7912]) has a class code of
+  * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+- * claim it.
++ * claim it. The same applies to the VanGogh platform device ([1022:163a]).
+  *
+  * But the dwc3 driver is a more specific driver for this device, and we'd
+  * prefer to use it instead of xhci. To prevent xhci from claiming the
+@@ -696,7 +696,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_
+  * defines as "USB device (not host controller)". The dwc3 driver can then
+  * claim it based on its Vendor and Device ID.
+  */
+-static void quirk_amd_nl_class(struct pci_dev *pdev)
++static void quirk_amd_dwc_class(struct pci_dev *pdev)
+ {
+ 	u32 class = pdev->class;
+ 
+@@ -706,7 +706,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
+ 		 class, pdev->class);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+-		quirk_amd_nl_class);
++		quirk_amd_dwc_class);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
++		quirk_amd_dwc_class);
+ 
+ /*
+  * Synopsys USB 3.x host HAPS platform has a class code of
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index f3696a54a2bd7..d9615ad600123 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -607,24 +607,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ 
+ 	if (vring->cur_len + sizeof(u64) <= len) {
+ 		/* The whole word. */
+-		if (!IS_VRING_DROP(vring)) {
+-			if (is_rx)
++		if (is_rx) {
++			if (!IS_VRING_DROP(vring))
+ 				memcpy(addr + vring->cur_len, &data,
+ 				       sizeof(u64));
+-			else
+-				memcpy(&data, addr + vring->cur_len,
+-				       sizeof(u64));
++		} else {
++			memcpy(&data, addr + vring->cur_len,
++			       sizeof(u64));
+ 		}
+ 		vring->cur_len += sizeof(u64);
+ 	} else {
+ 		/* Leftover bytes. */
+-		if (!IS_VRING_DROP(vring)) {
+-			if (is_rx)
++		if (is_rx) {
++			if (!IS_VRING_DROP(vring))
+ 				memcpy(addr + vring->cur_len, &data,
+ 				       len - vring->cur_len);
+-			else
+-				memcpy(&data, addr + vring->cur_len,
+-				       len - vring->cur_len);
++		} else {
++			data = 0;
++			memcpy(&data, addr + vring->cur_len,
++			       len - vring->cur_len);
+ 		}
+ 		vring->cur_len = len;
+ 	}
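Zeroing data before the partial memcpy() in the TX path keeps stale stack bytes from padding the transmitted word, the same class of bug as a kernel-to-host information leak. A userspace sketch of the fixed helper shape:

#include <stdint.h>
#include <string.h>

static uint64_t pack_tail(const uint8_t *src, size_t remaining)
{
        uint64_t data = 0;      /* zero padding: no uninitialized bytes */

        memcpy(&data, src,
               remaining < sizeof(data) ? remaining : sizeof(data));
        return data;
}

int main(void)
{
        uint8_t tail[3] = { 0xaa, 0xbb, 0xcc };
        uint64_t w = pack_tail(tail, sizeof(tail));

        return w == 0 ? 1 : 0;  /* low three bytes set, upper bytes zero */
}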
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 3791aec69ddc6..0d2e72a966c9b 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -29,7 +29,7 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+ 
+-ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
++BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
+ EXPORT_SYMBOL_GPL(power_supply_notifier);
+ 
+ static struct device_type power_supply_dev_type;
+@@ -97,7 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
+ 		class_for_each_device(power_supply_class, NULL, psy,
+ 				      __power_supply_changed_work);
+ 		power_supply_update_leds(psy);
+-		atomic_notifier_call_chain(&power_supply_notifier,
++		blocking_notifier_call_chain(&power_supply_notifier,
+ 				PSY_EVENT_PROP_CHANGED, psy);
+ 		kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
+ 		spin_lock_irqsave(&psy->changed_lock, flags);
+@@ -1262,13 +1262,13 @@ static void power_supply_dev_release(struct device *dev)
+ 
+ int power_supply_reg_notifier(struct notifier_block *nb)
+ {
+-	return atomic_notifier_chain_register(&power_supply_notifier, nb);
++	return blocking_notifier_chain_register(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+ 
+ void power_supply_unreg_notifier(struct notifier_block *nb)
+ {
+-	atomic_notifier_chain_unregister(&power_supply_notifier, nb);
++	blocking_notifier_chain_unregister(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
+ 
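Switching the chain from atomic to blocking moves callbacks into process context under an rwsem, so listeners may now sleep (for example, to read properties over I2C). A hedged sketch of a consumer; the probe/remove wiring is indicated in comments:

#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/printk.h>

static int psy_event(struct notifier_block *nb, unsigned long event,
                     void *data)
{
        struct power_supply *psy = data;

        if (event == PSY_EVENT_PROP_CHANGED)
                pr_info("power supply %s changed\n", psy->desc->name);
        return NOTIFY_OK;
}

static struct notifier_block psy_nb = {
        .notifier_call = psy_event,
};

/* In probe:  power_supply_reg_notifier(&psy_nb);
 * In remove: power_supply_unreg_notifier(&psy_nb); */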
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index c3c1f466fe01d..605013d3ee83a 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -12913,8 +12913,10 @@ _mpt3sas_init(void)
+ 	mpt3sas_ctl_init(hbas_to_enumerate);
+ 
+ 	error = pci_register_driver(&mpt3sas_driver);
+-	if (error)
++	if (error) {
++		mpt3sas_ctl_exit(hbas_to_enumerate);
+ 		scsih_exit();
++	}
+ 
+ 	return error;
+ }
+diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
+index eb353561509a8..01a2b9de18b7f 100644
+--- a/drivers/spi/spi-npcm-fiu.c
++++ b/drivers/spi/spi-npcm-fiu.c
+@@ -353,8 +353,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
+ 		uma_cfg |= ilog2(op->cmd.buswidth);
+ 		uma_cfg |= ilog2(op->addr.buswidth)
+ 			<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+-		uma_cfg |= ilog2(op->dummy.buswidth)
+-			<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
++		if (op->dummy.nbytes)
++			uma_cfg |= ilog2(op->dummy.buswidth)
++				<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ 		uma_cfg |= ilog2(op->data.buswidth)
+ 			<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
+ 		uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
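The guard exists because ops without dummy cycles have op->dummy.buswidth == 0, and ilog2(0) is undefined, so the bus-width field may only be encoded when the phase is actually present. A hedged kernel-style sketch of the helper shape (hypothetical names):

#include <linux/log2.h>
#include <linux/types.h>

/* hypothetical helper; the shift and field names are illustrative */
static u32 encode_phase_buswidth(u32 reg, u8 buswidth, unsigned int shift,
                                 unsigned int nbytes)
{
        if (nbytes)             /* phase present: buswidth is 1/2/4/8 */
                reg |= ilog2(buswidth) << shift;
        return reg;             /* phase absent: leave the field at 0 */
}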
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 5574b4b61a25c..897bd575330c2 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -4058,6 +4058,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+ 
+ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+ {
++	if (dlci->gsm->dead)
++		return -EL2HLT;
+ 	if (dlci->adaption == 2) {
+ 		/* Send convergence layer type 2 empty data frame. */
+ 		gsm_modem_upd_via_data(dlci, brk);
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index d2d547b5da95a..d4029f008bb28 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2427,6 +2427,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 		.init			= pci_oxsemi_tornado_init,
+ 		.setup		= pci_oxsemi_tornado_setup,
+ 	},
++	/*
++	 * Brainboxes devices - all Oxsemi based
++	 */
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4027,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4028,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4029,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4019,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4016,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4015,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400A,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400E,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400C,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400B,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400F,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4010,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4011,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x401D,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x401E,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4013,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4017,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4018,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
+ 	{
+ 		.vendor         = PCI_VENDOR_ID_INTEL,
+ 		.device         = 0x8811,
+@@ -4911,6 +5058,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_b1_bt_1_115200 },
+ 
++	/*
++	 * IntaShield IS-100
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0D60,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_b2_1_115200 },
+ 	/*
+ 	 * IntaShield IS-200
+ 	 */
+@@ -4923,6 +5076,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,    /* 135a.0dc0 */
+ 		pbn_b2_4_115200 },
++	/*
++	 * IntaShield IX-100
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4027,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
++	/*
++	 * IntaShield IX-200
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4028,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_2_15625000 },
++	/*
++	 * IntaShield IX-400
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4029,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_4_15625000 },
+ 	/* Brainboxes Devices */
+ 	/*
+ 	* Brainboxes UC-101
+@@ -4938,10 +5112,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_1_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_1_115200 },
+ 	/*
+-	 * Brainboxes UC-257
++	 * Brainboxes UC-253/UC-734
+ 	 */
+-	{	PCI_VENDOR_ID_INTASHIELD, 0x0861,
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
+@@ -4977,6 +5155,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x08E2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x08E3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	/*
+ 	 * Brainboxes UC-310
+ 	 */
+@@ -4987,6 +5173,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	/*
+ 	 * Brainboxes UC-313
+ 	 */
++	{       PCI_VENDOR_ID_INTASHIELD, 0x08A1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{       PCI_VENDOR_ID_INTASHIELD, 0x08A2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	{       PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5001,6 +5195,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	/*
+ 	 * Brainboxes UC-346
+ 	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B01,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5012,6 +5210,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0A82,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5024,12 +5226,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_b2_4_115200 },
+ 	/*
+-	 * Brainboxes UC-420/431
++	 * Brainboxes UC-420
+ 	 */
+ 	{       PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_4_115200 },
++	/*
++	 * Brainboxes UC-607
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UC-836
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0D41,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
++	/*
++	 * Brainboxes UP-189
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-200
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B21,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B22,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B23,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-869
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C01,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C02,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C03,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-880
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C21,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C22,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C23,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	/*
+ 	 * Brainboxes PX-101
+ 	 */
+@@ -5062,7 +5346,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_oxsemi_4_15625000 },
++		pbn_oxsemi_2_15625000 },
+ 	/*
+ 	 * Brainboxes PX-260/PX-701
+ 	 */
+@@ -5070,6 +5354,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_oxsemi_4_15625000 },
++	/*
++	 * Brainboxes PX-275/279
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0E41,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_8_115200 },
+ 	/*
+ 	 * Brainboxes PX-310
+ 	 */
+@@ -5117,16 +5408,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_oxsemi_4_15625000 },
+ 	/*
+-	 * Brainboxes PX-803
++	 * Brainboxes PX-475
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x401D,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
++	/*
++	 * Brainboxes PX-803/PX-857
+ 	 */
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_b0_1_115200 },
++		pbn_b0_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4018,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_2_15625000 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_oxsemi_1_15625000 },
++		pbn_oxsemi_2_15625000 },
++	/*
++	 * Brainboxes PX-820
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4002,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b0_4_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4013,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_4_15625000 },
+ 	/*
+ 	 * Brainboxes PX-846
+ 	 */
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 83c419ac78bca..0c6c633c0924a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -147,7 +147,7 @@ static void __uart_start(struct tty_struct *tty)
+ 
+ 	/* Increment the runtime PM usage count for the active check below */
+ 	err = pm_runtime_get(&port_dev->dev);
+-	if (err < 0) {
++	if (err < 0 && err != -EINPROGRESS) {
+ 		pm_runtime_put_noidle(&port_dev->dev);
+ 		return;
+ 	}
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index e549022642e56..ea106ad665a1f 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 	if (WARN_ON(in && dev->ep0_out_pending)) {
+ 		ret = -ENODEV;
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_unlock;
+ 	}
+ 	if (WARN_ON(!in && dev->ep0_in_pending)) {
+ 		ret = -ENODEV;
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_unlock;
+ 	}
+ 
+ 	dev->req->buf = data;
+@@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 				"fail, usb_ep_queue returned %d\n", ret);
+ 		spin_lock_irqsave(&dev->lock, flags);
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_queue_failed;
+ 	}
+ 
+ 	ret = wait_for_completion_interruptible(&dev->ep0_done);
+@@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 		usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ 		wait_for_completion(&dev->ep0_done);
+ 		spin_lock_irqsave(&dev->lock, flags);
+-		goto out_done;
++		if (dev->ep0_status == -ECONNRESET)
++			dev->ep0_status = -EINTR;
++		goto out_interrupted;
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+-	ret = dev->ep0_status;
+ 
+-out_done:
++out_interrupted:
++	ret = dev->ep0_status;
++out_queue_failed:
+ 	dev->ep0_urb_queued = false;
+ out_unlock:
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+@@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 				"fail, usb_ep_queue returned %d\n", ret);
+ 		spin_lock_irqsave(&dev->lock, flags);
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_queue_failed;
+ 	}
+ 
+ 	ret = wait_for_completion_interruptible(&done);
+@@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 		usb_ep_dequeue(ep->ep, ep->req);
+ 		wait_for_completion(&done);
+ 		spin_lock_irqsave(&dev->lock, flags);
+-		goto out_done;
++		if (ep->status == -ECONNRESET)
++			ep->status = -EINTR;
++		goto out_interrupted;
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+-	ret = ep->status;
+ 
+-out_done:
++out_interrupted:
++	ret = ep->status;
++out_queue_failed:
+ 	ep->urb_queued = false;
+ out_unlock:
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 0547daf116a26..5df40759d77ad 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -19,7 +19,7 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
+ 		"Cypress ISD-300LP",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+ 
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
+ 		"Super Top",
+ 		"USB 2.0  SATA BRIDGE",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 1596afee6c86f..7ca611497a523 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1625,6 +1625,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ 				break;
+ 
++			if (IS_ERR_OR_NULL(port->partner))
++				break;
++
+ 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ 				typec_partner_set_svdm_version(port->partner,
+ 							       PD_VDO_SVDM_VER(p[0]));
+@@ -3970,6 +3973,8 @@ static void run_state_machine(struct tcpm_port *port)
+ 		port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
+ 						port->state == SRC_UNATTACHED) ||
+ 					       (port->enter_state == SNK_ATTACH_WAIT &&
++						port->state == SNK_UNATTACHED) ||
++					       (port->enter_state == SNK_DEBOUNCED &&
+ 						port->state == SNK_UNATTACHED));
+ 
+ 	port->enter_state = port->state;
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index cba2b113b28b0..a73114c1c6918 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -3440,11 +3440,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
+ 	}
+ 
+ 	info->fix.mmio_start = raddr;
++#if defined(__i386__) || defined(__ia64__)
+ 	/*
+ 	 * By using strong UC we force the MTRR to never have an
+ 	 * effect on the MMIO region on both non-PAT and PAT systems.
+ 	 */
+ 	par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
++#else
++	par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
++#endif
+ 	if (par->ati_regbase == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
+index ad65554b33c35..0be95b4e14fdb 100644
+--- a/drivers/video/fbdev/omap/omapfb_main.c
++++ b/drivers/video/fbdev/omap/omapfb_main.c
+@@ -1648,13 +1648,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
+ 	}
+ 	fbdev->int_irq = platform_get_irq(pdev, 0);
+ 	if (fbdev->int_irq < 0) {
+-		r = ENXIO;
++		r = -ENXIO;
+ 		goto cleanup;
+ 	}
+ 
+ 	fbdev->ext_irq = platform_get_irq(pdev, 1);
+ 	if (fbdev->ext_irq < 0) {
+-		r = ENXIO;
++		r = -ENXIO;
+ 		goto cleanup;
+ 	}
+ 
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 78d85dae8ec80..c4559768f00f6 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1931,10 +1931,10 @@ static void uvesafb_exit(void)
+ 		}
+ 	}
+ 
+-	cn_del_callback(&uvesafb_cn_id);
+ 	driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
+ 	platform_device_unregister(uvesafb_device);
+ 	platform_driver_unregister(&uvesafb_driver);
++	cn_del_callback(&uvesafb_cn_id);
+ }
+ 
+ module_exit(uvesafb_exit);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 4b0ba067e9c93..e40aafbfa7b9f 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -709,8 +709,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+ 		if (!d_same_name(udentry, pdentry, &dname))
+ 			goto next;
+ 
++		found = dget_dlock(udentry);
+ 		spin_unlock(&udentry->d_lock);
+-		found = dget(udentry);
+ 		break;
+ next:
+ 		spin_unlock(&udentry->d_lock);
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index a9d82bbb4729e..0b52bc9681085 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -1736,10 +1736,8 @@ repack:
+ 			le_b = NULL;
+ 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
+ 					      0, NULL, &mi_b);
+-			if (!attr_b) {
+-				err = -ENOENT;
+-				goto out;
+-			}
++			if (!attr_b)
++				return -ENOENT;
+ 
+ 			attr = attr_b;
+ 			le = le_b;
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 42631b31adf17..7c01735d1219d 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 
+ 	if (!attr->non_res) {
+ 		lsize = le32_to_cpu(attr->res.data_size);
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
++		/* attr is resident: lsize < record_size (1K or 4K) */
++		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 		if (err < 0)
+ 			goto out;
+ 
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
++		/* attr is nonresident.
++		 * The worst case:
++		 * 1T (2^40) extremely fragmented file.
++		 * cluster = 4K (2^12) => 2^28 fragments
++		 * 2^9 fragments per one record => 2^19 records
++		 * 2^5 bytes of ATTR_LIST_ENTRY per one record => 2^24 bytes.
++		 *
++		 * the result is 16M bytes per attribute list.
++		 * Use kvmalloc to allocate in range [several Kbytes - dozen Mbytes]
++		 */
++		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 107e808e06eae..d66055e30aff9 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -659,7 +659,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 		wnd->bits_last = wbits;
+ 
+ 	wnd->free_bits =
+-		kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
++		kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
++
+ 	if (!wnd->free_bits)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index 063a6654199bc..ec0566b322d5d 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -309,7 +309,11 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 		return 0;
+ 	}
+ 
+-	dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++	/* NTFS: symlinks are "dir + reparse" or "file + reparse" */
++	if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
++		dt_type = DT_LNK;
++	else
++		dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+ 
+ 	return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 16bd9faa2d28b..05fb3dbe39076 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -2148,7 +2148,7 @@ out1:
+ 
+ 	for (i = 0; i < pages_per_frame; i++) {
+ 		pg = pages[i];
+-		if (i == idx)
++		if (i == idx || !pg)
+ 			continue;
+ 		unlock_page(pg);
+ 		put_page(pg);
+@@ -3208,6 +3208,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
+ 		if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
+ 			continue;
+ 
++		/* Check simple case when parent inode equals current inode. */
++		if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
++			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++			continue;
++		}
++
+ 		/* ntfs_iget5 may sleep. */
+ 		dir = ntfs_iget5(sb, &fname->home, NULL);
+ 		if (IS_ERR(dir)) {
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 12f28cdf5c838..98ccb66508583 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -2168,8 +2168,10 @@ file_is_valid:
+ 
+ 			if (!page) {
+ 				page = kmalloc(log->page_size, GFP_NOFS);
+-				if (!page)
+-					return -ENOMEM;
++				if (!page) {
++					err = -ENOMEM;
++					goto out;
++				}
+ 			}
+ 
+ 			/*
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 9ddb2ab23b954..fbfe21dbb4259 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -983,18 +983,11 @@ out:
+ 	if (err)
+ 		return err;
+ 
+-	mark_inode_dirty(&ni->vfs_inode);
++	mark_inode_dirty_sync(&ni->vfs_inode);
+ 	/* verify(!ntfs_update_mftmirr()); */
+ 
+-	/*
+-	 * If we used wait=1, sync_inode_metadata waits for the io for the
+-	 * inode to finish. It hangs when media is removed.
+-	 * So wait=0 is sent down to sync_inode_metadata
+-	 * and filemap_fdatawrite is used for the data blocks.
+-	 */
+-	err = sync_inode_metadata(&ni->vfs_inode, 0);
+-	if (!err)
+-		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
++	/* write mft record on disk. */
++	err = _ni_write_inode(&ni->vfs_inode, 1);
+ 
+ 	return err;
+ }
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index c12ebffc94da4..02cc91ed88357 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -193,8 +193,9 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ {
+ 	const struct MFT_REC *rec = mi->mrec;
+ 	u32 used = le32_to_cpu(rec->used);
+-	u32 t32, off, asize;
++	u32 t32, off, asize, prev_type;
+ 	u16 t16;
++	u64 data_size, alloc_size, tot_size;
+ 
+ 	if (!attr) {
+ 		u32 total = le32_to_cpu(rec->total);
+@@ -213,6 +214,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 		if (!is_rec_inuse(rec))
+ 			return NULL;
+ 
++		prev_type = 0;
+ 		attr = Add2Ptr(rec, off);
+ 	} else {
+ 		/* Check if input attr inside record. */
+@@ -226,11 +228,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 			return NULL;
+ 		}
+ 
+-		if (off + asize < off) {
+-			/* Overflow check. */
++		/* Overflow check. */
++		if (off + asize < off)
+ 			return NULL;
+-		}
+ 
++		prev_type = le32_to_cpu(attr->type);
+ 		attr = Add2Ptr(attr, asize);
+ 		off += asize;
+ 	}
+@@ -250,7 +252,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 
+ 	/* 0x100 is last known attribute for now. */
+ 	t32 = le32_to_cpu(attr->type);
+-	if ((t32 & 0xf) || (t32 > 0x100))
++	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
++		return NULL;
++
++	/* attributes in record must be ordered by type */
++	if (t32 < prev_type)
+ 		return NULL;
+ 
+ 	/* Check overflow and boundary. */
+@@ -259,16 +265,15 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 
+ 	/* Check size of attribute. */
+ 	if (!attr->non_res) {
++		/* Check resident fields. */
+ 		if (asize < SIZEOF_RESIDENT)
+ 			return NULL;
+ 
+ 		t16 = le16_to_cpu(attr->res.data_off);
+-
+ 		if (t16 > asize)
+ 			return NULL;
+ 
+-		t32 = le32_to_cpu(attr->res.data_size);
+-		if (t16 + t32 > asize)
++		if (t16 + le32_to_cpu(attr->res.data_size) > asize)
+ 			return NULL;
+ 
+ 		t32 = sizeof(short) * attr->name_len;
+@@ -278,21 +283,52 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 		return attr;
+ 	}
+ 
+-	/* Check some nonresident fields. */
+-	if (attr->name_len &&
+-	    le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
+-		    le16_to_cpu(attr->nres.run_off)) {
++	/* Check nonresident fields. */
++	if (attr->non_res != 1)
++		return NULL;
++
++	t16 = le16_to_cpu(attr->nres.run_off);
++	if (t16 > asize)
++		return NULL;
++
++	t32 = sizeof(short) * attr->name_len;
++	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
++		return NULL;
++
++	/* Check start/end vcn. */
++	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
++		return NULL;
++
++	data_size = le64_to_cpu(attr->nres.data_size);
++	if (le64_to_cpu(attr->nres.valid_size) > data_size)
+ 		return NULL;
+-	}
+ 
+-	if (attr->nres.svcn || !is_attr_ext(attr)) {
++	alloc_size = le64_to_cpu(attr->nres.alloc_size);
++	if (data_size > alloc_size)
++		return NULL;
++
++	t32 = mi->sbi->cluster_mask;
++	if (alloc_size & t32)
++		return NULL;
++
++	if (!attr->nres.svcn && is_attr_ext(attr)) {
++		/* First segment of sparse/compressed attribute */
++		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
++			return NULL;
++
++		tot_size = le64_to_cpu(attr->nres.total_size);
++		if (tot_size & t32)
++			return NULL;
++
++		if (tot_size > alloc_size)
++			return NULL;
++	} else {
+ 		if (asize + 8 < SIZEOF_NONRESIDENT)
+ 			return NULL;
+ 
+ 		if (attr->nres.c_unit)
+ 			return NULL;
+-	} else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
+-		return NULL;
++	}
+ 
+ 	return attr;
+ }
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index d6b5170253a69..32c5de5699929 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -453,15 +453,23 @@ static struct proc_dir_entry *proc_info_root;
+  * ntfs3.1
+  * cluster size
+  * number of clusters
++ * total number of mft records
++ * number of used mft records ~= number of files + folders
++ * real state of ntfs "dirty"/"clean"
++ * current state of ntfs "dirty"/"clean"
+ */
+ static int ntfs3_volinfo(struct seq_file *m, void *o)
+ {
+ 	struct super_block *sb = m->private;
+ 	struct ntfs_sb_info *sbi = sb->s_fs_info;
+ 
+-	seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
+-		   sbi->volume.minor_ver, sbi->cluster_size,
+-		   sbi->used.bitmap.nbits);
++	seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n",
++		   sbi->volume.major_ver, sbi->volume.minor_ver,
++		   sbi->cluster_size, sbi->used.bitmap.nbits,
++		   sbi->mft.bitmap.nbits,
++		   sbi->mft.bitmap.nbits - wnd_zeroes(&sbi->mft.bitmap),
++		   sbi->volume.real_dirty ? "dirty" : "clean",
++		   (sbi->volume.flags & VOLUME_FLAG_DIRTY) ? "dirty" : "clean");
+ 
+ 	return 0;
+ }
+@@ -490,7 +498,12 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
+ 	struct super_block *sb = pde_data(file_inode(file));
+ 	struct ntfs_sb_info *sbi = sb->s_fs_info;
+ 	ssize_t ret = count;
+-	u8 *label = kmalloc(count, GFP_NOFS);
++	u8 *label;
++
++	if (sb_rdonly(sb))
++		return -EROFS;
++
++	label = kmalloc(count, GFP_NOFS);
+ 
+ 	if (!label)
+ 		return -ENOMEM;
+@@ -838,7 +851,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ 	struct ntfs_sb_info *sbi = sb->s_fs_info;
+ 	int err;
+ 	u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
+-	u64 sectors, clusters, mlcn, mlcn2;
++	u64 sectors, clusters, mlcn, mlcn2, dev_size0;
+ 	struct NTFS_BOOT *boot;
+ 	struct buffer_head *bh;
+ 	struct MFT_REC *rec;
+@@ -847,6 +860,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ 	u32 boot_off = 0;
+ 	const char *hint = "Primary boot";
+ 
++	/* Save original dev_size. Used with alternative boot. */
++	dev_size0 = dev_size;
++
+ 	sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+ 
+ 	bh = ntfs_bread(sb, 0);
+@@ -1084,9 +1100,9 @@ check_boot:
+ 	}
+ 
+ out:
+-	if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
++	if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
+ 		u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
+-		u64 lbo = dev_size - sizeof(*boot);
++		u64 lbo = dev_size0 - sizeof(*boot);
+ 
+ 		/*
+ 	 	 * Try alternative boot (last sector)
+@@ -1100,6 +1116,7 @@ out:
+ 
+ 		boot_off = lbo & (block_size - 1);
+ 		hint = "Alternative boot";
++		dev_size = dev_size0; /* restore original size. */
+ 		goto check_boot;
+ 	}
+ 	brelse(bh);
+@@ -1388,7 +1405,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	}
+ 
+ 	bytes = inode->i_size;
+-	sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
++	sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
+ 	if (!t) {
+ 		err = -ENOMEM;
+ 		goto put_inode_out;
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index f9544d9b670d3..ac65f0626cfc9 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -68,8 +68,7 @@ enum drm_sched_priority {
+ 	DRM_SCHED_PRIORITY_HIGH,
+ 	DRM_SCHED_PRIORITY_KERNEL,
+ 
+-	DRM_SCHED_PRIORITY_COUNT,
+-	DRM_SCHED_PRIORITY_UNSET = -2
++	DRM_SCHED_PRIORITY_COUNT
+ };
+ 
+ /* Used to chose between FIFO and RR jobs scheduling */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 8f9a459e16718..7702f078ef4ad 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -579,6 +579,7 @@
+ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+ #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+ #define PCI_DEVICE_ID_AMD_MI200_DF_F3	0x14d3
++#define PCI_DEVICE_ID_AMD_VANGOGH_USB	0x163a
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index a427f13c757f4..85b86768c0b91 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -767,7 +767,7 @@ struct power_supply_battery_info {
+ 	int bti_resistance_tolerance;
+ };
+ 
+-extern struct atomic_notifier_head power_supply_notifier;
++extern struct blocking_notifier_head power_supply_notifier;
+ extern int power_supply_reg_notifier(struct notifier_block *nb);
+ extern void power_supply_unreg_notifier(struct notifier_block *nb);
+ #if IS_ENABLED(CONFIG_POWER_SUPPLY)
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index 87f8e1793af15..295d63437e4d8 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -423,6 +423,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
+ 
+ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
+ 			    struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
++int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
+ 
+ /* dapm path setup */
+ int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index b27f84580c5b0..cf34810882347 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -1125,6 +1125,8 @@ struct snd_soc_pcm_runtime {
+ 	unsigned int pop_wait:1;
+ 	unsigned int fe_compr:1; /* for Dynamic PCM */
+ 
++	bool initialized;
++
+ 	int num_components;
+ 	struct snd_soc_component *components[]; /* CPU/Codec/Platform */
+ };
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 1bce2208b65c4..d61620e080d10 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -332,7 +332,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
+ 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ 	unsigned final_ret = io_fixup_rw_res(req, ret);
+ 
+-	if (req->flags & REQ_F_CUR_POS)
++	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
+ 		req->file->f_pos = rw->kiocb.ki_pos;
+ 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
+ 		if (!__io_complete_rw_common(req, ret)) {
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 6d25c619911f1..58b4bbb7c3967 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -603,11 +603,12 @@ again:
+  * dup_anon_vma() - Helper function to duplicate anon_vma
+  * @dst: The destination VMA
+  * @src: The source VMA
++ * @dup: Pointer to the destination VMA when successful.
+  *
+  * Returns: 0 on success.
+  */
+ static inline int dup_anon_vma(struct vm_area_struct *dst,
+-			       struct vm_area_struct *src)
++		struct vm_area_struct *src, struct vm_area_struct **dup)
+ {
+ 	/*
+ 	 * Easily overlooked: when mprotect shifts the boundary, make sure the
+@@ -615,9 +616,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
+ 	 * anon pages imported.
+ 	 */
+ 	if (src->anon_vma && !dst->anon_vma) {
++		int ret;
++
+ 		vma_start_write(dst);
+ 		dst->anon_vma = src->anon_vma;
+-		return anon_vma_clone(dst, src);
++		ret = anon_vma_clone(dst, src);
++		if (ret)
++			return ret;
++
++		*dup = dst;
+ 	}
+ 
+ 	return 0;
+@@ -644,6 +651,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 	       unsigned long start, unsigned long end, pgoff_t pgoff,
+ 	       struct vm_area_struct *next)
+ {
++	struct vm_area_struct *anon_dup = NULL;
+ 	bool remove_next = false;
+ 	struct vma_prepare vp;
+ 
+@@ -651,7 +659,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 		int ret;
+ 
+ 		remove_next = true;
+-		ret = dup_anon_vma(vma, next);
++		ret = dup_anon_vma(vma, next, &anon_dup);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -683,6 +691,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 	return 0;
+ 
+ nomem:
++	if (anon_dup)
++		unlink_anon_vmas(anon_dup);
+ 	return -ENOMEM;
+ }
+ 
+@@ -881,6 +891,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ {
+ 	struct vm_area_struct *curr, *next, *res;
+ 	struct vm_area_struct *vma, *adjust, *remove, *remove2;
++	struct vm_area_struct *anon_dup = NULL;
+ 	struct vma_prepare vp;
+ 	pgoff_t vma_pgoff;
+ 	int err = 0;
+@@ -945,16 +956,16 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
+ 		remove = next;				/* case 1 */
+ 		vma_end = next->vm_end;
+-		err = dup_anon_vma(prev, next);
++		err = dup_anon_vma(prev, next, &anon_dup);
+ 		if (curr) {				/* case 6 */
+ 			remove = curr;
+ 			remove2 = next;
+ 			if (!next->anon_vma)
+-				err = dup_anon_vma(prev, curr);
++				err = dup_anon_vma(prev, curr, &anon_dup);
+ 		}
+ 	} else if (merge_prev) {			/* case 2 */
+ 		if (curr) {
+-			err = dup_anon_vma(prev, curr);
++			err = dup_anon_vma(prev, curr, &anon_dup);
+ 			if (end == curr->vm_end) {	/* case 7 */
+ 				remove = curr;
+ 			} else {			/* case 5 */
+@@ -968,7 +979,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ 			vma_end = addr;
+ 			adjust = next;
+ 			adj_start = -(prev->vm_end - addr);
+-			err = dup_anon_vma(next, prev);
++			err = dup_anon_vma(next, prev, &anon_dup);
+ 		} else {
+ 			/*
+ 			 * Note that cases 3 and 8 are the ONLY ones where prev
+@@ -981,17 +992,17 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ 			if (curr) {			/* case 8 */
+ 				vma_pgoff = curr->vm_pgoff;
+ 				remove = curr;
+-				err = dup_anon_vma(next, curr);
++				err = dup_anon_vma(next, curr, &anon_dup);
+ 			}
+ 		}
+ 	}
+ 
+ 	/* Error in anon_vma clone. */
+ 	if (err)
+-		return NULL;
++		goto anon_vma_fail;
+ 
+ 	if (vma_iter_prealloc(vmi))
+-		return NULL;
++		goto prealloc_fail;
+ 
+ 	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
+ 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
+@@ -1024,6 +1035,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ 	khugepaged_enter_vma(res, vm_flags);
+ 
+ 	return res;
++
++prealloc_fail:
++	if (anon_dup)
++		unlink_anon_vmas(anon_dup);
++
++anon_vma_fail:
++	vma_iter_set(vmi, addr);
++	vma_iter_load(vmi);
++	return NULL;
+ }
+ 
+ /*
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index e43d9508e7a9c..6a05bed3cb46d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7604,6 +7604,16 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++static void audit_log_obj_reset(const struct nft_table *table,
++				unsigned int base_seq, unsigned int nentries)
++{
++	char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
++
++	audit_log_nfcfg(buf, table->family, nentries,
++			AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
++	kfree(buf);
++}
++
+ struct nft_obj_filter {
+ 	char		*table;
+ 	u32		type;
+@@ -7618,8 +7628,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct net *net = sock_net(skb->sk);
+ 	int family = nfmsg->nfgen_family;
+ 	struct nftables_pernet *nft_net;
++	unsigned int entries = 0;
+ 	struct nft_object *obj;
+ 	bool reset = false;
++	int rc = 0;
+ 
+ 	if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+ 		reset = true;
+@@ -7632,6 +7644,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+ 			continue;
+ 
++		entries = 0;
+ 		list_for_each_entry_rcu(obj, &table->objects, list) {
+ 			if (!nft_is_active(net, obj))
+ 				goto cont;
+@@ -7647,34 +7660,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 			    filter->type != NFT_OBJECT_UNSPEC &&
+ 			    obj->ops->type->type != filter->type)
+ 				goto cont;
+-			if (reset) {
+-				char *buf = kasprintf(GFP_ATOMIC,
+-						      "%s:%u",
+-						      table->name,
+-						      nft_net->base_seq);
+-
+-				audit_log_nfcfg(buf,
+-						family,
+-						obj->handle,
+-						AUDIT_NFT_OP_OBJ_RESET,
+-						GFP_ATOMIC);
+-				kfree(buf);
+-			}
+ 
+-			if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
+-						    cb->nlh->nlmsg_seq,
+-						    NFT_MSG_NEWOBJ,
+-						    NLM_F_MULTI | NLM_F_APPEND,
+-						    table->family, table,
+-						    obj, reset) < 0)
+-				goto done;
++			rc = nf_tables_fill_obj_info(skb, net,
++						     NETLINK_CB(cb->skb).portid,
++						     cb->nlh->nlmsg_seq,
++						     NFT_MSG_NEWOBJ,
++						     NLM_F_MULTI | NLM_F_APPEND,
++						     table->family, table,
++						     obj, reset);
++			if (rc < 0)
++				break;
+ 
++			entries++;
+ 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ cont:
+ 			idx++;
+ 		}
++		if (reset && entries)
++			audit_log_obj_reset(table, nft_net->base_seq, entries);
++		if (rc < 0)
++			break;
+ 	}
+-done:
+ 	rcu_read_unlock();
+ 
+ 	cb->args[0] = idx;
+@@ -7779,7 +7785,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 		audit_log_nfcfg(buf,
+ 				family,
+-				obj->handle,
++				1,
+ 				AUDIT_NFT_OP_OBJ_RESET,
+ 				GFP_ATOMIC);
+ 		kfree(buf);
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index e57eb168ee130..984f6f106e4ac 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -700,8 +700,8 @@ nfulnl_log_packet(struct net *net,
+ 	unsigned int plen = 0;
+ 	struct nfnl_log_net *log = nfnl_log_pernet(net);
+ 	const struct nfnl_ct_hook *nfnl_ct = NULL;
++	enum ip_conntrack_info ctinfo = 0;
+ 	struct nf_conn *ct = NULL;
+-	enum ip_conntrack_info ctinfo;
+ 
+ 	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
+ 		li = li_user;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index da4c179a4d418..6663e971a13e7 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -366,7 +366,7 @@ static int u32_init(struct tcf_proto *tp)
+ 	idr_init(&root_ht->handle_idr);
+ 
+ 	if (tp_c == NULL) {
+-		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
++		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
+ 		if (tp_c == NULL) {
+ 			kfree(root_ht);
+ 			return -ENOBUFS;
+diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
+index d479f8da8f381..50cbd767ea9dd 100644
+--- a/rust/kernel/types.rs
++++ b/rust/kernel/types.rs
+@@ -6,7 +6,7 @@ use crate::init::{self, PinInit};
+ use alloc::boxed::Box;
+ use core::{
+     cell::UnsafeCell,
+-    marker::PhantomData,
++    marker::{PhantomData, PhantomPinned},
+     mem::MaybeUninit,
+     ops::{Deref, DerefMut},
+     ptr::NonNull,
+@@ -206,17 +206,26 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
+ ///
+ /// This is meant to be used with FFI objects that are never interpreted by Rust code.
+ #[repr(transparent)]
+-pub struct Opaque<T>(MaybeUninit<UnsafeCell<T>>);
++pub struct Opaque<T> {
++    value: UnsafeCell<MaybeUninit<T>>,
++    _pin: PhantomPinned,
++}
+ 
+ impl<T> Opaque<T> {
+     /// Creates a new opaque value.
+     pub const fn new(value: T) -> Self {
+-        Self(MaybeUninit::new(UnsafeCell::new(value)))
++        Self {
++            value: UnsafeCell::new(MaybeUninit::new(value)),
++            _pin: PhantomPinned,
++        }
+     }
+ 
+     /// Creates an uninitialised value.
+     pub const fn uninit() -> Self {
+-        Self(MaybeUninit::uninit())
++        Self {
++            value: UnsafeCell::new(MaybeUninit::uninit()),
++            _pin: PhantomPinned,
++        }
+     }
+ 
+     /// Creates a pin-initializer from the given initializer closure.
+@@ -240,7 +249,7 @@ impl<T> Opaque<T> {
+ 
+     /// Returns a raw pointer to the opaque data.
+     pub fn get(&self) -> *mut T {
+-        UnsafeCell::raw_get(self.0.as_ptr())
++        UnsafeCell::get(&self.value).cast::<T>()
+     }
+ 
+     /// Gets the value behind `this`.
+@@ -248,7 +257,7 @@ impl<T> Opaque<T> {
+     /// This function is useful to get access to the value without creating intermediate
+     /// references.
+     pub const fn raw_get(this: *const Self) -> *mut T {
+-        UnsafeCell::raw_get(this.cast::<UnsafeCell<T>>())
++        UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
+     }
+ }
+ 
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 2873420c9aca8..bc03b5692983c 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -343,6 +343,12 @@ static const struct config_entry config_table[] = {
+ 					DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 				}
+ 			},
++			{
++				.ident = "Google firmware",
++				.matches = {
++					DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++				}
++			},
+ 			{}
+ 		}
+ 	},
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index 581b334a6631d..3bbe850916493 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -59,9 +59,6 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
+ 	bool micbias_up = false;
+ 	int retries = 0;
+ 
+-	/* Disable ground switch */
+-	snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+-
+ 	/* Drive headphones/lineout */
+ 	snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
+ 			    DA7219_HP_L_AMP_OE_MASK,
+@@ -155,9 +152,6 @@ static void da7219_aad_hptest_work(struct work_struct *work)
+ 		tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
+ 	}
+ 
+-	/* Disable ground switch */
+-	snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+-
+ 	/* Ensure gain ramping at fastest rate */
+ 	gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
+ 	snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
+@@ -421,6 +415,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
+ 			 * handle a removal, and we can check at the end of
+ 			 * hptest if we have a valid result or not.
+ 			 */
++
++			cancel_delayed_work_sync(&da7219_aad->jack_det_work);
++			/* Disable ground switch */
++			snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
++
+ 			if (statusa & DA7219_JACK_TYPE_STS_MASK) {
+ 				report |= SND_JACK_HEADSET;
+ 				mask |=	SND_JACK_HEADSET | SND_JACK_LINEOUT;
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index a506d940a2ead..fae04e9fae4e7 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3251,6 +3251,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
+ 				RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
+ 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
+ 				RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
++		regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
++				RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
+ 	}
+ 	rt5645_irq(0, rt5645);
+ 
+diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
+index 86bd6c18a9440..41076be238542 100644
+--- a/sound/soc/codecs/tas2780.c
++++ b/sound/soc/codecs/tas2780.c
+@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
+ 		usleep_range(2000, 2050);
+ 	}
+ 
+-	snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
++	ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ 				TAS2780_RST);
+ 	if (ret)
+ 		dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
+diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
+index b976c1946286a..420bbf588efea 100644
+--- a/sound/soc/codecs/tlv320adc3xxx.c
++++ b/sound/soc/codecs/tlv320adc3xxx.c
+@@ -293,7 +293,7 @@
+ #define ADC3XXX_BYPASS_RPGA		0x80
+ 
+ /* MICBIAS control bits */
+-#define ADC3XXX_MICBIAS_MASK		0x2
++#define ADC3XXX_MICBIAS_MASK		0x3
+ #define ADC3XXX_MICBIAS1_SHIFT		5
+ #define ADC3XXX_MICBIAS2_SHIFT		3
+ 
+@@ -1099,7 +1099,7 @@ static int adc3xxx_parse_dt_micbias(struct adc3xxx *adc3xxx,
+ 	unsigned int val;
+ 
+ 	if (!of_property_read_u32(np, propname, &val)) {
+-		if (val >= ADC3XXX_MICBIAS_AVDD) {
++		if (val > ADC3XXX_MICBIAS_AVDD) {
+ 			dev_err(dev, "Invalid property value for '%s'\n", propname);
+ 			return -EINVAL;
+ 		}
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index 76b5bfc288fde..bab7d34cf585b 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -52,8 +52,8 @@ struct codec_priv {
+ 	unsigned long mclk_freq;
+ 	unsigned long free_freq;
+ 	u32 mclk_id;
+-	u32 fll_id;
+-	u32 pll_id;
++	int fll_id;
++	int pll_id;
+ };
+ 
+ /**
+@@ -206,7 +206,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
+ 	}
+ 
+ 	/* Specific configuration for PLL */
+-	if (codec_priv->pll_id && codec_priv->fll_id) {
++	if (codec_priv->pll_id >= 0 && codec_priv->fll_id >= 0) {
+ 		if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
+ 			pll_out = priv->sample_rate * 384;
+ 		else
+@@ -248,7 +248,7 @@ static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream)
+ 
+ 	priv->streams &= ~BIT(substream->stream);
+ 
+-	if (!priv->streams && codec_priv->pll_id && codec_priv->fll_id) {
++	if (!priv->streams && codec_priv->pll_id >= 0 && codec_priv->fll_id >= 0) {
+ 		/* Force freq to be free_freq to avoid error message in codec */
+ 		ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
+ 					     codec_priv->mclk_id,
+@@ -621,6 +621,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	priv->card.dapm_routes = audio_map;
+ 	priv->card.num_dapm_routes = ARRAY_SIZE(audio_map);
+ 	priv->card.driver_name = DRIVER_NAME;
++
++	priv->codec_priv.fll_id = -1;
++	priv->codec_priv.pll_id = -1;
++
+ 	/* Diversify the card configurations */
+ 	if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
+ 		codec_dai_name = "cs42888";
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index 0745bf6a09aa0..5005d3c9c659a 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -701,10 +701,12 @@ static int asoc_simple_probe(struct platform_device *pdev)
+ 		struct snd_soc_dai_link *dai_link = priv->dai_link;
+ 		struct simple_dai_props *dai_props = priv->dai_props;
+ 
++		ret = -EINVAL;
++
+ 		cinfo = dev->platform_data;
+ 		if (!cinfo) {
+ 			dev_err(dev, "no info for asoc-simple-card\n");
+-			return -EINVAL;
++			goto err;
+ 		}
+ 
+ 		if (!cinfo->name ||
+@@ -713,7 +715,7 @@ static int asoc_simple_probe(struct platform_device *pdev)
+ 		    !cinfo->platform ||
+ 		    !cinfo->cpu_dai.name) {
+ 			dev_err(dev, "insufficient asoc_simple_card_info settings\n");
+-			return -EINVAL;
++			goto err;
+ 		}
+ 
+ 		cpus			= dai_link->cpus;
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 93544eac23ad8..a23a7a1a35815 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -366,6 +366,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		/* No Jack */
+ 		.driver_data = (void *)SOF_SDW_TGL_HDMI,
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B14"),
++		},
++		/* No Jack */
++		.driver_data = (void *)SOF_SDW_TGL_HDMI,
++	},
++
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
+index 4356cc320fea0..10b5fe5a3af85 100644
+--- a/sound/soc/soc-component.c
++++ b/sound/soc/soc-component.c
+@@ -242,6 +242,7 @@ int snd_soc_component_notify_control(struct snd_soc_component *component,
+ 	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ 	struct snd_kcontrol *kctl;
+ 
++	/* When updating, change also snd_soc_dapm_widget_name_cmp() */
+ 	if (component->name_prefix)
+ 		snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl);
+ 	else
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 1a0bde23f5e6f..2d85164457f73 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1259,7 +1259,7 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
+ 	snd_soc_runtime_get_dai_fmt(rtd);
+ 	ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+ 	if (ret)
+-		return ret;
++		goto err;
+ 
+ 	/* add DPCM sysfs entries */
+ 	soc_dpcm_debugfs_add(rtd);
+@@ -1284,17 +1284,26 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
+ 	/* create compress_device if possible */
+ 	ret = snd_soc_dai_compress_new(cpu_dai, rtd, num);
+ 	if (ret != -ENOTSUPP)
+-		return ret;
++		goto err;
+ 
+ 	/* create the pcm */
+ 	ret = soc_new_pcm(rtd, num);
+ 	if (ret < 0) {
+ 		dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
+ 			dai_link->stream_name, ret);
+-		return ret;
++		goto err;
+ 	}
+ 
+-	return snd_soc_pcm_dai_new(rtd);
++	ret = snd_soc_pcm_dai_new(rtd);
++	if (ret < 0)
++		goto err;
++
++	rtd->initialized = true;
++
++	return 0;
++err:
++	snd_soc_link_exit(rtd);
++	return ret;
+ }
+ 
+ static void soc_set_name_prefix(struct snd_soc_card *card,
+@@ -1892,7 +1901,8 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
+ 
+ 	/* release machine specific resources */
+ 	for_each_card_rtds(card, rtd)
+-		snd_soc_link_exit(rtd);
++		if (rtd->initialized)
++			snd_soc_link_exit(rtd);
+ 	/* remove and free each DAI */
+ 	soc_remove_link_dais(card);
+ 	soc_remove_link_components(card);
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 3091e8160bad7..5fd32185fe63d 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2726,6 +2726,18 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
+ 
++int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s)
++{
++	struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
++	const char *wname = widget->name;
++
++	if (component->name_prefix)
++		wname += strlen(component->name_prefix) + 1; /* plus space */
++
++	return strcmp(wname, s);
++}
++EXPORT_SYMBOL_GPL(snd_soc_dapm_widget_name_cmp);
++
+ /*
+  * dapm_update_widget_flags() - Re-compute widget sink and source flags
+  * @w: The widget for which to update the flags
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index f5ece43d0ec24..f42c85df88a80 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -145,6 +145,13 @@ static const struct dmi_system_id community_key_platforms[] = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ 		}
+ 	},
++	{
++		.ident = "Google firmware",
++		.callback = chromebook_use_community_key,
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++		}
++	},
+ 	{},
+ };
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4e64842245e19..ab2b938502ebe 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2220,6 +2220,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2ab6, /* T+A devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2d87, /* Cayin device */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3336, /* HEM devices */
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 7ef43f72098e0..c779b9f2e6220 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -251,6 +251,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
+ 		.type	= PERF_TYPE_SOFTWARE,
+ 		.config = PERF_COUNT_SW_DUMMY,
+ 		.size	= sizeof(attr), /* to capture ABI version */
++		/* Avoid frequency mode for dummy events to avoid associated timers. */
++		.freq = 0,
++		.sample_period = 1,
+ 	};
+ 
+ 	return evsel__new_idx(&attr, evlist->core.nr_entries);
+@@ -277,8 +280,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+ 	evsel->core.attr.exclude_kernel = 1;
+ 	evsel->core.attr.exclude_guest = 1;
+ 	evsel->core.attr.exclude_hv = 1;
+-	evsel->core.attr.freq = 0;
+-	evsel->core.attr.sample_period = 1;
+ 	evsel->core.system_wide = system_wide;
+ 	evsel->no_aux_samples = true;
+ 	evsel->name = strdup("dummy:u");
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+index 5267c88496d51..99ed5bd6e8402 100755
+--- a/tools/testing/selftests/netfilter/nft_audit.sh
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -99,6 +99,12 @@ do_test 'nft add counter t1 c1' \
+ do_test 'nft add counter t2 c1; add counter t2 c2' \
+ 'table=t2 family=2 entries=2 op=nft_register_obj'
+ 
++for ((i = 3; i <= 500; i++)); do
++	echo "add counter t2 c$i"
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=498 op=nft_register_obj'
++
+ # adding/updating quotas
+ 
+ do_test 'nft add quota t1 q1 { 10 bytes }' \
+@@ -107,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
+ do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
+ 'table=t2 family=2 entries=2 op=nft_register_obj'
+ 
++for ((i = 3; i <= 500; i++)); do
++	echo "add quota t2 q$i { 10 bytes }"
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=498 op=nft_register_obj'
++
+ # changing the quota value triggers obj update path
+ do_test 'nft add quota t1 q1 { 20 bytes }' \
+ 'table=t1 family=2 entries=1 op=nft_register_obj'
+@@ -156,6 +168,40 @@ done
+ do_test 'nft reset set t1 s' \
+ 'table=t1 family=2 entries=3 op=nft_reset_setelem'
+ 
++# resetting counters
++
++do_test 'nft reset counter t1 c1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset counters t1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset counters t2' \
++'table=t2 family=2 entries=342 op=nft_reset_obj
++table=t2 family=2 entries=158 op=nft_reset_obj'
++
++do_test 'nft reset counters' \
++'table=t1 family=2 entries=1 op=nft_reset_obj
++table=t2 family=2 entries=341 op=nft_reset_obj
++table=t2 family=2 entries=159 op=nft_reset_obj'
++
++# resetting quotas
++
++do_test 'nft reset quota t1 q1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset quotas t1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset quotas t2' \
++'table=t2 family=2 entries=315 op=nft_reset_obj
++table=t2 family=2 entries=185 op=nft_reset_obj'
++
++do_test 'nft reset quotas' \
++'table=t1 family=2 entries=1 op=nft_reset_obj
++table=t2 family=2 entries=314 op=nft_reset_obj
++table=t2 family=2 entries=186 op=nft_reset_obj'
++
+ # deleting rules
+ 
+ readarray -t handles < <(nft -a list chain t1 c1 | \


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-11-02 11:09 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-11-02 11:09 UTC (permalink / raw
  To: gentoo-commits

commit:     d3514ca1e8933510460de0862647c794cb1ce9c5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  2 11:09:05 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  2 11:09:05 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d3514ca1

Linux patch 6.5.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1009_linux-6.5.10.patch | 4421 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4425 insertions(+)

diff --git a/0000_README b/0000_README
index 63f98435..5af72671 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-6.5.9.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.9
 
+Patch:  1009_linux-6.5.10.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-6.5.10.patch b/1009_linux-6.5.10.patch
new file mode 100644
index 00000000..03dbc9cd
--- /dev/null
+++ b/1009_linux-6.5.10.patch
@@ -0,0 +1,4421 @@
+diff --git a/Makefile b/Makefile
+index fc83c079c4716..ab9f291c1d3f7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
+index b63bd4ad3143c..88a4b0d6d928d 100644
+--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
+@@ -64,7 +64,8 @@
+ 		compatible = "arm,armv7-timer";
+ 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+-			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
++			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
++			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ 		arm,cpu-registers-not-fw-configured;
+ 		clock-frequency = <24000000>;
+ 	};
+@@ -233,7 +234,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x20044000 0x20>;
+ 		interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER0>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -241,7 +242,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x20044020 0x20>;
+ 		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER1>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -249,7 +250,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x20044040 0x20>;
+ 		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER2>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -257,7 +258,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x20044060 0x20>;
+ 		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER3>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -265,7 +266,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x20044080 0x20>;
+ 		interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER4>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -273,7 +274,7 @@
+ 		compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
+ 		reg = <0x200440a0 0x20>;
+ 		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&cru PCLK_TIMER>, <&xin24m>;
++		clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER5>;
+ 		clock-names = "pclk", "timer";
+ 	};
+ 
+@@ -426,7 +427,7 @@
+ 
+ 	i2c0: i2c@20072000 {
+ 		compatible = "rockchip,rk3128-i2c", "rockchip,rk3288-i2c";
+-		reg = <20072000 0x1000>;
++		reg = <0x20072000 0x1000>;
+ 		interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ 		clock-names = "i2c";
+ 		clocks = <&cru PCLK_I2C0>;
+@@ -458,6 +459,7 @@
+ 		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ 		arm,pl330-broken-no-flushp;
++		arm,pl330-periph-burst;
+ 		clocks = <&cru ACLK_DMAC>;
+ 		clock-names = "apb_pclk";
+ 		#dma-cells = <1>;
+diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
+index 7ae8b620515c5..59f546a278f87 100644
+--- a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
+@@ -109,6 +109,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49022000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -142,6 +144,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49024000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -175,6 +179,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49026000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+index 46b8f9efd4131..3fcef3080eaec 100644
+--- a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+@@ -2043,6 +2043,8 @@
+ 				compatible = "ti,omap4-mcbsp";
+ 				reg = <0x0 0xff>; /* L4 Interconnect */
+ 				reg-names = "mpu";
++				clocks = <&l4_per_clkctrl OMAP4_MCBSP4_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
+index a03bca5a35844..97b0c3b5f573f 100644
+--- a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
+@@ -109,6 +109,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49022000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -142,6 +144,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49024000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -175,6 +179,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49026000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
+index 9808cd27e2cf9..67de96c7717db 100644
+--- a/arch/arm/mach-omap1/board-ams-delta.c
++++ b/arch/arm/mach-omap1/board-ams-delta.c
+@@ -550,6 +550,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
+ 	&ams_delta_nand_device,
+ 	&ams_delta_lcd_device,
+ 	&cx20442_codec_device,
++	&modem_nreset_device,
+ };
+ 
+ static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
+@@ -782,26 +783,28 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = {
+ 	{ },
+ };
+ 
++static int ams_delta_modem_pm_activate(struct device *dev)
++{
++	modem_priv.regulator = regulator_get(dev, "RESET#");
++	if (IS_ERR(modem_priv.regulator))
++		return -EPROBE_DEFER;
++
++	return 0;
++}
++
++static struct dev_pm_domain ams_delta_modem_pm_domain = {
++	.activate	= ams_delta_modem_pm_activate,
++};
++
+ static struct platform_device ams_delta_modem_device = {
+ 	.name	= "serial8250",
+ 	.id	= PLAT8250_DEV_PLATFORM1,
+ 	.dev		= {
+ 		.platform_data = ams_delta_modem_ports,
++		.pm_domain = &ams_delta_modem_pm_domain,
+ 	},
+ };
+ 
+-static int __init modem_nreset_init(void)
+-{
+-	int err;
+-
+-	err = platform_device_register(&modem_nreset_device);
+-	if (err)
+-		pr_err("Couldn't register the modem regulator device\n");
+-
+-	return err;
+-}
+-
+-
+ /*
+  * This function expects MODEM IRQ number already assigned to the port.
+  * The MODEM device requires its RESET# pin kept high during probe.
+@@ -833,37 +836,6 @@ static int __init ams_delta_modem_init(void)
+ }
+ arch_initcall_sync(ams_delta_modem_init);
+ 
+-static int __init late_init(void)
+-{
+-	int err;
+-
+-	err = modem_nreset_init();
+-	if (err)
+-		return err;
+-
+-	/*
+-	 * Once the modem device is registered, the modem_nreset
+-	 * regulator can be requested on behalf of that device.
+-	 */
+-	modem_priv.regulator = regulator_get(&ams_delta_modem_device.dev,
+-			"RESET#");
+-	if (IS_ERR(modem_priv.regulator)) {
+-		err = PTR_ERR(modem_priv.regulator);
+-		goto unregister;
+-	}
+-	return 0;
+-
+-unregister:
+-	platform_device_unregister(&ams_delta_modem_device);
+-	return err;
+-}
+-
+-static void __init ams_delta_init_late(void)
+-{
+-	omap1_init_late();
+-	late_init();
+-}
+-
+ static void __init ams_delta_map_io(void)
+ {
+ 	omap1_map_io();
+@@ -877,7 +849,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
+ 	.init_early	= omap1_init_early,
+ 	.init_irq	= omap1_init_irq,
+ 	.init_machine	= ams_delta_init,
+-	.init_late	= ams_delta_init_late,
++	.init_late	= omap1_init_late,
+ 	.init_time	= omap1_timer_init,
+ 	.restart	= omap1_restart,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
+index 410d17d1d4431..f618a6df29382 100644
+--- a/arch/arm/mach-omap1/timer32k.c
++++ b/arch/arm/mach-omap1/timer32k.c
+@@ -176,17 +176,18 @@ static u64 notrace omap_32k_read_sched_clock(void)
+ 	return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
+ }
+ 
++static struct timespec64 persistent_ts;
++static cycles_t cycles;
++static unsigned int persistent_mult, persistent_shift;
++
+ /**
+  * omap_read_persistent_clock64 -  Return time from a persistent clock.
++ * @ts: &struct timespec64 for the returned time
+  *
+  * Reads the time from a source which isn't disabled during PM, the
+  * 32k sync timer.  Convert the cycles elapsed since last read into
+  * nsecs and adds to a monotonically increasing timespec64.
+  */
+-static struct timespec64 persistent_ts;
+-static cycles_t cycles;
+-static unsigned int persistent_mult, persistent_shift;
+-
+ static void omap_read_persistent_clock64(struct timespec64 *ts)
+ {
+ 	unsigned long long nsecs;
+@@ -206,10 +207,9 @@ static void omap_read_persistent_clock64(struct timespec64 *ts)
+ /**
+  * omap_init_clocksource_32k - setup and register counter 32k as a
+  * kernel clocksource
+- * @pbase: base addr of counter_32k module
+- * @size: size of counter_32k to map
++ * @vbase: base addr of counter_32k module
+  *
+- * Returns 0 upon success or negative error code upon failure.
++ * Returns: %0 upon success or negative error code upon failure.
+  *
+  */
+ static int __init omap_init_clocksource_32k(void __iomem *vbase)
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+index b599909c44639..8b5a2c7381379 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+@@ -62,25 +62,23 @@
+ 		stdout-path = "serial0:115200n8";
+ 	};
+ 
+-	clocks {
+-		divclk4: divclk4 {
+-			compatible = "fixed-clock";
+-			#clock-cells = <0>;
+-			clock-frequency = <32768>;
+-			clock-output-names = "divclk4";
++	div1_mclk: divclk1 {
++		compatible = "gpio-gate-clock";
++		pinctrl-0 = <&audio_mclk>;
++		pinctrl-names = "default";
++		clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
++		#clock-cells = <0>;
++		enable-gpios = <&pm8994_gpios 15 0>;
++	};
+ 
+-			pinctrl-names = "default";
+-			pinctrl-0 = <&divclk4_pin_a>;
+-		};
++	divclk4: divclk4 {
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <32768>;
++		clock-output-names = "divclk4";
+ 
+-		div1_mclk: divclk1 {
+-			compatible = "gpio-gate-clock";
+-			pinctrl-0 = <&audio_mclk>;
+-			pinctrl-names = "default";
+-			clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+-			#clock-cells = <0>;
+-			enable-gpios = <&pm8994_gpios 15 0>;
+-		};
++		pinctrl-names = "default";
++		pinctrl-0 = <&divclk4_pin_a>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+index 47f55c7311e92..3c5719640fabf 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+@@ -11,26 +11,24 @@
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+ 
+ / {
+-	clocks {
+-		divclk1_cdc: divclk1 {
+-			compatible = "gpio-gate-clock";
+-			clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+-			#clock-cells = <0>;
+-			enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
++	divclk1_cdc: divclk1 {
++		compatible = "gpio-gate-clock";
++		clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
++		#clock-cells = <0>;
++		enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
+ 
+-			pinctrl-names = "default";
+-			pinctrl-0 = <&divclk1_default>;
+-		};
++		pinctrl-names = "default";
++		pinctrl-0 = <&divclk1_default>;
++	};
+ 
+-		divclk4: divclk4 {
+-			compatible = "fixed-clock";
+-			#clock-cells = <0>;
+-			clock-frequency = <32768>;
+-			clock-output-names = "divclk4";
++	divclk4: divclk4 {
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <32768>;
++		clock-output-names = "divclk4";
+ 
+-			pinctrl-names = "default";
+-			pinctrl-0 = <&divclk4_pin_a>;
+-		};
++		pinctrl-names = "default";
++		pinctrl-0 = <&divclk4_pin_a>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+index d1066edaea471..f8e9d90afab00 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+@@ -20,16 +20,14 @@
+ 	qcom,pmic-id = <0x20009 0x2000a 0x00 0x00>;
+ 	qcom,board-id = <31 0>;
+ 
+-	clocks {
+-		divclk2_haptics: divclk2 {
+-			compatible = "fixed-clock";
+-			#clock-cells = <0>;
+-			clock-frequency = <32768>;
+-			clock-output-names = "divclk2";
+-
+-			pinctrl-names = "default";
+-			pinctrl-0 = <&divclk2_pin_a>;
+-		};
++	divclk2_haptics: divclk2 {
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <32768>;
++		clock-output-names = "divclk2";
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&divclk2_pin_a>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
+index 3c3b6287cd274..eaa43f022a654 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
+@@ -173,7 +173,7 @@
+ 			compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
+ 			reg = <0x8800>;
+ 			gpio-controller;
+-			gpio-ranges = <&pmm8654au_2_gpios 0 0 12>;
++			gpio-ranges = <&pmm8654au_1_gpios 0 0 12>;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+index 08a3ad3e7ae92..de0a1f2af983b 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+@@ -68,15 +68,17 @@
+ 		simple-audio-card,format = "i2s";
+ 		simple-audio-card,name = "Haikou,I2S-codec";
+ 		simple-audio-card,mclk-fs = <512>;
++		simple-audio-card,frame-master = <&sgtl5000_codec>;
++		simple-audio-card,bitclock-master = <&sgtl5000_codec>;
+ 
+-		simple-audio-card,codec {
+-			clocks = <&sgtl5000_clk>;
++		sgtl5000_codec: simple-audio-card,codec {
+ 			sound-dai = <&sgtl5000>;
++			// Prevent the dai subsystem from overwriting the clock
++			// frequency. We are using a fixed-frequency oscillator.
++			system-clock-fixed;
+ 		};
+ 
+ 		simple-audio-card,cpu {
+-			bitclock-master;
+-			frame-master;
+ 			sound-dai = <&i2s0_8ch>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index 980c4534313a2..c0f430f045a9c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -493,6 +493,7 @@
+ 
+ &i2s0 {
+ 	pinctrl-0 = <&i2s0_2ch_bus>;
++	pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ 	rockchip,capture-channels = <2>;
+ 	rockchip,playback-channels = <2>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 928948e7c7bbb..bf1251cc71954 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -2430,6 +2430,16 @@
+ 					<4 RK_PA0 1 &pcfg_pull_none>;
+ 			};
+ 
++			i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
++				rockchip,pins =
++					<3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++					<3 RK_PD1 1 &pcfg_pull_none>,
++					<3 RK_PD2 1 &pcfg_pull_none>,
++					<3 RK_PD3 1 &pcfg_pull_none>,
++					<3 RK_PD7 1 &pcfg_pull_none>,
++					<4 RK_PA0 1 &pcfg_pull_none>;
++			};
++
+ 			i2s0_8ch_bus: i2s0-8ch-bus {
+ 				rockchip,pins =
+ 					<3 RK_PD0 1 &pcfg_pull_none>,
+diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
+index 781e39b3c009f..481e94e1f6857 100644
+--- a/arch/sparc/lib/checksum_32.S
++++ b/arch/sparc/lib/checksum_32.S
+@@ -453,5 +453,5 @@ ccslow:	cmp	%g1, 0
+  * we only bother with faults on loads... */
+ 
+ cc_fault:
+-	ret
++	retl
+ 	 clr	%o0
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index 637fa1df35124..c715097e92fd2 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -69,6 +69,8 @@ struct legacy_pic {
+ 	void (*make_irq)(unsigned int irq);
+ };
+ 
++void legacy_pic_pcat_compat(void);
++
+ extern struct legacy_pic *legacy_pic;
+ extern struct legacy_pic null_legacy_pic;
+ 
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index b3af2d45bbbb5..5190cc3db771e 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -27,6 +27,7 @@
+  *		_X	- regular server parts
+  *		_D	- micro server parts
+  *		_N,_P	- other mobile parts
++ *		_H	- premium mobile parts
+  *		_S	- other client parts
+  *
+  *		Historical OPTDIFFs:
+@@ -125,6 +126,7 @@
+ 
+ #define INTEL_FAM6_LUNARLAKE_M		0xBD
+ 
++#define INTEL_FAM6_ARROWLAKE_H		0xC5
+ #define INTEL_FAM6_ARROWLAKE		0xC6
+ 
+ /* "Small Core" Processors (Atom/E-Core) */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 53369c57751ec..9cc9f12679f14 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -148,6 +148,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
+ 		pr_debug("Local APIC address 0x%08x\n", madt->address);
+ 	}
+ 
++	if (madt->flags & ACPI_MADT_PCAT_COMPAT)
++		legacy_pic_pcat_compat();
++
+ 	/* ACPI 6.3 and newer support the online capable bit. */
+ 	if (acpi_gbl_FADT.header.revision > 6 ||
+ 	    (acpi_gbl_FADT.header.revision == 6 &&
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 4d8aff05a509e..17385d74465ee 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -32,6 +32,7 @@
+  */
+ static void init_8259A(int auto_eoi);
+ 
++static bool pcat_compat __ro_after_init;
+ static int i8259A_auto_eoi;
+ DEFINE_RAW_SPINLOCK(i8259A_lock);
+ 
+@@ -301,15 +302,32 @@ static void unmask_8259A(void)
+ 
+ static int probe_8259A(void)
+ {
++	unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
+ 	unsigned long flags;
+-	unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
+-	unsigned char new_val;
++
++	/*
++	 * If MADT has the PCAT_COMPAT flag set, then do not bother probing
++	 * for the PIC. Some BIOSes leave the PIC uninitialized and probing
++	 * fails.
++	 *
++	 * Right now this causes problems as quite some code depends on
++	 * nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
++	 * when the system has an IO/APIC because then PIC is not required
++	 * at all, except for really old machines where the timer interrupt
++	 * must be routed through the PIC. So just pretend that the PIC is
++	 * there and let legacy_pic->init() initialize it for nothing.
++	 *
++	 * Alternatively this could just try to initialize the PIC and
++	 * repeat the probe, but for cases where there is no PIC that's
++	 * just pointless.
++	 */
++	if (pcat_compat)
++		return nr_legacy_irqs();
++
+ 	/*
+-	 * Check to see if we have a PIC.
+-	 * Mask all except the cascade and read
+-	 * back the value we just wrote. If we don't
+-	 * have a PIC, we will read 0xff as opposed to the
+-	 * value we wrote.
++	 * Check to see if we have a PIC.  Mask all except the cascade and
++	 * read back the value we just wrote. If we don't have a PIC, we
++	 * will read 0xff as opposed to the value we wrote.
+ 	 */
+ 	raw_spin_lock_irqsave(&i8259A_lock, flags);
+ 
+@@ -431,5 +449,9 @@ static int __init i8259A_init_ops(void)
+ 
+ 	return 0;
+ }
+-
+ device_initcall(i8259A_init_ops);
++
++void __init legacy_pic_pcat_compat(void)
++{
++	pcat_compat = true;
++}
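
Why the PCAT_COMPAT shortcut above is needed: the reworded comment in
this hunk describes a mask-write-readback probe, and a BIOS that leaves
the PIC uninitialized can fail that probe. A small userspace simulation
of just the readback logic (stub register and illustrative names; on
real hardware these would be inb()/outb() on the PIC ports, which this
sketch deliberately does not touch):

#include <stdbool.h>
#include <stdio.h>

#define PIC_CASCADE_IR	2

static bool pic_present;		/* pretend-hardware state */
static unsigned char imr_latch = 0xff;	/* a floating bus reads 0xff */

static void pic_write_imr(unsigned char v)
{
	if (pic_present)
		imr_latch = v;		/* a real PIC latches the mask */
}

static unsigned char pic_read_imr(void)
{
	return pic_present ? imr_latch : 0xff;
}

static bool probe_pic(void)
{
	unsigned char probe_val = ~(1 << PIC_CASCADE_IR);

	pic_write_imr(probe_val);
	return pic_read_imr() == probe_val;	/* 0xff => no PIC */
}

int main(void)
{
	pic_present = false;
	printf("no PIC : probe %s\n", probe_pic() ? "hit" : "miss");
	pic_present = true;
	printf("PIC    : probe %s\n", probe_pic() ? "hit" : "miss");
	return 0;
}
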
+diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
+index bbc440c93e088..1123ef3ccf901 100644
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -15,6 +15,7 @@
+  * ( The serial nature of the boot logic and the CPU hotplug lock
+  *   protects against more than 2 CPUs entering this code. )
+  */
++#include <linux/workqueue.h>
+ #include <linux/topology.h>
+ #include <linux/spinlock.h>
+ #include <linux/kernel.h>
+@@ -342,6 +343,13 @@ static inline unsigned int loop_timeout(int cpu)
+ 	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
+ }
+ 
++static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
++{
++	mark_tsc_unstable("check_tsc_sync_source failed");
++}
++
++static DECLARE_WORK(tsc_sync_work, tsc_sync_mark_tsc_unstable);
++
+ /*
+  * The freshly booted CPU initiates this via an async SMP function call.
+  */
+@@ -395,7 +403,7 @@ retry:
+ 			"turning off TSC clock.\n", max_warp);
+ 		if (random_warps)
+ 			pr_warn("TSC warped randomly between CPUs\n");
+-		mark_tsc_unstable("check_tsc_sync_source failed");
++		schedule_work(&tsc_sync_work);
+ 	}
+ 
+ 	/*
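
The tsc_sync change defers mark_tsc_unstable() to the system workqueue
instead of calling it directly from the sync check. A module-shaped
sketch of that DECLARE_WORK/schedule_work pattern, with illustrative
names (the handler body stands in for whatever has to run later in
process context):

#include <linux/module.h>
#include <linux/workqueue.h>

static void deferred_handler(struct work_struct *work)
{
	pr_info("handled later, in process context\n");
}

static DECLARE_WORK(deferred_work, deferred_handler);

static int __init wq_sketch_init(void)
{
	schedule_work(&deferred_work);	/* cheap to call, runs later */
	return 0;
}

static void __exit wq_sketch_exit(void)
{
	flush_work(&deferred_work);	/* never unload with work pending */
}

module_init(wq_sketch_init);
module_exit(wq_sketch_exit);
MODULE_LICENSE("GPL");
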
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index e78bc3b65ec80..dcf95e4e0cd9f 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -723,6 +723,12 @@ static unsigned int calculate_io_allowed(u32 iops_limit,
+ 
+ static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+ {
++	/*
++	 * Can result be wider than 64 bits?
++	 * We check against 62, not 64, due to ilog2 truncation.
++	 */
++	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
++		return U64_MAX;
+ 	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+ }
+ 
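
The guard added above bounds the result before mul_u64_u64_div_u64()
does its 128-bit multiply: ilog2() is floor(log2(x)), so the sum of two
ilog2() values can undershoot log2 of the product by almost two bits,
which is why the comparison is against 62 rather than 64. A standalone
rework of the same arithmetic (the HZ value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000ULL	/* illustrative tick rate */

static int ilog2_u64(uint64_t x)
{
	return 63 - __builtin_clzll(x);	/* floor(log2(x)), x > 0 */
}

static uint64_t bytes_allowed(uint64_t bps_limit, uint64_t jiffy_elapsed)
{
	if (ilog2_u64(bps_limit) + ilog2_u64(jiffy_elapsed)
	    - ilog2_u64(HZ) > 62)
		return UINT64_MAX;	/* result would not fit in 64 bits */

	/* the 128-bit intermediate mirrors mul_u64_u64_div_u64() */
	return (uint64_t)(((__uint128_t)bps_limit * jiffy_elapsed) / HZ);
}

int main(void)
{
	/* ~4 GiB/s over 10 s of jiffies: computed normally */
	printf("%llu\n",
	       (unsigned long long)bytes_allowed(1ULL << 32, 10 * HZ));
	/* absurd limit times a huge interval: clamped, not wrapped */
	printf("%llu\n",
	       (unsigned long long)bytes_allowed(1ULL << 40, 1ULL << 40));
	return 0;
}
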
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 09dee5be20700..2e236df4039d5 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -343,14 +343,19 @@ int ivpu_boot(struct ivpu_device *vdev)
+ 	return 0;
+ }
+ 
+-int ivpu_shutdown(struct ivpu_device *vdev)
++void ivpu_prepare_for_reset(struct ivpu_device *vdev)
+ {
+-	int ret;
+-
+ 	ivpu_hw_irq_disable(vdev);
+ 	disable_irq(vdev->irq);
+ 	ivpu_ipc_disable(vdev);
+ 	ivpu_mmu_disable(vdev);
++}
++
++int ivpu_shutdown(struct ivpu_device *vdev)
++{
++	int ret;
++
++	ivpu_prepare_for_reset(vdev);
+ 
+ 	ret = ivpu_hw_power_down(vdev);
+ 	if (ret)
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 399dc5dcefd7c..343b15e567991 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -144,6 +144,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
+ 
+ int ivpu_boot(struct ivpu_device *vdev);
+ int ivpu_shutdown(struct ivpu_device *vdev);
++void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+ 
+ static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
+ {
+diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
+index 50a9304ab09cf..dac59f1c1e05d 100644
+--- a/drivers/accel/ivpu/ivpu_hw.h
++++ b/drivers/accel/ivpu/ivpu_hw.h
+@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
+ 	int (*power_up)(struct ivpu_device *vdev);
+ 	int (*boot_fw)(struct ivpu_device *vdev);
+ 	int (*power_down)(struct ivpu_device *vdev);
++	int (*reset)(struct ivpu_device *vdev);
+ 	bool (*is_idle)(struct ivpu_device *vdev);
+ 	void (*wdt_disable)(struct ivpu_device *vdev);
+ 	void (*diagnose_failure)(struct ivpu_device *vdev);
+@@ -90,6 +91,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
+ 	return vdev->hw->ops->power_down(vdev);
+ };
+ 
++static inline int ivpu_hw_reset(struct ivpu_device *vdev)
++{
++	ivpu_dbg(vdev, PM, "HW reset\n");
++
++	return vdev->hw->ops->reset(vdev);
++};
++
+ static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
+ {
+ 	vdev->hw->ops->wdt_disable(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
+index 2a5dd3a5dc461..52a9d60a44f5b 100644
+--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
++++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
+@@ -953,9 +953,6 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+ 	if (status == 0)
+ 		return 0;
+ 
+-	/* Disable global interrupt before handling local buttress interrupts */
+-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+-
+ 	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ 		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+ 
+@@ -986,9 +983,6 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+ 	else
+ 		REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
+ 
+-	/* Re-enable global interrupt */
+-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+-
+ 	if (schedule_recovery)
+ 		ivpu_pm_schedule_recovery(vdev);
+ 
+@@ -1000,9 +994,14 @@ static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+ 	struct ivpu_device *vdev = ptr;
+ 	u32 ret_irqv, ret_irqb;
+ 
++	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
++
+ 	ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
+ 	ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+ 
++	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
++	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
++
+ 	return IRQ_RETVAL(ret_irqb | ret_irqv);
+ }
+ 
+@@ -1041,6 +1040,7 @@ const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
+ 	.power_up = ivpu_hw_mtl_power_up,
+ 	.is_idle = ivpu_hw_mtl_is_idle,
+ 	.power_down = ivpu_hw_mtl_power_down,
++	.reset = ivpu_hw_mtl_reset,
+ 	.boot_fw = ivpu_hw_mtl_boot_fw,
+ 	.wdt_disable = ivpu_hw_mtl_wdt_disable,
+ 	.diagnose_failure = ivpu_hw_mtl_diagnose_failure,
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index aa4d56dc52b39..86ec828dd1dfd 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -260,7 +260,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+ 
+ 	ivpu_dbg(vdev, PM, "Pre-reset..\n");
+ 	atomic_set(&vdev->pm->in_reset, 1);
+-	ivpu_shutdown(vdev);
++	ivpu_prepare_for_reset(vdev);
++	ivpu_hw_reset(vdev);
+ 	ivpu_pm_prepare_cold_boot(vdev);
+ 	ivpu_jobs_abort_all(vdev);
+ 	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index cdbc382e949b8..d1fd47d6704ab 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1103,10 +1103,11 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 
+ 		/*
+ 		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
+-		 * and resume only. For system level suspend/resume, devices
+-		 * power state is handled directly by libata EH.
++		 * and resume and shutdown only. For system level suspend/resume,
++		 * devices power state is handled directly by libata EH.
+ 		 */
+ 		sdev->manage_runtime_start_stop = true;
++		sdev->manage_shutdown = true;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index c249f9791ae86..473563bc74960 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3416,6 +3416,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
+ 				 unsigned int i, char terminator)
+ {
+ 	struct clk_core *parent;
++	const char *name = NULL;
+ 
+ 	/*
+ 	 * Go through the following options to fetch a parent's name.
+@@ -3430,18 +3431,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
+ 	 * registered (yet).
+ 	 */
+ 	parent = clk_core_get_parent_by_index(core, i);
+-	if (parent)
++	if (parent) {
+ 		seq_puts(s, parent->name);
+-	else if (core->parents[i].name)
++	} else if (core->parents[i].name) {
+ 		seq_puts(s, core->parents[i].name);
+-	else if (core->parents[i].fw_name)
++	} else if (core->parents[i].fw_name) {
+ 		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
+-	else if (core->parents[i].index >= 0)
+-		seq_puts(s,
+-			 of_clk_get_parent_name(core->of_node,
+-						core->parents[i].index));
+-	else
+-		seq_puts(s, "(missing)");
++	} else {
++		if (core->parents[i].index >= 0)
++			name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
++		if (!name)
++			name = "(missing)";
++
++		seq_puts(s, name);
++	}
+ 
+ 	seq_putc(s, terminator);
+ }
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index 8dd601bd85389..0a5a95e0267ff 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -87,10 +87,8 @@ static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
+ 	return 0;
+ }
+ 
+-static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+-	unsigned long parent_rate)
++static u32 socfpga_clk_get_div(struct socfpga_gate_clk *socfpgaclk)
+ {
+-	struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ 	u32 div = 1, val;
+ 
+ 	if (socfpgaclk->fixed_div)
+@@ -105,12 +103,33 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+ 			div = (1 << val);
+ 	}
+ 
++	return div;
++}
++
++static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
++					     unsigned long parent_rate)
++{
++	struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
++	u32 div = socfpga_clk_get_div(socfpgaclk);
++
+ 	return parent_rate / div;
+ }
+ 
++
++static int socfpga_clk_determine_rate(struct clk_hw *hwclk,
++				      struct clk_rate_request *req)
++{
++	struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
++	u32 div = socfpga_clk_get_div(socfpgaclk);
++
++	req->rate = req->best_parent_rate / div;
++
++	return 0;
++}
++
+ static struct clk_ops gateclk_ops = {
+ 	.recalc_rate = socfpga_clk_recalc_rate,
+-	.determine_rate = clk_hw_determine_rate_no_reparent,
++	.determine_rate = socfpga_clk_determine_rate,
+ 	.get_parent = socfpga_clk_get_parent,
+ 	.set_parent = socfpga_clk_set_parent,
+ };
+diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
+index d5aa09e9fce4c..067b918a88945 100644
+--- a/drivers/clk/stm32/clk-stm32-core.c
++++ b/drivers/clk/stm32/clk-stm32-core.c
+@@ -431,7 +431,7 @@ static int clk_stm32_composite_determine_rate(struct clk_hw *hw,
+ {
+ 	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
+ 	const struct stm32_div_cfg *divider;
+-	unsigned long rate;
++	long rate;
+ 
+ 	if (composite->div_id == NO_STM32_DIV)
+ 		return 0;
+diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
+index 868bc7af21b0b..9b2824ed785b9 100644
+--- a/drivers/clk/ti/clk-44xx.c
++++ b/drivers/clk/ti/clk-44xx.c
+@@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = {
+ 	DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ 	DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ 	DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
++	DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
++	DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
++	DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ 	DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
++	DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
+ 	DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ 	DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
++	DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ 	DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ 	DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ 	DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
+index b4aff76eb3735..74dfd5823f835 100644
+--- a/drivers/clk/ti/clk-54xx.c
++++ b/drivers/clk/ti/clk-54xx.c
+@@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = {
+ 	DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ 	DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ 	DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
++	DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ 	DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ 	DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
++	DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ 	DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ 	DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
++	DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ 	DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ 	DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ 	DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ 	DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ 	DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
++	DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ 	DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ 	DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ 	DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
+index 59a4c02594563..154590e1f7643 100644
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -35,6 +35,9 @@ struct virtio_crypto {
+ 	struct virtqueue *ctrl_vq;
+ 	struct data_queue *data_vq;
+ 
++	/* Work struct for config space updates */
++	struct work_struct config_work;
++
+ 	/* To protect the vq operations for the controlq */
+ 	spinlock_t ctrl_lock;
+ 
+diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
+index 94849fa3bd74a..43a0838d31ff0 100644
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+ 	virtcrypto_free_queues(vcrypto);
+ }
+ 
++static void vcrypto_config_changed_work(struct work_struct *work)
++{
++	struct virtio_crypto *vcrypto =
++		container_of(work, struct virtio_crypto, config_work);
++
++	virtcrypto_update_status(vcrypto);
++}
++
+ static int virtcrypto_probe(struct virtio_device *vdev)
+ {
+ 	int err = -EFAULT;
+@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
+ 	if (err)
+ 		goto free_engines;
+ 
++	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
++
+ 	return 0;
+ 
+ free_engines:
+@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
+ 
+ 	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ 
++	flush_work(&vcrypto->config_work);
+ 	if (virtcrypto_dev_started(vcrypto))
+ 		virtcrypto_dev_stop(vcrypto);
+ 	virtio_reset_device(vdev);
+@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
+ {
+ 	struct virtio_crypto *vcrypto = vdev->priv;
+ 
+-	virtcrypto_update_status(vcrypto);
++	schedule_work(&vcrypto->config_work);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
+ {
+ 	struct virtio_crypto *vcrypto = vdev->priv;
+ 
++	flush_work(&vcrypto->config_work);
+ 	virtio_reset_device(vdev);
+ 	virtcrypto_free_unused_reqs(vcrypto);
+ 	if (virtcrypto_dev_started(vcrypto))
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 749868b9e80d6..7edf2c95282fa 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1521,6 +1521,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ 	if (sbp2_param_exclusive_login) {
+ 		sdev->manage_system_start_stop = true;
+ 		sdev->manage_runtime_start_stop = true;
++		sdev->manage_shutdown = true;
+ 	}
+ 
+ 	if (sdev->type == TYPE_ROM)
+diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
+index 1f410809d3ee4..0f656e4191d5c 100644
+--- a/drivers/firmware/imx/imx-dsp.c
++++ b/drivers/firmware/imx/imx-dsp.c
+@@ -115,11 +115,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+ 		dsp_chan->idx = i % 2;
+ 		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ 		if (IS_ERR(dsp_chan->ch)) {
+-			kfree(dsp_chan->name);
+ 			ret = PTR_ERR(dsp_chan->ch);
+ 			if (ret != -EPROBE_DEFER)
+ 				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ 					chan_name, ret);
++			kfree(dsp_chan->name);
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 6a8494f98d3ef..fe8ba9e9837b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
+ 	bool bL1SS = false;
+ 	bool bClkReqSupport = true;
+ 
+-	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
+ 		return;
+ 
+ 	if (adev->flags & AMD_IS_APU ||
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index ed96cfcfa3040..8c929ef72c72c 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ 	struct drm_dp_mst_branch *found_mstb;
+ 	struct drm_dp_mst_port *port;
+ 
++	if (!mstb)
++		return NULL;
++
+ 	if (memcmp(mstb->guid, guid, 16) == 0)
+ 		return mstb;
+ 
+ 
+ 	list_for_each_entry(port, &mstb->ports, next) {
+-		if (!port->mstb)
+-			continue;
+-
+ 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+ 
+ 		if (found_mstb)
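
The drm_dp_mst fix hoists the NULL test to the top of the recursive
helper, so a NULL starting branch and NULL children all land on the same
base case. The same shape in a self-contained toy tree search:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct node {
	const char *guid;
	struct node *child[2];
};

static struct node *find_by_guid(struct node *n, const char *guid)
{
	struct node *found;
	size_t i;

	if (!n)			/* entry-point base case covers NULL root */
		return NULL;
	if (strcmp(n->guid, guid) == 0)
		return n;

	for (i = 0; i < 2; i++) {
		/* no per-child NULL test needed any more */
		found = find_by_guid(n->child[i], guid);
		if (found)
			return found;
	}
	return NULL;
}

int main(void)
{
	struct node leaf = { "b", { NULL, NULL } };
	struct node root = { "a", { NULL, &leaf } };

	printf("%s\n", find_by_guid(&root, "b") ? "found" : "not found");
	printf("%s\n", find_by_guid(NULL, "b") ? "found" : "not found");
	return 0;
}
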
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+index 0b414eae16831..2c0f1f3e28ff8 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+@@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+ 	 * driver threads, but also with hardware/firmware agents.  A dedicated
+ 	 * locking register is used.
+ 	 */
+-	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
++	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
++		/*
++		 * The steering control and semaphore registers are inside an
++		 * "always on" power domain with respect to RC6.  However there
++		 * are some issues if higher-level platform sleep states are
++		 * entering/exiting at the same time these registers are
++		 * accessed.  Grabbing GT forcewake and holding it over the
++		 * entire lock/steer/unlock cycle ensures that those sleep
++		 * states have been fully exited before we access these
++		 * registers.  This wakeref will be released in the unlock
++		 * routine.
++		 *
++		 * This is expected to become a formally documented/numbered
++		 * workaround soon.
++		 */
++		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
++
+ 		err = wait_for(intel_uncore_read_fw(gt->uncore,
+ 						    MTL_STEER_SEMAPHORE) == 0x1, 100);
++	}
+ 
+ 	/*
+ 	 * Even on platforms with a hardware lock, we'll continue to grab
+@@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+ {
+ 	spin_unlock_irqrestore(&gt->mcr_lock, flags);
+ 
+-	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
++	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+ 		intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
++
++		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 49c6f1ff11284..331685e1b7b7d 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
+ static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
+ {
+ 	return !(oa_report_id(stream, report) &
+-	       stream->perf->gen8_valid_ctx_bit) &&
+-	       GRAPHICS_VER(stream->perf->i915) <= 11;
++	       stream->perf->gen8_valid_ctx_bit);
+ }
+ 
+ static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
+@@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
+ 		perf->gen8_valid_ctx_bit = BIT(16);
+ 		break;
+ 	case 12:
++		perf->gen8_valid_ctx_bit = BIT(16);
+ 		/*
+ 		 * Calculate offset at runtime in oa_pin_context for gen12 and
+ 		 * cache the value in perf->ctx_oactxctrl_offset.
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index d35973b411863..7b1076b5e748c 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
+ 
+ static void i915_pmu_event_stop(struct perf_event *event, int flags)
+ {
++	struct drm_i915_private *i915 =
++		container_of(event->pmu, typeof(*i915), pmu.base);
++	struct i915_pmu *pmu = &i915->pmu;
++
++	if (pmu->closed)
++		goto out;
++
+ 	if (flags & PERF_EF_UPDATE)
+ 		i915_pmu_event_read(event);
+ 	i915_pmu_disable(event);
++
++out:
+ 	event->hw.state = PERF_HES_STOPPED;
+ }
+ 
+diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
+index fa7a883688094..1df22a852a23e 100644
+--- a/drivers/gpu/drm/logicvc/Kconfig
++++ b/drivers/gpu/drm/logicvc/Kconfig
+@@ -5,5 +5,7 @@ config DRM_LOGICVC
+ 	select DRM_KMS_HELPER
+ 	select DRM_KMS_DMA_HELPER
+ 	select DRM_GEM_DMA_HELPER
++	select REGMAP
++	select REGMAP_MMIO
+ 	help
+ 	  DRM display driver for the logiCVC programmable logic block from Xylon
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 5a416b39b8183..28e2a5fc45282 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -749,6 +749,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
+ 	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
+ 	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
+ 	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
++
++	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ }
+ 
+ static int aspeed_i2c_reg_slave(struct i2c_client *client)
+@@ -765,7 +767,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
+ 	__aspeed_i2c_reg_slave(bus, client->addr);
+ 
+ 	bus->slave = client;
+-	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ 	spin_unlock_irqrestore(&bus->lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index e897d9101434d..9ee37c59025ec 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -1059,9 +1059,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
+ 	/* Configure PEC */
+ 	if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
+ 		cr1 |= STM32F7_I2C_CR1_PECEN;
+-		cr2 |= STM32F7_I2C_CR2_PECBYTE;
+-		if (!f7_msg->read_write)
++		if (!f7_msg->read_write) {
++			cr2 |= STM32F7_I2C_CR2_PECBYTE;
+ 			f7_msg->count++;
++		}
+ 	} else {
+ 		cr1 &= ~STM32F7_I2C_CR1_PECEN;
+ 		cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
+@@ -1149,8 +1150,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
+ 	f7_msg->stop = true;
+ 
+ 	/* Add one byte for PEC if needed */
+-	if (cr1 & STM32F7_I2C_CR1_PECEN)
++	if (cr1 & STM32F7_I2C_CR1_PECEN) {
++		cr2 |= STM32F7_I2C_CR2_PECBYTE;
+ 		f7_msg->count++;
++	}
+ 
+ 	/* Set number of bytes to be transferred */
+ 	cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);
+diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+index 22f2280eab7f7..9f2e4aa281593 100644
+--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+@@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
+ 	if (ret)
+ 		goto err;
+ 
+-	adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
++	adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ 	if (!adap) {
+ 		ret = -ENODEV;
+ 		goto err_with_revert;
+diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
+index 0405af0e15104..cd11de8b45058 100644
+--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
++++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
+@@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
+ 		dev_err(dev, "Cannot parse i2c-parent\n");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+-	parent = of_find_i2c_adapter_by_node(parent_np);
++	parent = of_get_i2c_adapter_by_node(parent_np);
+ 	of_node_put(parent_np);
+ 	if (!parent)
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
+index 18236b9fa14a9..6ebca7bfd8a26 100644
+--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
+@@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
+ 		dev_err(dev, "Cannot parse i2c-parent\n");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+-	parent = of_find_i2c_adapter_by_node(parent_np);
++	parent = of_get_i2c_adapter_by_node(parent_np);
+ 	of_node_put(parent_np);
+ 	if (!parent)
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
+index cff1ba57fb16a..43c8af41b4a9d 100644
+--- a/drivers/iio/adc/exynos_adc.c
++++ b/drivers/iio/adc/exynos_adc.c
+@@ -826,16 +826,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	/* leave out any TS related code if unreachable */
++	if (IS_REACHABLE(CONFIG_INPUT)) {
++		has_ts = of_property_read_bool(pdev->dev.of_node,
++					       "has-touchscreen") || pdata;
++	}
++
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+ 		return irq;
+ 	info->irq = irq;
+ 
+-	irq = platform_get_irq(pdev, 1);
+-	if (irq == -EPROBE_DEFER)
+-		return irq;
++	if (has_ts) {
++		irq = platform_get_irq(pdev, 1);
++		if (irq == -EPROBE_DEFER)
++			return irq;
+ 
+-	info->tsirq = irq;
++		info->tsirq = irq;
++	} else {
++		info->tsirq = -1;
++	}
+ 
+ 	info->dev = &pdev->dev;
+ 
+@@ -900,12 +910,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
+ 	if (info->data->init_hw)
+ 		info->data->init_hw(info);
+ 
+-	/* leave out any TS related code if unreachable */
+-	if (IS_REACHABLE(CONFIG_INPUT)) {
+-		has_ts = of_property_read_bool(pdev->dev.of_node,
+-					       "has-touchscreen") || pdata;
+-	}
+-
+ 	if (pdata)
+ 		info->delay = pdata->delay;
+ 	else
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index dba73300f8948..564c0cad0fc79 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -456,6 +456,9 @@ static const struct xadc_ops xadc_zynq_ops = {
+ 	.interrupt_handler = xadc_zynq_interrupt_handler,
+ 	.update_alarm = xadc_zynq_update_alarm,
+ 	.type = XADC_TYPE_S7,
++	/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
++	.temp_scale = 503975,
++	.temp_offset = 273150,
+ };
+ 
+ static const unsigned int xadc_axi_reg_offsets[] = {
+@@ -566,6 +569,9 @@ static const struct xadc_ops xadc_7s_axi_ops = {
+ 	.interrupt_handler = xadc_axi_interrupt_handler,
+ 	.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
+ 	.type = XADC_TYPE_S7,
++	/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
++	.temp_scale = 503975,
++	.temp_offset = 273150,
+ };
+ 
+ static const struct xadc_ops xadc_us_axi_ops = {
+@@ -577,6 +583,12 @@ static const struct xadc_ops xadc_us_axi_ops = {
+ 	.interrupt_handler = xadc_axi_interrupt_handler,
+ 	.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
+ 	.type = XADC_TYPE_US,
++	/**
++	 * Values below are for UltraScale+ (SYSMONE4) using internal reference.
++	 * See https://docs.xilinx.com/v/u/en-US/ug580-ultrascale-sysmon
++	 */
++	.temp_scale = 509314,
++	.temp_offset = 280231,
+ };
+ 
+ static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+@@ -945,8 +957,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 			*val2 = bits;
+ 			return IIO_VAL_FRACTIONAL_LOG2;
+ 		case IIO_TEMP:
+-			/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+-			*val = 503975;
++			*val = xadc->ops->temp_scale;
+ 			*val2 = bits;
+ 			return IIO_VAL_FRACTIONAL_LOG2;
+ 		default:
+@@ -954,7 +965,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 		}
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		/* Only the temperature channel has an offset */
+-		*val = -((273150 << bits) / 503975);
++		*val = -((xadc->ops->temp_offset << bits) / xadc->ops->temp_scale);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		ret = xadc_read_samplerate(xadc);
+@@ -1423,28 +1434,6 @@ static int xadc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Disable all alarms */
+-	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+-				  XADC_CONF1_ALARM_MASK);
+-	if (ret)
+-		return ret;
+-
+-	/* Set thresholds to min/max */
+-	for (i = 0; i < 16; i++) {
+-		/*
+-		 * Set max voltage threshold and both temperature thresholds to
+-		 * 0xffff, min voltage threshold to 0.
+-		 */
+-		if (i % 8 < 4 || i == 7)
+-			xadc->threshold[i] = 0xffff;
+-		else
+-			xadc->threshold[i] = 0;
+-		ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+-			xadc->threshold[i]);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	/* Go to non-buffered mode */
+ 	xadc_postdisable(indio_dev);
+ 
+diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
+index 7d78ce6989671..3036f4d613ff5 100644
+--- a/drivers/iio/adc/xilinx-xadc.h
++++ b/drivers/iio/adc/xilinx-xadc.h
+@@ -85,6 +85,8 @@ struct xadc_ops {
+ 
+ 	unsigned int flags;
+ 	enum xadc_type type;
++	int temp_scale;
++	int temp_offset;
+ };
+ 
+ static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
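
The temperature constants introduced above are easiest to sanity-check
numerically: in IIO terms the reported value is (raw + offset) * scale,
where scale is temp_scale / 2^bits in millidegrees per LSB and offset is
-(temp_offset << bits) / temp_scale. A small standalone program (raw
codes chosen purely for illustration) applying both parameter pairs:

#include <stdio.h>

static double temp_c(unsigned int raw, unsigned int bits,
		     long scale, long off)
{
	/* integer offset, computed the way the driver reports it */
	long offset = -(((long)off << bits) / scale);

	return (raw + offset) * ((double)scale / (1L << bits)) / 1000.0;
}

int main(void)
{
	/* a 12-bit 7-series code that should land near 25 C */
	printf("7-series    : %.2f C\n", temp_c(2423, 12, 503975, 273150));
	/* the same raw code read on UltraScale+ means something cooler */
	printf("UltraScale+ : %.2f C\n", temp_c(2423, 12, 509314, 280231));
	return 0;
}
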
+diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
+index 1f280c360701b..56e5913ab82d1 100644
+--- a/drivers/iio/afe/iio-rescale.c
++++ b/drivers/iio/afe/iio-rescale.c
+@@ -214,8 +214,18 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
+ 				return ret < 0 ? ret : -EOPNOTSUPP;
+ 		}
+ 
+-		ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
+-		return rescale_process_offset(rescale, ret, scale, scale2,
++		if (iio_channel_has_info(rescale->source->channel,
++					 IIO_CHAN_INFO_SCALE)) {
++			ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
++			return rescale_process_offset(rescale, ret, scale, scale2,
++						      schan_off, val, val2);
++		}
++
++		/*
++		 * If we get here we have no scale so scale 1:1 but apply
++		 * rescaler and offset, if any.
++		 */
++		return rescale_process_offset(rescale, IIO_VAL_FRACTIONAL, 1, 1,
+ 					      schan_off, val, val2);
+ 	default:
+ 		return -EINVAL;
+@@ -280,8 +290,9 @@ static int rescale_configure_channel(struct device *dev,
+ 	chan->type = rescale->cfg->type;
+ 
+ 	if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
+-	    iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
+-		dev_info(dev, "using raw+scale source channel\n");
++	    (iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE) ||
++	     iio_channel_has_info(schan, IIO_CHAN_INFO_OFFSET))) {
++		dev_info(dev, "using raw+scale/offset source channel\n");
+ 	} else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
+ 		dev_info(dev, "using processed channel\n");
+ 		rescale->chan_processed = true;
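
The fallback added above treats a scale-less source channel as 1:1 while
still applying the rescaler's ratio and any source offset. A toy version
of that arithmetic, using a hypothetical 1:11 voltage divider as the
front end (names are illustrative, not the driver's):

#include <stdio.h>

struct rescaler {
	long numerator;
	long denominator;
};

static double rescale_raw(const struct rescaler *r, long raw,
			  long schan_off)
{
	double scale = 1.0;	/* source published no scale: assume 1:1 */

	return (raw + schan_off) * scale * r->numerator / r->denominator;
}

int main(void)
{
	struct rescaler divider = { .numerator = 11, .denominator = 1 };

	printf("%.1f\n", rescale_raw(&divider, 100, 0));	/* 1100.0 */
	printf("%.1f\n", rescale_raw(&divider, 100, -20));	/* 880.0 */
	return 0;
}
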
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 42c4f603ec81f..fde2e170fed34 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -957,6 +957,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 	if (err)
+ 		return err;
+ 
++	memset(ctx->buf->virt, 0, pkt_size);
+ 	rpra = ctx->buf->virt;
+ 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
+ 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
+@@ -1089,6 +1090,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ 		}
+ 	}
+ 
++	/* Clean up fdlist which is updated by DSP */
+ 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
+ 		if (!fdlist[i])
+ 			break;
+@@ -1155,11 +1157,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+-	if (ctx->nscalars) {
+-		err = fastrpc_get_args(kernel, ctx);
+-		if (err)
+-			goto bail;
+-	}
++	err = fastrpc_get_args(kernel, ctx);
++	if (err)
++		goto bail;
+ 
+ 	/* make sure that all CPU memory writes are seen by DSP */
+ 	dma_wmb();
+@@ -1175,6 +1175,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 		err = wait_for_completion_interruptible(&ctx->work);
+ 	}
+ 
++	if (err)
++		goto bail;
++
++	/* make sure that all memory writes by DSP are seen by CPU */
++	dma_rmb();
++	/* populate all the output buffers with results */
++	err = fastrpc_put_args(ctx, kernel);
+ 	if (err)
+ 		goto bail;
+ 
+@@ -1183,15 +1190,6 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 	if (err)
+ 		goto bail;
+ 
+-	if (ctx->nscalars) {
+-		/* make sure that all memory writes by DSP are seen by CPU */
+-		dma_rmb();
+-		/* populate all the output buffers with results */
+-		err = fastrpc_put_args(ctx, kernel);
+-		if (err)
+-			goto bail;
+-	}
+-
+ bail:
+ 	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ 		/* We are done with this compute context */
+@@ -1982,11 +1980,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
+ 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
+ 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ 				      &args[0]);
+-	fastrpc_map_put(map);
+-	if (err)
++	if (err) {
+ 		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n",  map->fd, map->raddr);
++		return err;
++	}
++	fastrpc_map_put(map);
+ 
+-	return err;
++	return 0;
+ }
+ 
+ static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
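
The reordered fastrpc paths keep the existing barrier pairing intact:
argument buffers are published before the remote processor is kicked,
and the DSP's writes are ordered before the CPU consumes the results. A
compressed kernel-style sketch of that pairing (all names here are
illustrative, not the driver's actual invoke path):

#include <linux/io.h>
#include <linux/types.h>

static u32 invoke_remote(void __iomem *doorbell, u32 *args, u32 *results)
{
	args[0] = 42;		/* fill shared, device-visible memory */
	dma_wmb();		/* args visible before the kick */
	writel(1, doorbell);	/* tell the remote processor to go */

	/* ... wait for the completion signalled by the remote side ... */

	dma_rmb();		/* remote writes visible before our reads */
	return results[0];
}
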
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index ca66b747b7c5d..d7c274af6d4da 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ {
+ 	struct adin1110_priv *priv = port_priv->priv;
+ 	u32 header_len = ADIN1110_RD_HEADER_LEN;
+-	struct spi_transfer t;
++	struct spi_transfer t = {0};
+ 	u32 frame_size_no_fcs;
+ 	struct sk_buff *rxb;
+ 	u32 frame_size;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 6e310a5394678..55bb0b5310d5b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -580,7 +580,6 @@ struct i40e_pf {
+ #define I40E_FLAG_DISABLE_FW_LLDP		BIT(24)
+ #define I40E_FLAG_RS_FEC			BIT(25)
+ #define I40E_FLAG_BASE_R_FEC			BIT(26)
+-#define I40E_FLAG_VF_VLAN_PRUNING		BIT(27)
+ /* TOTAL_PORT_SHUTDOWN
+  * Allows to physically disable the link on the NIC's port.
+  * If enabled, (after link down request from the OS)
+@@ -603,6 +602,7 @@ struct i40e_pf {
+  *   in abilities field of i40e_aq_set_phy_config structure
+  */
+ #define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(27)
++#define I40E_FLAG_VF_VLAN_PRUNING		BIT(28)
+ 
+ 	struct i40e_client_instance *cinst;
+ 	bool stat_offsets_loaded;
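
The i40e.h hunk exists because I40E_FLAG_VF_VLAN_PRUNING and
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED both claimed BIT(27). A tiny
standalone demonstration of why such a collision misreads flags, and why
moving one definition to BIT(28) fixes it:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define FLAG_PORT_SHUTDOWN	BIT(27)
#define FLAG_VLAN_PRUNE_OLD	BIT(27)	/* the colliding definition */
#define FLAG_VLAN_PRUNE_NEW	BIT(28)	/* the fixed definition */

int main(void)
{
	uint32_t flags = FLAG_VLAN_PRUNE_OLD;

	printf("collision: shutdown looks %s\n",
	       (flags & FLAG_PORT_SHUTDOWN) ? "set" : "clear");	/* set! */

	flags = FLAG_VLAN_PRUNE_NEW;
	printf("fixed:     shutdown looks %s\n",
	       (flags & FLAG_PORT_SHUTDOWN) ? "set" : "clear");	/* clear */
	return 0;
}
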
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 8b8bf4880faa6..b59fef9d7c4ad 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ 			rx_buffer = i40e_rx_bi(rx_ring, ntp);
+ 			i40e_inc_ntp(rx_ring);
+ 			i40e_reuse_rx_page(rx_ring, rx_buffer);
+-			cleaned_count++;
++			/* Update ntc and bump cleaned count if not in the
++			 * middle of mb packet.
++			 */
++			if (rx_ring->next_to_clean == ntp) {
++				rx_ring->next_to_clean =
++					rx_ring->next_to_process;
++				cleaned_count++;
++			}
+ 			continue;
+ 		}
+ 
+@@ -2847,7 +2854,7 @@ tx_only:
+ 		return budget;
+ 	}
+ 
+-	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
++	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ 		q_vector->arm_wb_state = false;
+ 
+ 	/* Exit the polling mode, but don't re-enable interrupts if stack might
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 8ea5c0825c3c4..13bfc9333a8c3 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter)
+ 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ 		if (!list_empty(&adapter->adv_rss_list_head))
+ 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+-		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ 	}
+ 
++	adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+@@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&adapter->finish_config, iavf_finish_config);
+ 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+-	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+-			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ 
+ 	/* Setup the wait queue for indicating transition to down status */
+ 	init_waitqueue_head(&adapter->down_waitqueue);
+@@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* Setup the wait queue for indicating virtchannel events */
+ 	init_waitqueue_head(&adapter->vc_waitqueue);
+ 
++	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
++	/* Initialization goes on in the work. Do not add more of it below. */
+ 	return 0;
+ 
+ err_ioremap:
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 319ed601eaa1e..4ee849985e2b8 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
+ 	if (err)
+ 		goto err_out_w_lock;
+ 
+-	igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
++	err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
++	if (err)
++		goto err_out_input_filter;
+ 
+ 	spin_unlock(&adapter->nfc_lock);
+ 	return 0;
+ 
++err_out_input_filter:
++	igb_erase_filter(adapter, input);
+ err_out_w_lock:
+ 	spin_unlock(&adapter->nfc_lock);
+ err_out:
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 7ab6dd58e4001..dd8a9d27a1670 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ 	struct igc_adapter *adapter = netdev_priv(netdev);
+ 	struct net_device *dev = adapter->netdev;
+ 	struct igc_hw *hw = &adapter->hw;
+-	u32 advertising;
++	u16 advertised = 0;
+ 
+ 	/* When adapter in resetting mode, autoneg/speed/duplex
+ 	 * cannot be changed
+@@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ 		usleep_range(1000, 2000);
+ 
+-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
+-						cmd->link_modes.advertising);
+-	/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+-	 * We have to check this and convert it to ADVERTISE_2500_FULL
+-	 * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+-	 */
+-	if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+-		advertising |= ADVERTISE_2500_FULL;
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  2500baseT_Full))
++		advertised |= ADVERTISE_2500_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  1000baseT_Full))
++		advertised |= ADVERTISE_1000_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  100baseT_Full))
++		advertised |= ADVERTISE_100_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  100baseT_Half))
++		advertised |= ADVERTISE_100_HALF;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  10baseT_Full))
++		advertised |= ADVERTISE_10_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  10baseT_Half))
++		advertised |= ADVERTISE_10_HALF;
+ 
+ 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ 		hw->mac.autoneg = 1;
+-		hw->phy.autoneg_advertised = advertising;
++		hw->phy.autoneg_advertised = advertised;
+ 		if (adapter->fc_autoneg)
+ 			hw->fc.requested_mode = igc_fc_default;
+ 	} else {
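
The igc hunk replaces the lossy bitmap-to-legacy-u32 conversion with explicit per-mode tests: ethtool link-mode bitmaps are wider than 32 bits, and the old conversion silently dropped any advertised mode whose bit index is 32 or above (2500baseT_Full is bit 47 in the ethtool UAPI). Sketch of the failure mode (plain C, not the ethtool helpers):

#include <stdio.h>
#include <stdint.h>

#define MODE_2500BASET_FULL 47   /* ETHTOOL_LINK_MODE_2500baseT_Full_BIT */

static int test_bit(const uint64_t *map, unsigned int bit)
{
	return (int)((map[bit / 64] >> (bit % 64)) & 1);
}

int main(void)
{
	uint64_t advertising[2] = {0};

	advertising[MODE_2500BASET_FULL / 64] |= 1ULL << (MODE_2500BASET_FULL % 64);

	uint32_t legacy = (uint32_t)advertising[0];   /* keeps bits 0..31 only */

	printf("after legacy conversion: %u\n", legacy);            /* 0: lost */
	printf("direct bitmap test:      %d\n",
	       test_bit(advertising, MODE_2500BASET_FULL));         /* 1 */
	return 0;
}
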
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 6351a2dc13bce..361b90007148b 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ 		unsigned int entry = dirty_tx % NUM_TX_DESC;
+ 		u32 status;
+ 
+-		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
++		status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
+ 		if (status & DescOwn)
+ 			break;
+ 
+@@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ 		 * If skb is NULL then we come here again once a tx irq is
+ 		 * triggered after the last fragment is marked transmitted.
+ 		 */
+-		if (tp->cur_tx != dirty_tx && skb)
++		if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
+ 			rtl8169_doorbell(tp);
+ 	}
+ }
+@@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
+ 		dma_addr_t addr;
+ 		u32 status;
+ 
+-		status = le32_to_cpu(desc->opts1);
++		status = le32_to_cpu(READ_ONCE(desc->opts1));
+ 		if (status & DescOwn)
+ 			break;
+ 
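
The r8169 hunks wrap descriptor and index reads in READ_ONCE(): opts1 is updated by the NIC via DMA and cur_tx by the xmit path, so without it the compiler is free to tear, cache, or re-read the value across the DescOwn check. A loose userspace analogue is a single relaxed atomic load (mock, not the kernel macro):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t opts1;   /* stand-in for the DMA descriptor word */
#define DESC_OWN 0x80000000u

int main(void)
{
	atomic_store_explicit(&opts1, 0x1234, memory_order_relaxed);

	/* one untorn snapshot, comparable to READ_ONCE(desc->opts1) */
	uint32_t status = atomic_load_explicit(&opts1, memory_order_relaxed);

	if (!(status & DESC_OWN))
		printf("descriptor reclaimed, status=0x%x\n", status);
	return 0;
}
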
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+index dc14a66583ff3..44488c153ea25 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
+ 		key_index = wl->current_key;
+ 
+ 	if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
+-		/* reques to change default key index */
++		/* request to change default key index */
+ 		pr_debug("%s: request to change default key to %d\n",
+ 			 __func__, key_index);
+ 		wl->current_key = key_index;
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index acb20ad4e37eb..477b4d4f860bd 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -871,8 +871,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	skb_dst_update_pmtu_no_confirm(skb, mtu);
+ 
+-	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+-	    mtu < ntohs(iph->tot_len)) {
++	if (iph->frag_off & htons(IP_DF) &&
++	    ((!skb_is_gso(skb) && skb->len > mtu) ||
++	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
+ 		netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ 			      htonl(mtu));
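
The gtp fix reworks the PMTU check: the old test skipped GSO packets entirely, so an oversized GSO flow with DF set never received the ICMP_FRAG_NEEDED hint. The new test keeps a plain length comparison for non-GSO packets and asks skb_gso_validate_network_len() whether the eventual segments would fit. A rough sketch of the corrected decision (plain C, crude stand-ins for the skb helpers):

#include <stdbool.h>
#include <stdio.h>

static bool segments_fit(unsigned int gso_size, unsigned int hdr_len,
			 unsigned int mtu)
{
	/* crude stand-in for skb_gso_validate_network_len() */
	return gso_size + hdr_len <= mtu;
}

static bool needs_frag_icmp(bool df_set, bool gso, unsigned int len,
			    unsigned int gso_size, unsigned int mtu)
{
	if (!df_set)
		return false;
	if (!gso)
		return len > mtu;
	return !segments_fit(gso_size, 40, mtu);   /* ~40 bytes of headers */
}

int main(void)
{
	/* 64K GSO packet with 1400-byte segments fits a 1500 MTU: no ICMP. */
	printf("%d\n", needs_frag_icmp(true, true, 65536, 1400, 1500));
	/* 2000-byte non-GSO packet with DF over a 1500 MTU: ICMP needed. */
	printf("%d\n", needs_frag_icmp(true, false, 2000, 0, 1500));
	return 0;
}
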
+diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
+index a03490ba2e5b3..cc7ddc40020fd 100644
+--- a/drivers/net/ieee802154/adf7242.c
++++ b/drivers/net/ieee802154/adf7242.c
+@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
+ 
+ static void adf7242_debugfs_init(struct adf7242_local *lp)
+ {
+-	char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
++	char debugfs_dir_name[DNAME_INLINE_LEN + 1];
+ 
+-	strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
++	snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
++		 "adf7242-%s", dev_name(&lp->spi->dev));
+ 
+ 	lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+ 
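
The adf7242 fix replaces strncat() with snprintf(): strncat's size argument bounds how much of the source is appended, not the total destination size, so appending up to DNAME_INLINE_LEN bytes to a buffer that already holds "adf7242-" could overflow the DNAME_INLINE_LEN + 1 array by the length of that prefix. snprintf() bounds the destination instead. Demonstration (userspace, sizes shrunk):

#include <stdio.h>
#include <string.h>

#define BUFSZ 16   /* plays the role of DNAME_INLINE_LEN */

int main(void)
{
	char unsafe[BUFSZ + 9] = "adf7242-"; /* extra head-room for the demo */
	char safe[BUFSZ];
	const char *dev = "spi0.0-very-long-name";

	/* Appends up to BUFSZ bytes of dev plus a NUL: 8 + 16 + 1 > 16. */
	strncat(unsafe, dev, BUFSZ);
	/* Total output, including the NUL, never exceeds sizeof(safe). */
	snprintf(safe, sizeof(safe), "adf7242-%s", dev);

	printf("strncat:  %s (%zu bytes used)\n", unsafe, strlen(unsafe) + 1);
	printf("snprintf: %s (%zu bytes used)\n", safe, strlen(safe) + 1);
	return 0;
}
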
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index e88bedca8f32f..14497e5558bf9 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1212,7 +1212,7 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ 
+ 	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
+ 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+-			      value, index, tmp, size, 500);
++			      value, index, tmp, size, USB_CTRL_GET_TIMEOUT);
+ 	if (ret < 0)
+ 		memset(data, 0xff, size);
+ 	else
+@@ -1235,7 +1235,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ 
+ 	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
+ 			      RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+-			      value, index, tmp, size, 500);
++			      value, index, tmp, size, USB_CTRL_SET_TIMEOUT);
+ 
+ 	kfree(tmp);
+ 
+@@ -9512,7 +9512,8 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
+ 
+ 	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+-			      PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
++			      PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
++			      USB_CTRL_GET_TIMEOUT);
+ 	if (ret > 0)
+ 		ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ 
+@@ -9801,6 +9802,10 @@ static int rtl8152_probe(struct usb_interface *intf,
+ 
+ out1:
+ 	tasklet_kill(&tp->tx_tl);
++	cancel_delayed_work_sync(&tp->hw_phy_work);
++	if (tp->rtl_ops.unload)
++		tp->rtl_ops.unload(tp);
++	rtl8152_release_firmware(tp);
+ 	usb_set_intfdata(intf, NULL);
+ out:
+ 	free_netdev(netdev);
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 17da42fe605c3..a530f20ee2575 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 0, index, &buf, 4);
+-	if (ret < 0) {
++	if (ret < 4) {
++		ret = ret < 0 ? ret : -ENODATA;
++
+ 		if (ret != -ENODEV)
+ 			netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ 				    index, ret);
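
The smsc95xx change accounts for short reads: a USB control transfer can succeed with fewer bytes than requested, and checking only ret < 0 let a partially filled register value through as success. Requiring all 4 bytes and mapping short reads to -ENODATA closes that hole, as sketched here (mock transfer function, not the USB API):

#include <stdio.h>
#include <errno.h>

static int mock_usb_read(void *buf, int len)
{
	(void)buf; (void)len;
	return 2;                /* device answered with only 2 of 4 bytes */
}

int main(void)
{
	unsigned char reg[4] = {0};
	int ret = mock_usb_read(reg, sizeof(reg));

	if (ret < 0)
		printf("old check: error\n");
	else
		printf("old check: treated as success (short read!)\n");

	if (ret < (int)sizeof(reg)) {
		ret = ret < 0 ? ret : -ENODATA;
		printf("new check: error %d\n", ret);
	}
	return 0;
}
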
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index ab556c011f3e8..2e6ed170ef7c7 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -499,7 +499,7 @@ static const struct ocotp_params imx6sl_params = {
+ };
+ 
+ static const struct ocotp_params imx6sll_params = {
+-	.nregs = 128,
++	.nregs = 80,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+@@ -513,14 +513,14 @@ static const struct ocotp_params imx6sx_params = {
+ };
+ 
+ static const struct ocotp_params imx6ul_params = {
+-	.nregs = 128,
++	.nregs = 144,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+ };
+ 
+ static const struct ocotp_params imx6ull_params = {
+-	.nregs = 64,
++	.nregs = 80,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+diff --git a/drivers/platform/x86/amd/pmc-quirks.c b/drivers/platform/x86/amd/pmc-quirks.c
+index ad702463a65d3..6bbffb081053e 100644
+--- a/drivers/platform/x86/amd/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc-quirks.c
+@@ -111,6 +111,79 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+ 		}
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
++	{
++		.ident = "V14 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
++		}
++	},
++	{
++		.ident = "V14 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
++		}
++	},
++	{
++		.ident = "V15 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
++		}
++	},
++	{
++		.ident = "V15 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 14AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 15AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 15AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
++		}
++	},
++	{
++		.ident = "IdeaPad Slim 3 14AMN8",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
++		}
++	},
++	{
++		.ident = "IdeaPad Slim 3 15AMN8",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
++		}
++	},
+ 	/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
+ 	{
+ 		.ident = "HP Laptop 15s-eq2xxx",
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index d6535cbb4e05e..c4babb16dac73 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -221,7 +221,8 @@ manage_start_stop_show(struct device *dev,
+ 
+ 	return sysfs_emit(buf, "%u\n",
+ 			  sdp->manage_system_start_stop &&
+-			  sdp->manage_runtime_start_stop);
++			  sdp->manage_runtime_start_stop &&
++			  sdp->manage_shutdown);
+ }
+ static DEVICE_ATTR_RO(manage_start_stop);
+ 
+@@ -287,6 +288,35 @@ manage_runtime_start_stop_store(struct device *dev,
+ }
+ static DEVICE_ATTR_RW(manage_runtime_start_stop);
+ 
++static ssize_t manage_shutdown_show(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
++}
++
++static ssize_t manage_shutdown_store(struct device *dev,
++				     struct device_attribute *attr,
++				     const char *buf, size_t count)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++	bool v;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++
++	if (kstrtobool(buf, &v))
++		return -EINVAL;
++
++	sdp->manage_shutdown = v;
++
++	return count;
++}
++static DEVICE_ATTR_RW(manage_shutdown);
++
+ static ssize_t
+ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+@@ -619,6 +649,7 @@ static struct attribute *sd_disk_attrs[] = {
+ 	&dev_attr_manage_start_stop.attr,
+ 	&dev_attr_manage_system_start_stop.attr,
+ 	&dev_attr_manage_runtime_start_stop.attr,
++	&dev_attr_manage_shutdown.attr,
+ 	&dev_attr_protection_type.attr,
+ 	&dev_attr_protection_mode.attr,
+ 	&dev_attr_app_tag_own.attr,
+@@ -3858,8 +3889,10 @@ static void sd_shutdown(struct device *dev)
+ 		sd_sync_cache(sdkp, NULL);
+ 	}
+ 
+-	if (system_state != SYSTEM_RESTART &&
+-	    sdkp->device->manage_system_start_stop) {
++	if ((system_state != SYSTEM_RESTART &&
++	     sdkp->device->manage_system_start_stop) ||
++	    (system_state == SYSTEM_POWER_OFF &&
++	     sdkp->device->manage_shutdown)) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		sd_start_stop_device(sdkp, 0);
+ 	}
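
The sd hunks introduce a manage_shutdown knob: with it set, the disk is spun down on power-off even when manage_system_start_stop is off, while restart still skips the spin-down; the read-only manage_start_stop attribute now reports the conjunction of all three manage_* flags. The resulting decision, reduced to a mock helper:

#include <stdbool.h>
#include <stdio.h>

enum state { SYSTEM_RESTART, SYSTEM_POWER_OFF, SYSTEM_HALT };

static bool stop_disk(enum state s, bool manage_system, bool manage_shutdown)
{
	return (s != SYSTEM_RESTART && manage_system) ||
	       (s == SYSTEM_POWER_OFF && manage_shutdown);
}

int main(void)
{
	printf("poweroff, only manage_shutdown set: %d\n",
	       stop_disk(SYSTEM_POWER_OFF, false, true));   /* 1 */
	printf("restart, both set:                  %d\n",
	       stop_disk(SYSTEM_RESTART, true, true));      /* 0 */
	return 0;
}
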
+diff --git a/drivers/vdpa/mlx5/net/debug.c b/drivers/vdpa/mlx5/net/debug.c
+index 60d6ac68cdc41..9c85162c19fc4 100644
+--- a/drivers/vdpa/mlx5/net/debug.c
++++ b/drivers/vdpa/mlx5/net/debug.c
+@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
+ 		ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
+ }
+ 
+-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
++void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
+ {
+-	debugfs_remove_recursive(dbg);
++	debugfs_remove_recursive(ndev->debugfs);
++	ndev->debugfs = NULL;
+ }
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 37be945a02308..0ab19a258ebb8 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+ 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+ }
+ 
++static int read_umem_params(struct mlx5_vdpa_net *ndev)
++{
++	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
++	u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
++	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
++	int out_size;
++	void *caps;
++	void *out;
++	int err;
++
++	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
++	out = kzalloc(out_size, GFP_KERNEL);
++	if (!out)
++		return -ENOMEM;
++
++	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
++	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
++	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
++	if (err) {
++		mlx5_vdpa_warn(&ndev->mvdev,
++			"Failed reading vdpa umem capabilities with err %d\n", err);
++		goto out;
++	}
++
++	caps =  MLX5_ADDR_OF(query_hca_cap_out, out, capability);
++
++	ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
++	ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
++
++	ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
++	ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
++
++	ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
++	ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
++
++out:
++	kfree(out);
++	return 0;
++}
++
+ static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ 			  struct mlx5_vdpa_umem **umemp)
+ {
+-	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+-	int p_a;
+-	int p_b;
++	u32 p_a;
++	u32 p_b;
+ 
+ 	switch (num) {
+ 	case 1:
+-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
+-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
++		p_a = ndev->umem_1_buffer_param_a;
++		p_b = ndev->umem_1_buffer_param_b;
+ 		*umemp = &mvq->umem1;
+ 		break;
+ 	case 2:
+-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
+-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
++		p_a = ndev->umem_2_buffer_param_a;
++		p_b = ndev->umem_2_buffer_param_b;
+ 		*umemp = &mvq->umem2;
+ 		break;
+ 	case 3:
+-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
+-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
++		p_a = ndev->umem_3_buffer_param_a;
++		p_b = ndev->umem_3_buffer_param_b;
+ 		*umemp = &mvq->umem3;
+ 		break;
+ 	}
++
+ 	(*umemp)->size = p_a * mvq->num_ent + p_b;
+ }
+ 
+@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
+ 		goto out;
+ 	}
+ 	mlx5_vdpa_add_debugfs(ndev);
++
++	err = read_umem_params(ndev);
++	if (err)
++		goto err_setup;
++
+ 	err = setup_virtqueues(mvdev);
+ 	if (err) {
+ 		mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
+@@ -2713,7 +2758,7 @@ err_tir:
+ err_rqt:
+ 	teardown_virtqueues(ndev);
+ err_setup:
+-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
++	mlx5_vdpa_remove_debugfs(ndev);
+ out:
+ 	return err;
+ }
+@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
+ 	if (!ndev->setup)
+ 		return;
+ 
+-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
+-	ndev->debugfs = NULL;
++	mlx5_vdpa_remove_debugfs(ndev);
+ 	teardown_steering(ndev);
+ 	destroy_tir(ndev);
+ 	destroy_rqt(ndev);
+@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
+ 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 	struct workqueue_struct *wq;
+ 
+-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
+-	ndev->debugfs = NULL;
+ 	unregister_link_notifier(ndev);
+ 	_vdpa_unregister_device(dev);
+ 	wq = mvdev->wq;
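
The mlx5 vdpa change reads the umem buffer parameters once from the device via QUERY_HCA_CAP at setup time and caches the three (a, b) pairs in mlx5_vdpa_net, instead of consulting the capability macros on every set_umem_size() call; each queue's umem size stays a linear function of the ring size, size = a * num_ent + b. Illustrative arithmetic (hypothetical parameter values, not device-reported ones):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t p_a = 128, p_b = 4096;   /* hypothetical umem_1 params */
	uint16_t num_ent = 256;           /* ring size */

	printf("umem bytes: %u\n", p_a * (uint32_t)num_ent + p_b);  /* 36864 */
	return 0;
}
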
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
+index 36c44d9fdd166..90b556a579713 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
+@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
+ 	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+ 	struct mlx5_vdpa_irq_pool irqp;
+ 	struct dentry *debugfs;
++
++	u32 umem_1_buffer_param_a;
++	u32 umem_1_buffer_param_b;
++
++	u32 umem_2_buffer_param_a;
++	u32 umem_2_buffer_param_b;
++
++	u32 umem_3_buffer_param_a;
++	u32 umem_3_buffer_param_b;
+ };
+ 
+ struct mlx5_vdpa_counter {
+@@ -88,7 +97,7 @@ struct macvlan_node {
+ };
+ 
+ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
+-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
++void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
+ void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
+ void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
+ void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index 00d7d72713beb..b3a3cb1657955 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
+ 					 GFP_KERNEL);
+ 		if (!shared_buffer) {
+ 			ret = -ENOMEM;
+-			goto parent_err;
++			goto mgmt_dev_err;
+ 		}
+ 	}
+ 
+ 	return 0;
+-
++mgmt_dev_err:
++	vdpa_mgmtdev_unregister(&mgmt_dev);
+ parent_err:
+ 	device_unregister(&vdpasim_blk_mgmtdev);
+ 	return ret;
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c71d573f1c949..e0c181ad17e31 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ 		goto done;
+ 	}
+ 
+-	if ((msg.type == VHOST_IOTLB_UPDATE ||
+-	     msg.type == VHOST_IOTLB_INVALIDATE) &&
+-	     msg.size == 0) {
++	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
+ 		ret = -EINVAL;
+ 		goto done;
+ 	}
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 5b15936a52149..2d5d252ef4197 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
+ 	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+ 			&num_pages);
+ 
+-	target = num_pages;
++	/*
++	 * Aligned up to guest page size to avoid inflating and deflating
++	 * balloon endlessly.
++	 */
++	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
+ 	return target - vb->num_pages;
+ }
+ 
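
The balloon fix rounds the host-requested target up to a whole guest page: on a guest with 64K pages, one guest page equals 16 balloon pages (VIRTIO_BALLOON_PAGES_PER_PAGE), so a target that is not a multiple of 16 could never be met exactly and the guest would inflate and deflate forever. Quick check of the rounding (mock ALIGN, per-page factor assumed for a 64K-page guest):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define PAGES_PER_PAGE 16   /* 64K guest page / 4K balloon page */

int main(void)
{
	unsigned int num_pages = 1000;  /* host request, not a multiple of 16 */

	printf("raw target:     %u (unreachable in steps of %d)\n",
	       num_pages, PAGES_PER_PAGE);
	printf("aligned target: %u\n", ALIGN(num_pages, PAGES_PER_PAGE));
	return 0;
}
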
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index 97760f6112959..59892a31cf761 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 	spin_lock_init(&vm_dev->lock);
+ 
+ 	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(vm_dev->base))
+-		return PTR_ERR(vm_dev->base);
++	if (IS_ERR(vm_dev->base)) {
++		rc = PTR_ERR(vm_dev->base);
++		goto free_vm_dev;
++	}
+ 
+ 	/* Check magic value */
+ 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+ 	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+ 		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto free_vm_dev;
+ 	}
+ 
+ 	/* Check device version */
+@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 	if (vm_dev->version < 1 || vm_dev->version > 2) {
+ 		dev_err(&pdev->dev, "Version %ld not supported!\n",
+ 				vm_dev->version);
+-		return -ENXIO;
++		rc = -ENXIO;
++		goto free_vm_dev;
+ 	}
+ 
+ 	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
+ 		 * with no function. End probing now with no error reported.
+ 		 */
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto free_vm_dev;
+ 	}
+ 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+ 
+@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 		put_device(&vm_dev->vdev.dev);
+ 
+ 	return rc;
++
++free_vm_dev:
++	kfree(vm_dev);
++	return rc;
+ }
+ 
+ static int virtio_mmio_remove(struct platform_device *pdev)
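
The virtio_mmio hunks fix a probe-path leak: vm_dev is allocated with plain kzalloc() earlier in the function, so each early return leaked it; routing every failure through one free_vm_dev label keeps the kfree() in a single place. The shape of the pattern (simplified userspace mock):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_step)
{
	void *vm_dev = malloc(64);   /* stands in for the kzalloc'd vm_dev */
	int rc = 0;

	if (!vm_dev)
		return -ENOMEM;

	if (fail_step == 1) {        /* e.g. bad magic value */
		rc = -ENODEV;
		goto free_vm_dev;
	}
	if (fail_step == 2) {        /* e.g. unsupported version */
		rc = -ENXIO;
		goto free_vm_dev;
	}

	/* success: in the driver, ownership passes to the device core */
	free(vm_dev);                /* simplified for the demo */
	return 0;

free_vm_dev:
	free(vm_dev);
	return rc;
}

int main(void)
{
	printf("ok=%d bad_magic=%d\n", probe(0), probe(1));
	return 0;
}
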
+diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
+index aad7d9296e772..9cb601e16688d 100644
+--- a/drivers/virtio/virtio_pci_modern_dev.c
++++ b/drivers/virtio/virtio_pci_modern_dev.c
+@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
+ 	err = -EINVAL;
+ 	mdev->common = vp_modern_map_capability(mdev, common,
+ 				      sizeof(struct virtio_pci_common_cfg), 4,
+-				      0, sizeof(struct virtio_pci_common_cfg),
++				      0, sizeof(struct virtio_pci_modern_common_cfg),
+ 				      NULL, NULL);
+ 	if (!mdev->common)
+ 		goto err_map_common;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 79336fa853db3..a4a809efc92fc 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -3196,12 +3196,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
+  * We still need to do a tree search to find out the parents. This is for
+  * TREE_BLOCK_REF backref (keyed or inlined).
+  *
++ * @trans:	Transaction handle.
+  * @ref_key:	The same as @ref_key in  handle_direct_tree_backref()
+  * @tree_key:	The first key of this tree block.
+  * @path:	A clean (released) path, to avoid allocating path every time
+  *		the function get called.
+  */
+-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
++static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
++					struct btrfs_backref_cache *cache,
+ 					struct btrfs_path *path,
+ 					struct btrfs_key *ref_key,
+ 					struct btrfs_key *tree_key,
+@@ -3315,7 +3317,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+ 			 * If we know the block isn't shared we can avoid
+ 			 * checking its backrefs.
+ 			 */
+-			if (btrfs_block_can_be_shared(root, eb))
++			if (btrfs_block_can_be_shared(trans, root, eb))
+ 				upper->checked = 0;
+ 			else
+ 				upper->checked = 1;
+@@ -3363,17 +3365,18 @@ out:
+  *	 links aren't yet bi-directional. Needs to finish such links.
+  *	 Use btrfs_backref_finish_upper_links() to finish such linkage.
+  *
++ * @trans:	Transaction handle.
+  * @path:	Released path for indirect tree backref lookup
+  * @iter:	Released backref iter for extent tree search
+  * @node_key:	The first key of the tree block
+  */
+-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
++int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
++				struct btrfs_backref_cache *cache,
+ 				struct btrfs_path *path,
+ 				struct btrfs_backref_iter *iter,
+ 				struct btrfs_key *node_key,
+ 				struct btrfs_backref_node *cur)
+ {
+-	struct btrfs_fs_info *fs_info = cache->fs_info;
+ 	struct btrfs_backref_edge *edge;
+ 	struct btrfs_backref_node *exist;
+ 	int ret;
+@@ -3462,25 +3465,21 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+ 			ret = handle_direct_tree_backref(cache, &key, cur);
+ 			if (ret < 0)
+ 				goto out;
+-			continue;
+-		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+-			ret = -EINVAL;
+-			btrfs_print_v0_err(fs_info);
+-			btrfs_handle_fs_error(fs_info, ret, NULL);
+-			goto out;
+-		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
+-			continue;
++		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
++			/*
++			 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref
++			 * offset means the root objectid. We need to search
++			 * the tree to get its parent bytenr.
++			 */
++			ret = handle_indirect_tree_backref(trans, cache, path,
++							   &key, node_key, cur);
++			if (ret < 0)
++				goto out;
+ 		}
+-
+ 		/*
+-		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
+-		 * means the root objectid. We need to search the tree to get
+-		 * its parent bytenr.
++		 * Unrecognized tree backref items (if it can pass tree-checker)
++		 * would be ignored.
+ 		 */
+-		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
+-						   cur);
+-		if (ret < 0)
+-			goto out;
+ 	}
+ 	ret = 0;
+ 	cur->checked = 1;
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index 1616e3e3f1e41..71d535e03dca8 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -540,7 +540,8 @@ static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
+ 		    bytenr);
+ }
+ 
+-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
++int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
++				struct btrfs_backref_cache *cache,
+ 				struct btrfs_path *path,
+ 				struct btrfs_backref_iter *iter,
+ 				struct btrfs_key *node_key,
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index da519c1b6ad08..617d4827eec26 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -367,7 +367,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ /*
+  * check if the tree block can be shared by multiple trees
+  */
+-int btrfs_block_can_be_shared(struct btrfs_root *root,
++int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
++			      struct btrfs_root *root,
+ 			      struct extent_buffer *buf)
+ {
+ 	/*
+@@ -376,11 +377,21 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
+ 	 * not allocated by tree relocation, we know the block is not shared.
+ 	 */
+ 	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
+-	    buf != root->node && buf != root->commit_root &&
++	    buf != root->node &&
+ 	    (btrfs_header_generation(buf) <=
+ 	     btrfs_root_last_snapshot(&root->root_item) ||
+-	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+-		return 1;
++	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
++		if (buf != root->commit_root)
++			return 1;
++		/*
++		 * An extent buffer that used to be the commit root may still be
++		 * shared because the tree height may have increased and it
++		 * became a child of a higher level root. This can happen when
++		 * snapshotting a subvolume created in the current transaction.
++		 */
++		if (btrfs_header_generation(buf) == trans->transid)
++			return 1;
++	}
+ 
+ 	return 0;
+ }
+@@ -415,7 +426,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ 	 * are only allowed for blocks use full backrefs.
+ 	 */
+ 
+-	if (btrfs_block_can_be_shared(root, buf)) {
++	if (btrfs_block_can_be_shared(trans, root, buf)) {
+ 		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
+ 					       btrfs_header_level(buf), 1,
+ 					       &refs, &flags);
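
The btrfs change closes a sharing-detection gap: a buffer that is the current commit root can still be shared when the tree grew a level after snapshotting a subvolume created in the same transaction, so the commit-root exclusion now has a generation == transid escape hatch. The predicate, restated as a userspace mock (field names invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct buf {
	unsigned long long generation;
	bool is_root, is_commit_root, reloc;
};

static bool can_be_shared(bool shareable_root,
			  unsigned long long last_snapshot,
			  unsigned long long transid,
			  const struct buf *b)
{
	if (!shareable_root || b->is_root)
		return false;
	if (b->generation > last_snapshot && !b->reloc)
		return false;
	if (!b->is_commit_root)
		return true;
	/* commit root: only shared if born in the running transaction */
	return b->generation == transid;
}

int main(void)
{
	struct buf b = { .generation = 42, .is_commit_root = true };

	/* snapshot taken in the same transaction: now reported as shared */
	printf("%d\n", can_be_shared(true, 42, 42, &b));
	return 0;
}
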
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 9419f4e37a58c..ff40acd63a374 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -540,7 +540,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ 		      struct btrfs_root *root,
+ 		      struct extent_buffer *buf,
+ 		      struct extent_buffer **cow_ret, u64 new_root_objectid);
+-int btrfs_block_can_be_shared(struct btrfs_root *root,
++int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
++			      struct btrfs_root *root,
+ 			      struct extent_buffer *buf);
+ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		  struct btrfs_path *path, int level, int slot);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 2cf8d646085c2..14ea6b587e97b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -187,8 +187,10 @@ search_again:
+ 			num_refs = btrfs_extent_refs(leaf, ei);
+ 			extent_flags = btrfs_extent_flags(leaf, ei);
+ 		} else {
+-			ret = -EINVAL;
+-			btrfs_print_v0_err(fs_info);
++			ret = -EUCLEAN;
++			btrfs_err(fs_info,
++			"unexpected extent item size, has %u expect >= %zu",
++				  item_size, sizeof(*ei));
+ 			if (trans)
+ 				btrfs_abort_transaction(trans, ret);
+ 			else
+@@ -624,12 +626,12 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
+ 				      struct btrfs_shared_data_ref);
+ 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
+-	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+-		btrfs_print_v0_err(trans->fs_info);
+-		btrfs_abort_transaction(trans, -EINVAL);
+-		return -EINVAL;
+ 	} else {
+-		BUG();
++		btrfs_err(trans->fs_info,
++			  "unrecognized backref key (%llu %u %llu)",
++			  key.objectid, key.type, key.offset);
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		return -EUCLEAN;
+ 	}
+ 
+ 	BUG_ON(num_refs < refs_to_drop);
+@@ -660,7 +662,6 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
+ 	leaf = path->nodes[0];
+ 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ 
+-	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
+ 	if (iref) {
+ 		/*
+ 		 * If type is invalid, we should have bailed out earlier than
+@@ -881,8 +882,10 @@ again:
+ 	leaf = path->nodes[0];
+ 	item_size = btrfs_item_size(leaf, path->slots[0]);
+ 	if (unlikely(item_size < sizeof(*ei))) {
+-		err = -EINVAL;
+-		btrfs_print_v0_err(fs_info);
++		err = -EUCLEAN;
++		btrfs_err(fs_info,
++			  "unexpected extent item size, has %llu expect >= %zu",
++			  item_size, sizeof(*ei));
+ 		btrfs_abort_transaction(trans, err);
+ 		goto out;
+ 	}
+@@ -1683,8 +1686,10 @@ again:
+ 	item_size = btrfs_item_size(leaf, path->slots[0]);
+ 
+ 	if (unlikely(item_size < sizeof(*ei))) {
+-		err = -EINVAL;
+-		btrfs_print_v0_err(fs_info);
++		err = -EUCLEAN;
++		btrfs_err(fs_info,
++			  "unexpected extent item size, has %u expect >= %zu",
++			  item_size, sizeof(*ei));
+ 		btrfs_abort_transaction(trans, err);
+ 		goto out;
+ 	}
+@@ -3113,8 +3118,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ 	leaf = path->nodes[0];
+ 	item_size = btrfs_item_size(leaf, extent_slot);
+ 	if (unlikely(item_size < sizeof(*ei))) {
+-		ret = -EINVAL;
+-		btrfs_print_v0_err(info);
++		ret = -EUCLEAN;
++		btrfs_err(trans->fs_info,
++			  "unexpected extent item size, has %u expect >= %zu",
++			  item_size, sizeof(*ei));
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
+diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
+index 23fc11af498ac..21f2d101f681d 100644
+--- a/fs/btrfs/messages.c
++++ b/fs/btrfs/messages.c
+@@ -252,12 +252,6 @@ void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt,
+ }
+ #endif
+ 
+-void __cold btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
+-{
+-	btrfs_err(fs_info,
+-"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
+-}
+-
+ #if BITS_PER_LONG == 32
+ void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
+ {
+diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
+index deedc1a168e24..1ae6f8e23e071 100644
+--- a/fs/btrfs/messages.h
++++ b/fs/btrfs/messages.h
+@@ -181,8 +181,6 @@ do {								\
+ #define ASSERT(expr)	(void)(expr)
+ #endif
+ 
+-void __cold btrfs_print_v0_err(struct btrfs_fs_info *fs_info);
+-
+ __printf(5, 6)
+ __cold
+ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index aa06d9ca911d9..0c93439e929fb 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -95,8 +95,10 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
+ 	int ref_index = 0;
+ 
+ 	if (unlikely(item_size < sizeof(*ei))) {
+-		btrfs_print_v0_err(eb->fs_info);
+-		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
++		btrfs_err(eb->fs_info,
++			  "unexpected extent item size, has %u expect >= %zu",
++			  item_size, sizeof(*ei));
++		btrfs_handle_fs_error(eb->fs_info, -EUCLEAN, NULL);
+ 	}
+ 
+ 	ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
+@@ -291,10 +293,6 @@ void btrfs_print_leaf(const struct extent_buffer *l)
+ 			       btrfs_file_extent_num_bytes(l, fi),
+ 			       btrfs_file_extent_ram_bytes(l, fi));
+ 			break;
+-		case BTRFS_EXTENT_REF_V0_KEY:
+-			btrfs_print_v0_err(fs_info);
+-			btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
+-			break;
+ 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ 			bi = btrfs_item_ptr(l, i,
+ 					    struct btrfs_block_group_item);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 5f4ff7d5b5c19..62ed57551824c 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -466,6 +466,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
+  * cached.
+  */
+ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
++			struct btrfs_trans_handle *trans,
+ 			struct reloc_control *rc, struct btrfs_key *node_key,
+ 			int level, u64 bytenr)
+ {
+@@ -499,8 +500,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ 
+ 	/* Breadth-first search to build backref cache */
+ 	do {
+-		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
+-						  cur);
++		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
++						  node_key, cur);
+ 		if (ret < 0) {
+ 			err = ret;
+ 			goto out;
+@@ -2803,7 +2804,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
+ 
+ 	/* Do tree relocation */
+ 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
+-		node = build_backref_tree(rc, &block->key,
++		node = build_backref_tree(trans, rc, &block->key,
+ 					  block->level, block->bytenr);
+ 		if (IS_ERR(node)) {
+ 			err = PTR_ERR(node);
+@@ -3256,12 +3257,13 @@ static int add_tree_block(struct reloc_control *rc,
+ 			if (type == BTRFS_TREE_BLOCK_REF_KEY)
+ 				owner = btrfs_extent_inline_ref_offset(eb, iref);
+ 		}
+-	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+-		btrfs_print_v0_err(eb->fs_info);
+-		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+-		return -EINVAL;
+ 	} else {
+-		BUG();
++		btrfs_print_leaf(eb);
++		btrfs_err(rc->block_group->fs_info,
++			  "unrecognized tree backref at tree block %llu slot %u",
++			  eb->start, path->slots[0]);
++		btrfs_release_path(path);
++		return -EUCLEAN;
+ 	}
+ 
+ 	btrfs_release_path(path);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 2c9074ab2315a..c7fdc19b0d5f7 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1781,6 +1781,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
+ 		goto out;
+ 
++	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
++	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
++		goto out;
++	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
++		goto out;
++
+ retry:
+ 	host_err = fh_want_write(ffhp);
+ 	if (host_err) {
+@@ -1812,12 +1818,6 @@ retry:
+ 	if (ndentry == trap)
+ 		goto out_dput_new;
+ 
+-	host_err = -EXDEV;
+-	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+-		goto out_dput_new;
+-	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+-		goto out_dput_new;
+-
+ 	if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
+ 	    nfsd_has_cached_files(ndentry)) {
+ 		close_cached = true;
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 2d5e9a9d5b8be..fe1bf5b6e0cb3 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -15,10 +15,12 @@
+ static struct cached_fid *init_cached_dir(const char *path);
+ static void free_cached_dir(struct cached_fid *cfid);
+ static void smb2_close_cached_fid(struct kref *ref);
++static void cfids_laundromat_worker(struct work_struct *work);
+ 
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 						    const char *path,
+-						    bool lookup_only)
++						    bool lookup_only,
++						    __u32 max_cached_dirs)
+ {
+ 	struct cached_fid *cfid;
+ 
+@@ -43,7 +45,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 		spin_unlock(&cfids->cfid_list_lock);
+ 		return NULL;
+ 	}
+-	if (cfids->num_entries >= MAX_CACHED_FIDS) {
++	if (cfids->num_entries >= max_cached_dirs) {
+ 		spin_unlock(&cfids->cfid_list_lock);
+ 		return NULL;
+ 	}
+@@ -145,7 +147,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	const char *npath;
+ 
+ 	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+-	    is_smb1_server(tcon->ses->server))
++	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
+ 		return -EOPNOTSUPP;
+ 
+ 	ses = tcon->ses;
+@@ -162,21 +164,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
+-	cfid = find_or_create_cached_dir(cfids, path, lookup_only);
++	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
+ 	if (cfid == NULL) {
+ 		kfree(utf16_path);
+ 		return -ENOENT;
+ 	}
+ 	/*
+-	 * At this point we either have a lease already and we can just
+-	 * return it. If not we are guaranteed to be the only thread accessing
+-	 * this cfid.
++	 * Return cached fid if it has a lease.  Otherwise, it is either a new
++	 * entry or laundromat worker removed it from @cfids->entries.  Caller
++	 * will put last reference if the latter.
+ 	 */
++	spin_lock(&cfids->cfid_list_lock);
+ 	if (cfid->has_lease) {
++		spin_unlock(&cfids->cfid_list_lock);
+ 		*ret_cfid = cfid;
+ 		kfree(utf16_path);
+ 		return 0;
+ 	}
++	spin_unlock(&cfids->cfid_list_lock);
+ 
+ 	/*
+ 	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+@@ -293,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 			goto oshr_free;
+ 		}
+ 	}
++	spin_lock(&cfids->cfid_list_lock);
+ 	cfid->dentry = dentry;
+ 	cfid->time = jiffies;
+ 	cfid->has_lease = true;
++	spin_unlock(&cfids->cfid_list_lock);
+ 
+ oshr_free:
+ 	kfree(utf16_path);
+@@ -304,24 +311,28 @@ oshr_free:
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ 	spin_lock(&cfids->cfid_list_lock);
+-	if (rc && !cfid->has_lease) {
+-		if (cfid->on_list) {
+-			list_del(&cfid->entry);
+-			cfid->on_list = false;
+-			cfids->num_entries--;
++	if (!cfid->has_lease) {
++		if (rc) {
++			if (cfid->on_list) {
++				list_del(&cfid->entry);
++				cfid->on_list = false;
++				cfids->num_entries--;
++			}
++			rc = -ENOENT;
++		} else {
++			/*
++			 * We are guaranteed to have two references at this
++			 * point. One for the caller and one for a potential
++			 * lease. Release the Lease-ref so that the directory
++			 * will be closed when the caller closes the cached
++			 * handle.
++			 */
++			spin_unlock(&cfids->cfid_list_lock);
++			kref_put(&cfid->refcount, smb2_close_cached_fid);
++			goto out;
+ 		}
+-		rc = -ENOENT;
+ 	}
+ 	spin_unlock(&cfids->cfid_list_lock);
+-	if (!rc && !cfid->has_lease) {
+-		/*
+-		 * We are guaranteed to have two references at this point.
+-		 * One for the caller and one for a potential lease.
+-		 * Release the Lease-ref so that the directory will be closed
+-		 * when the caller closes the cached handle.
+-		 */
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+-	}
+ 	if (rc) {
+ 		if (cfid->is_open)
+ 			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+@@ -329,7 +340,7 @@ oshr_free:
+ 		free_cached_dir(cfid);
+ 		cfid = NULL;
+ 	}
+-
++out:
+ 	if (rc == 0) {
+ 		*ret_cfid = cfid;
+ 		atomic_inc(&tcon->num_remote_opens);
+@@ -451,6 +462,9 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
+ 	struct cached_fid *cfid, *q;
+ 	LIST_HEAD(entry);
+ 
++	if (cfids == NULL)
++		return;
++
+ 	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ 		list_move(&cfid->entry, &entry);
+@@ -568,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
+ 	kfree(cfid);
+ }
+ 
+-static int
+-cifs_cfids_laundromat_thread(void *p)
++static void cfids_laundromat_worker(struct work_struct *work)
+ {
+-	struct cached_fids *cfids = p;
++	struct cached_fids *cfids;
+ 	struct cached_fid *cfid, *q;
+-	struct list_head entry;
++	LIST_HEAD(entry);
+ 
+-	while (!kthread_should_stop()) {
+-		ssleep(1);
+-		INIT_LIST_HEAD(&entry);
+-		if (kthread_should_stop())
+-			return 0;
+-		spin_lock(&cfids->cfid_list_lock);
+-		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+-			if (time_after(jiffies, cfid->time + HZ * 30)) {
+-				list_del(&cfid->entry);
+-				list_add(&cfid->entry, &entry);
+-				cfids->num_entries--;
+-			}
+-		}
+-		spin_unlock(&cfids->cfid_list_lock);
++	cfids = container_of(work, struct cached_fids, laundromat_work.work);
+ 
+-		list_for_each_entry_safe(cfid, q, &entry, entry) {
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
++		if (cfid->time &&
++		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
+ 			cfid->on_list = false;
+-			list_del(&cfid->entry);
++			list_move(&cfid->entry, &entry);
++			cfids->num_entries--;
++			/* To prevent race with smb2_cached_lease_break() */
++			kref_get(&cfid->refcount);
++		}
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++
++	list_for_each_entry_safe(cfid, q, &entry, entry) {
++		list_del(&cfid->entry);
++		/*
++		 * Cancel and wait for the work to finish in case we are racing
++		 * with it.
++		 */
++		cancel_work_sync(&cfid->lease_break);
++		if (cfid->has_lease) {
+ 			/*
+-			 * Cancel, and wait for the work to finish in
+-			 * case we are racing with it.
++			 * Our lease has not yet been cancelled from the server
++			 * so we need to drop the reference.
+ 			 */
+-			cancel_work_sync(&cfid->lease_break);
+-			if (cfid->has_lease) {
+-				/*
+-				 * We lease has not yet been cancelled from
+-				 * the server so we need to drop the reference.
+-				 */
+-				spin_lock(&cfids->cfid_list_lock);
+-				cfid->has_lease = false;
+-				spin_unlock(&cfids->cfid_list_lock);
+-				kref_put(&cfid->refcount, smb2_close_cached_fid);
+-			}
++			spin_lock(&cfids->cfid_list_lock);
++			cfid->has_lease = false;
++			spin_unlock(&cfids->cfid_list_lock);
++			kref_put(&cfid->refcount, smb2_close_cached_fid);
+ 		}
++		/* Drop the extra reference opened above */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
+ 	}
+-
+-	return 0;
++	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++			   dir_cache_timeout * HZ);
+ }
+ 
+-
+ struct cached_fids *init_cached_dirs(void)
+ {
+ 	struct cached_fids *cfids;
+@@ -625,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
+ 	spin_lock_init(&cfids->cfid_list_lock);
+ 	INIT_LIST_HEAD(&cfids->entries);
+ 
+-	/*
+-	 * since we're in a cifs function already, we know that
+-	 * this will succeed. No need for try_module_get().
+-	 */
+-	__module_get(THIS_MODULE);
+-	cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
+-				  cfids, "cifsd-cfid-laundromat");
+-	if (IS_ERR(cfids->laundromat)) {
+-		cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
+-		kfree(cfids);
+-		module_put(THIS_MODULE);
+-		return NULL;
+-	}
++	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
++	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++			   dir_cache_timeout * HZ);
++
+ 	return cfids;
+ }
+ 
+@@ -650,11 +653,10 @@ void free_cached_dirs(struct cached_fids *cfids)
+ 	struct cached_fid *cfid, *q;
+ 	LIST_HEAD(entry);
+ 
+-	if (cfids->laundromat) {
+-		kthread_stop(cfids->laundromat);
+-		cfids->laundromat = NULL;
+-		module_put(THIS_MODULE);
+-	}
++	if (cfids == NULL)
++		return;
++
++	cancel_delayed_work_sync(&cfids->laundromat_work);
+ 
+ 	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
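
The cached_dir rework replaces the dedicated cifsd-cfid-laundromat kthread (1-second poll, hard-coded 30-second expiry) with a self-rearming delayed work on cifsiod_wq driven by the new dir_cache_timeout module parameter, and pins each expired entry with an extra reference before dropping the list lock so a concurrent lease break cannot free it mid-teardown. The refcount dance in miniature (plain counter standing in for kref, not the cifs types):

#include <stdio.h>

struct cfid { int refs; int has_lease; };

static void kref_get(struct cfid *c) { c->refs++; }
static void kref_put(struct cfid *c)
{
	if (--c->refs == 0)
		printf("cfid freed\n");
}

int main(void)
{
	struct cfid c = { .refs = 1, .has_lease = 1 }; /* lease holds 1 ref */

	kref_get(&c);        /* laundromat: pin before leaving the lock */
	/* ... cancel_work_sync(), detach from the list ... */
	if (c.has_lease) {
		c.has_lease = 0;
		kref_put(&c);    /* drop the lease's reference */
	}
	kref_put(&c);        /* drop the laundromat's pin: frees here */
	return 0;
}
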
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+index facc9b154d009..81ba0fd5cc16d 100644
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -49,7 +49,7 @@ struct cached_fid {
+ 	struct cached_dirents dirents;
+ };
+ 
+-#define MAX_CACHED_FIDS 16
++/* default MAX_CACHED_FIDS is 16 */
+ struct cached_fids {
+ 	/* Must be held when:
+ 	 * - accessing the cfids->entries list
+@@ -57,7 +57,7 @@ struct cached_fids {
+ 	spinlock_t cfid_list_lock;
+ 	int num_entries;
+ 	struct list_head entries;
+-	struct task_struct *laundromat;
++	struct delayed_work laundromat_work;
+ };
+ 
+ extern struct cached_fids *init_cached_dirs(void);
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index a4d8b0ea1c8cb..e19df244ea7ea 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -117,6 +117,10 @@ module_param(cifs_max_pending, uint, 0444);
+ MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
+ 				   "CIFS/SMB1 dialect (N/A for SMB3) "
+ 				   "Default: 32767 Range: 2 to 32767.");
++unsigned int dir_cache_timeout = 30;
++module_param(dir_cache_timeout, uint, 0644);
++MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
++				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
+ #ifdef CONFIG_CIFS_STATS2
+ unsigned int slow_rsp_threshold = 1;
+ module_param(slow_rsp_threshold, uint, 0644);
+@@ -695,6 +699,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+ 	if (tcon->handle_timeout)
+ 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
++	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
++		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
+ 
+ 	/*
+ 	 * Display file and directory attribute timeout in seconds.
+@@ -1679,6 +1685,12 @@ init_cifs(void)
+ 			 CIFS_MAX_REQ);
+ 	}
+ 
++	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
++	if (dir_cache_timeout > 65000) {
++		dir_cache_timeout = 65000;
++		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
++	}
++
+ 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ 	if (!cifsiod_wq) {
+ 		rc = -ENOMEM;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 35782a6bede0b..ac68fed5ad28a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1192,6 +1192,7 @@ struct cifs_tcon {
+ 	__u32 max_chunks;
+ 	__u32 max_bytes_chunk;
+ 	__u32 max_bytes_copy;
++	__u32 max_cached_dirs;
+ #ifdef CONFIG_CIFS_FSCACHE
+ 	u64 resource_id;		/* server resource id */
+ 	struct fscache_volume *fscache;	/* cookie for share */
+@@ -1913,7 +1914,7 @@ require use of the stronger protocol */
+  * cifsInodeInfo->lock_sem	cifsInodeInfo->llist		cifs_init_once
+  *				->can_cache_brlcks
+  * cifsInodeInfo->deferred_lock	cifsInodeInfo->deferred_closes	cifsInodeInfo_alloc
+- * cached_fid->fid_mutex		cifs_tcon->crfid		tconInfoAlloc
++ * cached_fid->fid_mutex		cifs_tcon->crfid		tcon_info_alloc
+  * cifsFileInfo->fh_mutex		cifsFileInfo			cifs_new_fileinfo
+  * cifsFileInfo->file_info_lock	cifsFileInfo->count		cifs_new_fileinfo
+  *				->invalidHandle			initiate_cifs_search
+@@ -1987,6 +1988,7 @@ extern unsigned int CIFSMaxBufSize;  /* max size not including hdr */
+ extern unsigned int cifs_min_rcv;    /* min size of big ntwrk buf pool */
+ extern unsigned int cifs_min_small;  /* min size of small buf pool */
+ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
++extern unsigned int dir_cache_timeout; /* max time for directory lease caching of dir */
+ extern bool disable_legacy_dialects;  /* forbid vers=1.0 and vers=2.0 mounts */
+ extern atomic_t mid_count;
+ 
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 1d71d658e1679..bd0a1505719a4 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -513,7 +513,7 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
+ 
+ extern struct cifs_ses *sesInfoAlloc(void);
+ extern void sesInfoFree(struct cifs_ses *);
+-extern struct cifs_tcon *tconInfoAlloc(void);
++extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
+ extern void tconInfoFree(struct cifs_tcon *);
+ 
+ extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 352e251c41132..bd33661dcb57f 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1882,7 +1882,8 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 		}
+ 	}
+ 
+-	tcon = tconInfoAlloc();
++	/* no need to setup directory caching on IPC share, so pass in false */
++	tcon = tcon_info_alloc(false);
+ 	if (tcon == NULL)
+ 		return -ENOMEM;
+ 
+@@ -2473,8 +2474,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ static struct cifs_tcon *
+ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ {
+-	int rc, xid;
+ 	struct cifs_tcon *tcon;
++	bool nohandlecache;
++	int rc, xid;
+ 
+ 	tcon = cifs_find_tcon(ses, ctx);
+ 	if (tcon) {
+@@ -2492,11 +2494,17 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 		goto out_fail;
+ 	}
+ 
+-	tcon = tconInfoAlloc();
++	if (ses->server->dialect >= SMB20_PROT_ID &&
++	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
++		nohandlecache = ctx->nohandlecache;
++	else
++		nohandlecache = true;
++	tcon = tcon_info_alloc(!nohandlecache);
+ 	if (tcon == NULL) {
+ 		rc = -ENOMEM;
+ 		goto out_fail;
+ 	}
++	tcon->nohandlecache = nohandlecache;
+ 
+ 	if (ctx->snapshot_time) {
+ 		if (ses->server->vals->protocol_id == 0) {
+@@ -2657,10 +2665,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	tcon->retry = ctx->retry;
+ 	tcon->nocase = ctx->nocase;
+ 	tcon->broken_sparse_sup = ctx->no_sparse;
+-	if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+-		tcon->nohandlecache = ctx->nohandlecache;
+-	else
+-		tcon->nohandlecache = true;
++	tcon->max_cached_dirs = ctx->max_cached_dirs;
+ 	tcon->nodelete = ctx->nodelete;
+ 	tcon->local_lease = ctx->local_lease;
+ 	INIT_LIST_HEAD(&tcon->pending_opens);
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index f12203c49b802..a3493da12ad1e 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -150,6 +150,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ 	fsparam_u32("closetimeo", Opt_closetimeo),
+ 	fsparam_u32("echo_interval", Opt_echo_interval),
+ 	fsparam_u32("max_credits", Opt_max_credits),
++	fsparam_u32("max_cached_dirs", Opt_max_cached_dirs),
+ 	fsparam_u32("handletimeout", Opt_handletimeout),
+ 	fsparam_u64("snapshot", Opt_snapshot),
+ 	fsparam_u32("max_channels", Opt_max_channels),
+@@ -1165,6 +1166,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		if (result.uint_32 > 1)
+ 			ctx->multichannel = true;
+ 		break;
++	case Opt_max_cached_dirs:
++		if (result.uint_32 < 1) {
++			cifs_errorf(fc, "%s: Invalid max_cached_dirs, needs to be 1 or more\n",
++				    __func__);
++			goto cifs_parse_mount_err;
++		}
++		ctx->max_cached_dirs = result.uint_32;
++		break;
+ 	case Opt_handletimeout:
+ 		ctx->handle_timeout = result.uint_32;
+ 		if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+@@ -1593,7 +1602,7 @@ int smb3_init_fs_context(struct fs_context *fc)
+ 	ctx->acregmax = CIFS_DEF_ACTIMEO;
+ 	ctx->acdirmax = CIFS_DEF_ACTIMEO;
+ 	ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
+-
++	ctx->max_cached_dirs = MAX_CACHED_FIDS;
+ 	/* Most clients set timeout to 0, allows server to use its default */
+ 	ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+ 
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index f4eaf85589022..9d8d34af02114 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -128,6 +128,7 @@ enum cifs_param {
+ 	Opt_closetimeo,
+ 	Opt_echo_interval,
+ 	Opt_max_credits,
++	Opt_max_cached_dirs,
+ 	Opt_snapshot,
+ 	Opt_max_channels,
+ 	Opt_handletimeout,
+@@ -261,6 +262,7 @@ struct smb3_fs_context {
+ 	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
+ 	unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
+ 	unsigned int max_channels;
++	unsigned int max_cached_dirs;
+ 	__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
+ 	bool rootfs:1; /* if it's a SMB root file system */
+ 	bool witness:1; /* use witness protocol */
+@@ -287,7 +289,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+  */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+ #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+-
++#define MAX_CACHED_FIDS 16
+ extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+ 
+ #endif
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index d7e85d9a26553..249fac8be5a51 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -113,18 +113,22 @@ sesInfoFree(struct cifs_ses *buf_to_free)
+ }
+ 
+ struct cifs_tcon *
+-tconInfoAlloc(void)
++tcon_info_alloc(bool dir_leases_enabled)
+ {
+ 	struct cifs_tcon *ret_buf;
+ 
+ 	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
+ 	if (!ret_buf)
+ 		return NULL;
+-	ret_buf->cfids = init_cached_dirs();
+-	if (!ret_buf->cfids) {
+-		kfree(ret_buf);
+-		return NULL;
++
++	if (dir_leases_enabled == true) {
++		ret_buf->cfids = init_cached_dirs();
++		if (!ret_buf->cfids) {
++			kfree(ret_buf);
++			return NULL;
++		}
+ 	}
++	/* else ret_buf->cfids is already set to NULL above */
+ 
+ 	atomic_inc(&tconInfoAllocCount);
+ 	ret_buf->status = TID_NEW;
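
When dir_leases_enabled is false, tcon->cfids deliberately stays NULL, so consumers must treat a NULL cfids as "directory caching disabled". A minimal caller-side sketch; the error handling is illustrative, not from the patch:

    struct cifs_tcon *tcon;

    /* IPC and the reconnect dummy tcon never cache directories */
    tcon = tcon_info_alloc(false);
    if (!tcon)
        return -ENOMEM;
    /* tcon->cfids == NULL here; directory-cache paths must be skipped */
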
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 9c7e46b7e7c7a..c22cc72223814 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3871,7 +3871,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		goto done;
+ 
+ 	/* allocate a dummy tcon struct used for reconnect */
+-	tcon = tconInfoAlloc();
++	tcon = tcon_info_alloc(false);
+ 	if (!tcon) {
+ 		resched = true;
+ 		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index ca3c8e10f24a0..2bb22c8ceb595 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -58,6 +58,7 @@ struct resv_map {
+ 	long adds_in_progress;
+ 	struct list_head region_cache;
+ 	long region_cache_count;
++	struct rw_semaphore rw_sema;
+ #ifdef CONFIG_CGROUP_HUGETLB
+ 	/*
+ 	 * On private mappings, the counter to uncharge reservations is stored
+@@ -1245,6 +1246,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+ 	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+ }
+ 
++static inline bool __vma_private_lock(struct vm_area_struct *vma)
++{
++	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
++}
++
+ /*
+  * Safe version of huge_pte_offset() to check the locks.  See comments
+  * above huge_pte_offset().
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 4b998090898e3..1d7aea6342171 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4236,6 +4236,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+ 	return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+ }
+ 
++/**
++ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
++ * protected dual of public action management frame
++ * @skb: the skb containing the frame, length will be checked
++ *
++ * Return: true if the skb contains a protected dual of public action
++ * management frame, false otherwise.
++ */
++static inline bool
++ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
++{
++	u8 action;
++
++	if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
++	    skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
++		return false;
++
++	action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
++
++	return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
++		action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
++		action != WLAN_PUB_ACTION_MSMT_PILOT &&
++		action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
++		action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
++		action != WLAN_PUB_ACTION_FTM_REQUEST &&
++		action != WLAN_PUB_ACTION_FTM_RESPONSE &&
++		action != WLAN_PUB_ACTION_FILS_DISCOVERY;
++}
++
+ /**
+  * _ieee80211_is_group_privacy_action - check if frame is a group addressed
+  * privacy action frame
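
The only consumer of the new helper in this patch is net/mac80211/rx.c further down, which narrows the unicast drop rule under management frame protection from "any public action frame" to "only those that have a protected dual"; the call shape is simply:

    if (is_unicast_ether_addr(mgmt->da) &&
        ieee80211_is_protected_dual_of_public_action(rx->skb))
        return -EACCES;    /* sender should have used the protected variant */
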
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index 819b6bc8ac088..045d7913a0f40 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -464,10 +464,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+ 
+ #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+ 
+-#ifdef CONFIG_KASAN_INLINE
++#ifdef CONFIG_KASAN
+ void kasan_non_canonical_hook(unsigned long addr);
+-#else /* CONFIG_KASAN_INLINE */
++#else /* CONFIG_KASAN */
+ static inline void kasan_non_canonical_hook(unsigned long addr) { }
+-#endif /* CONFIG_KASAN_INLINE */
++#endif /* CONFIG_KASAN */
+ 
+ #endif /* LINUX_KASAN_H */
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index d466e1a3b0b19..fe1507c1db828 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -53,6 +53,7 @@ struct nf_flowtable_type {
+ 	struct list_head		list;
+ 	int				family;
+ 	int				(*init)(struct nf_flowtable *ft);
++	bool				(*gc)(const struct flow_offload *flow);
+ 	int				(*setup)(struct nf_flowtable *ft,
+ 						 struct net_device *dev,
+ 						 enum flow_block_command cmd);
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index fd41fdac0a8e6..65e49fae8da7a 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -162,8 +162,24 @@ struct scsi_device {
+ 				 * core. */
+ 	unsigned int eh_timeout; /* Error handling timeout */
+ 
+-	bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
+-	bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for system suspend/resume (suspend to RAM and
++	 * hibernation) operations.
++	 */
++	bool manage_system_start_stop;
++
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for runtime device suspend and resume operations.
++	 */
++	bool manage_runtime_start_stop;
++
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for system shutdown (power off) operations.
++	 */
++	bool manage_shutdown;
+ 
+ 	unsigned removable:1;
+ 	unsigned changed:1;	/* Data invalid due to media change */
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index a8206f5332e99..da0734b182f2f 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -38,7 +38,6 @@ struct find_free_extent_ctl;
+ 	__print_symbolic(type,						\
+ 		{ BTRFS_TREE_BLOCK_REF_KEY, 	"TREE_BLOCK_REF" },	\
+ 		{ BTRFS_EXTENT_DATA_REF_KEY, 	"EXTENT_DATA_REF" },	\
+-		{ BTRFS_EXTENT_REF_V0_KEY, 	"EXTENT_REF_V0" },	\
+ 		{ BTRFS_SHARED_BLOCK_REF_KEY, 	"SHARED_BLOCK_REF" },	\
+ 		{ BTRFS_SHARED_DATA_REF_KEY, 	"SHARED_DATA_REF" })
+ 
+diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
+index ab38d0f411fa4..fc3c32186d7eb 100644
+--- a/include/uapi/linux/btrfs_tree.h
++++ b/include/uapi/linux/btrfs_tree.h
+@@ -220,7 +220,11 @@
+ 
+ #define BTRFS_EXTENT_DATA_REF_KEY	178
+ 
+-#define BTRFS_EXTENT_REF_V0_KEY		180
++/*
++ * Obsolete key. Definition removed in 6.6, value may be reused in the future.
++ *
++ * #define BTRFS_EXTENT_REF_V0_KEY	180
++ */
+ 
+ #define BTRFS_SHARED_BLOCK_REF_KEY	182
+ 
+diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
+index 2f61298a7b779..3dcdb9e33cba2 100644
+--- a/include/uapi/linux/gtp.h
++++ b/include/uapi/linux/gtp.h
+@@ -33,6 +33,6 @@ enum gtp_attrs {
+ 	GTPA_PAD,
+ 	__GTPA_MAX,
+ };
+-#define GTPA_MAX (__GTPA_MAX + 1)
++#define GTPA_MAX (__GTPA_MAX - 1)
+ 
+ #endif /* _UAPI_LINUX_GTP_H_ */
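
The one-liner restores the usual netlink convention: __GTPA_MAX is a sentinel one past the last real attribute, so the public GTPA_MAX must be __GTPA_MAX - 1. Getting it wrong oversizes every table keyed on it. A sketch of the idiom; the policy name is illustrative:

    struct nlattr *tb[GTPA_MAX + 1];    /* valid indices 0..GTPA_MAX */

    err = nla_parse_nested(tb, GTPA_MAX, nla, gtp_policy, extack);
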
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index 76c279b13aee4..b603a06f7103d 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -49,7 +49,6 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
+ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 					  struct seq_file *m)
+ {
+-	struct io_sq_data *sq = NULL;
+ 	struct io_overflow_cqe *ocqe;
+ 	struct io_rings *r = ctx->rings;
+ 	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
+@@ -60,6 +59,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 	unsigned int cq_shift = 0;
+ 	unsigned int sq_shift = 0;
+ 	unsigned int sq_entries, cq_entries;
++	int sq_pid = -1, sq_cpu = -1;
+ 	bool has_lock;
+ 	unsigned int i;
+ 
+@@ -137,13 +137,19 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 	has_lock = mutex_trylock(&ctx->uring_lock);
+ 
+ 	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+-		sq = ctx->sq_data;
+-		if (!sq->thread)
+-			sq = NULL;
++		struct io_sq_data *sq = ctx->sq_data;
++
++		if (mutex_trylock(&sq->lock)) {
++			if (sq->thread) {
++				sq_pid = task_pid_nr(sq->thread);
++				sq_cpu = task_cpu(sq->thread);
++			}
++			mutex_unlock(&sq->lock);
++		}
+ 	}
+ 
+-	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
+-	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
++	seq_printf(m, "SqThread:\t%d\n", sq_pid);
++	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
+ 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
+ 	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
+ 		struct file *f = io_file_from_index(&ctx->file_table, i);
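
The rewrite exists because the old code tested sq->thread without holding sq->lock, racing with SQPOLL thread exit between the NULL check and the task_pid_nr()/task_cpu() calls. Snapshotting into locals under a trylock keeps fdinfo non-blocking while making the dereference safe; reduced to its core:

    int sq_pid = -1, sq_cpu = -1;

    if (mutex_trylock(&sq->lock)) {
        if (sq->thread) {    /* stable only while sq->lock is held */
            sq_pid = task_pid_nr(sq->thread);
            sq_cpu = task_cpu(sq->thread);
        }
        mutex_unlock(&sq->lock);
    }
    seq_printf(m, "SqThread:\t%d\n", sq_pid);    /* print the snapshot */
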
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b3d800738fc5f..f2f4d2b3beee0 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -13383,7 +13383,8 @@ static int inherit_group(struct perf_event *parent_event,
+ 		    !perf_get_aux_event(child_ctr, leader))
+ 			return -EINVAL;
+ 	}
+-	leader->group_generation = parent_event->group_generation;
++	if (leader)
++		leader->group_generation = parent_event->group_generation;
+ 	return 0;
+ }
+ 
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 92dbb21c69616..c63e25cb9406e 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -714,14 +714,30 @@ static int count_symbols(void *data, unsigned long unused)
+ 	return 0;
+ }
+ 
++struct sym_count_ctx {
++	unsigned int count;
++	const char *name;
++};
++
++static int count_mod_symbols(void *data, const char *name, unsigned long unused)
++{
++	struct sym_count_ctx *ctx = data;
++
++	if (strcmp(name, ctx->name) == 0)
++		ctx->count++;
++
++	return 0;
++}
++
+ static unsigned int number_of_same_symbols(char *func_name)
+ {
+-	unsigned int count;
++	struct sym_count_ctx ctx = { .count = 0, .name = func_name };
++
++	kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+ 
+-	count = 0;
+-	kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
++	module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
+ 
+-	return count;
++	return ctx.count;
+ }
+ 
+ static int __trace_kprobe_create(int argc, const char *argv[])
+@@ -1006,7 +1022,7 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+  * @name: The name of the kprobe event
+  * @loc: The location of the kprobe event
+  * @kretprobe: Is this a return probe?
+- * @args: Variable number of arg (pairs), one pair for each field
++ * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+@@ -1079,7 +1095,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+ /**
+  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+  * @cmd: A pointer to the dynevent_cmd struct representing the new event
+- * @args: Variable number of arg (pairs), one pair for each field
++ * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+  * rather use the kprobe_event_add_fields() wrapper, which
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index b2f1c90c18fa0..20923e03aa35a 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5729,7 +5729,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
+ 	/* Internal nodes */
+ 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ 	/* Add working room for split (2 nodes) + new parents */
+-	mas_node_count(mas, nr_nodes + 3);
++	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+ 
+ 	/* Detect if allocations run out */
+ 	mas->mas_flags |= MA_STATE_PREALLOC;
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index 4e7fd364f0f1f..e310e1c7a5407 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/maple_tree.h>
+ #include <linux/module.h>
++#include <linux/rwsem.h>
+ 
+ #define MTREE_ALLOC_MAX 0x2000000000000Ul
+ #define CONFIG_MAPLE_SEARCH
+@@ -1714,17 +1715,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
+ 	void *val;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, mt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
+ 
+ 	for (i = 0; i <= nr_entries; i++)
+ 		mtree_store_range(mt, i*10, i*10 + 5,
+ 				  xa_mk_value(i), GFP_KERNEL);
+ 
+ 	mt_set_non_kernel(99999);
+-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 	newmas.tree = &newmt;
+ 	mas_reset(&newmas);
+ 	mas_reset(&mas);
+-	mas_lock(&newmas);
++	down_write(&newmt_lock);
+ 	mas.index = 0;
+ 	mas.last = 0;
+ 	if (mas_expected_entries(&newmas, nr_entries)) {
+@@ -1739,10 +1744,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
+ 	}
+ 	rcu_read_unlock();
+ 	mas_destroy(&newmas);
+-	mas_unlock(&newmas);
+ 	mt_validate(&newmt);
+ 	mt_set_non_kernel(0);
+-	mtree_destroy(&newmt);
++	__mt_destroy(&newmt);
++	up_write(&newmt_lock);
+ }
+ 
+ static noinline void __init check_iteration(struct maple_tree *mt)
+@@ -1853,6 +1858,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 	void *val;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, mt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 
+ 	for (i = 0; i <= nr_entries; i++)
+ 		mtree_store_range(mt, i*10, i*10 + 5,
+@@ -1867,7 +1876,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 		mas.index = 0;
+ 		mas.last = 0;
+ 		rcu_read_lock();
+-		mas_lock(&newmas);
++		down_write(&newmt_lock);
+ 		if (mas_expected_entries(&newmas, nr_entries)) {
+ 			printk("OOM!");
+ 			BUG_ON(1);
+@@ -1878,11 +1887,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 			mas_store(&newmas, val);
+ 		}
+ 		mas_destroy(&newmas);
+-		mas_unlock(&newmas);
+ 		rcu_read_unlock();
+ 		mt_validate(&newmt);
+ 		mt_set_non_kernel(0);
+-		mtree_destroy(&newmt);
++		__mt_destroy(&newmt);
++		up_write(&newmt_lock);
+ 	}
+ }
+ #endif
+@@ -2489,6 +2498,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 	void *tmp;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, &newmt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 
+ 	if (!zero_start)
+ 		i = 1;
+@@ -2498,9 +2511,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 		mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ 				  xa_mk_value(i), GFP_KERNEL);
+ 
+-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ 	mt_set_non_kernel(99999);
+-	mas_lock(&newmas);
++	down_write(&newmt_lock);
+ 	ret = mas_expected_entries(&newmas, nr_entries);
+ 	mt_set_non_kernel(0);
+ 	MT_BUG_ON(mt, ret != 0);
+@@ -2513,9 +2526,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 	}
+ 	rcu_read_unlock();
+ 	mas_destroy(&newmas);
+-	mas_unlock(&newmas);
+ 
+-	mtree_destroy(&newmt);
++	__mt_destroy(&newmt);
++	up_write(&newmt_lock);
+ }
+ 
+ /* Duplicate many sizes of trees.  Mainly to test expected entry values */
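
All three converted tests follow the same recipe for a maple tree serialized by an external lock; lifted out of the diff, the lifecycle is:

    struct rw_semaphore lock;

    init_rwsem(&lock);
    mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
    mt_set_external_lock(&newmt, &lock);

    down_write(&lock);
    /* ... mas_store()/mas_destroy() against newmt ... */
    __mt_destroy(&newmt);    /* not mtree_destroy(): the lock is caller-owned */
    up_write(&lock);
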
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index 33e1d5c9cb549..df165820c6054 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1202,6 +1202,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ 	return 0;
+ }
+ 
++static bool damon_sysfs_schemes_regions_updating;
++
+ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
+ {
+ 	struct damon_target *t, *next;
+@@ -1209,10 +1211,12 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
+ 
+ 	/* damon_sysfs_schemes_update_regions_stop() might not have been called yet */
+ 	kdamond = damon_sysfs_cmd_request.kdamond;
+-	if (kdamond && damon_sysfs_cmd_request.cmd ==
++	if (kdamond && (damon_sysfs_cmd_request.cmd ==
+ 			DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS &&
++			damon_sysfs_schemes_regions_updating) &&
+ 			ctx == kdamond->damon_ctx) {
+ 		damon_sysfs_schemes_update_regions_stop(ctx);
++		damon_sysfs_schemes_regions_updating = false;
+ 		mutex_unlock(&damon_sysfs_lock);
+ 	}
+ 
+@@ -1331,7 +1335,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
+ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
+ {
+ 	struct damon_sysfs_kdamond *kdamond;
+-	static bool damon_sysfs_schemes_regions_updating;
+ 	int err = 0;
+ 
+ 	/* avoid deadlock due to concurrent state_store('off') */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 6da626bfb52e3..097b81c37597e 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -96,6 +96,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ 		unsigned long start, unsigned long end);
++static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+ 
+ static inline bool subpool_is_free(struct hugepage_subpool *spool)
+ {
+@@ -266,6 +267,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		down_read(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		down_read(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -275,6 +280,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		up_read(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		up_read(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -284,6 +293,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		down_write(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		down_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -293,17 +306,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		up_write(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		up_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+ int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+ {
+-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+-	if (!__vma_shareable_lock(vma))
+-		return 1;
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+-	return down_write_trylock(&vma_lock->rw_sema);
++		return down_write_trylock(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		return down_write_trylock(&resv_map->rw_sema);
++	}
++
++	return 1;
+ }
+ 
+ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+@@ -312,6 +335,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		lockdep_assert_held(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		lockdep_assert_held(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -344,6 +371,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		__hugetlb_vma_unlock_write_put(vma_lock);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		/* no free for anon vmas, but still need to unlock */
++		up_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -1062,6 +1094,7 @@ struct resv_map *resv_map_alloc(void)
+ 	kref_init(&resv_map->refs);
+ 	spin_lock_init(&resv_map->lock);
+ 	INIT_LIST_HEAD(&resv_map->regions);
++	init_rwsem(&resv_map->rw_sema);
+ 
+ 	resv_map->adds_in_progress = 0;
+ 	/*
+@@ -1132,8 +1165,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
+ 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+ 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
+ 
+-	set_vma_private_data(vma, (get_vma_private_data(vma) &
+-				HPAGE_RESV_MASK) | (unsigned long)map);
++	set_vma_private_data(vma, (unsigned long)map);
+ }
+ 
+ static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
+@@ -7015,8 +7047,10 @@ out_err:
+ 		 */
+ 		if (chg >= 0 && add < 0)
+ 			region_abort(resv_map, from, to, regions_needed);
+-	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
++	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ 		kref_put(&resv_map->refs, resv_map_release);
++		set_vma_resv_map(vma, NULL);
++	}
+ 	return false;
+ }
+ 
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index ca4b6ff080a64..3974e4549c3e7 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -621,9 +621,8 @@ void kasan_report_async(void)
+ }
+ #endif /* CONFIG_KASAN_HW_TAGS */
+ 
+-#ifdef CONFIG_KASAN_INLINE
+ /*
+- * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
++ * With CONFIG_KASAN, accesses to bogus pointers (outside the high
+  * canonical half of the address space) cause out-of-bounds shadow memory reads
+  * before the actual access. For addresses in the low canonical half of the
+  * address space, as well as most non-canonical addresses, that out-of-bounds
+@@ -659,4 +658,3 @@ void kasan_non_canonical_hook(unsigned long addr)
+ 	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
+ 		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
+ }
+-#endif
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 071edec3dca2a..7d82355ad0b3b 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		 * the home node for vmas we already updated before.
+ 		 */
+ 		old = vma_policy(vma);
+-		if (!old)
++		if (!old) {
++			prev = vma;
+ 			continue;
++		}
+ 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
+ 			err = -EOPNOTSUPP;
+ 			break;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 24baad2571e31..0e5cce8e8bb41 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2160,6 +2160,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
+ 			 const int __user *nodes,
+ 			 int __user *status, int flags)
+ {
++	compat_uptr_t __user *compat_pages = (void __user *)pages;
+ 	int current_node = NUMA_NO_NODE;
+ 	LIST_HEAD(pagelist);
+ 	int start, i;
+@@ -2172,8 +2173,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
+ 		int node;
+ 
+ 		err = -EFAULT;
+-		if (get_user(p, pages + i))
+-			goto out_flush;
++		if (in_compat_syscall()) {
++			compat_uptr_t cp;
++
++			if (get_user(cp, compat_pages + i))
++				goto out_flush;
++
++			p = compat_ptr(cp);
++		} else {
++			if (get_user(p, pages + i))
++				goto out_flush;
++		}
+ 		if (get_user(node, nodes + i))
+ 			goto out_flush;
+ 
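
The underlying bug is a stride mismatch: a 32-bit caller hands move_pages() an array of 32-bit user pointers, but get_user(p, pages + i) on a 64-bit kernel walks that array in sizeof(void *) steps and reads the wrong words. The fix re-reads the array with compat sizing, the canonical shape of which is:

    if (in_compat_syscall()) {
        compat_uptr_t cp;

        if (get_user(cp, compat_pages + i))    /* 4-byte stride */
            return -EFAULT;
        p = compat_ptr(cp);                    /* widen to a native pointer */
    } else if (get_user(p, pages + i)) {       /* native stride */
        return -EFAULT;
    }
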
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 3937479d0e071..6d25c619911f1 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3136,13 +3136,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
+ 	if (!len)
+ 		return 0;
+ 
+-	if (mmap_write_lock_killable(mm))
+-		return -EINTR;
+-
+ 	/* Until we need other flags, refuse anything except VM_EXEC. */
+ 	if ((flags & (~VM_EXEC)) != 0)
+ 		return -EINVAL;
+ 
++	if (mmap_write_lock_killable(mm))
++		return -EINTR;
++
+ 	ret = check_brk_limits(addr, len);
+ 	if (ret)
+ 		goto limits_failed;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d322bfae8f69b..f9a6a42f78016 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6528,6 +6528,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
+ 			next_page = page;
+ 			current_buddy = page + size;
+ 		}
++		page = next_page;
+ 
+ 		if (set_page_guard(zone, current_buddy, high, migratetype))
+ 			continue;
+@@ -6535,7 +6536,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
+ 		if (current_buddy != target) {
+ 			add_to_free_list(current_buddy, zone, high, migratetype);
+ 			set_buddy_order(current_buddy, high);
+-			page = next_page;
+ 		}
+ 	}
+ }
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index f16ec0e8a0348..4a1d669b46f90 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
+ 
+ static int neigh_forced_gc(struct neigh_table *tbl)
+ {
+-	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
++	int max_clean = atomic_read(&tbl->gc_entries) -
++			READ_ONCE(tbl->gc_thresh2);
+ 	unsigned long tref = jiffies - 5 * HZ;
+ 	struct neighbour *n, *tmp;
+ 	int shrunk = 0;
+@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 		}
+ 	}
+ 
+-	tbl->last_flush = jiffies;
++	WRITE_ONCE(tbl->last_flush, jiffies);
+ 
+ 	write_unlock_bh(&tbl->lock);
+ 
+@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
+ {
+ 	struct neighbour *n = NULL;
+ 	unsigned long now = jiffies;
+-	int entries;
++	int entries, gc_thresh3;
+ 
+ 	if (exempt_from_gc)
+ 		goto do_alloc;
+ 
+ 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
+-	if (entries >= tbl->gc_thresh3 ||
+-	    (entries >= tbl->gc_thresh2 &&
+-	     time_after(now, tbl->last_flush + 5 * HZ))) {
+-		if (!neigh_forced_gc(tbl) &&
+-		    entries >= tbl->gc_thresh3) {
++	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
++	if (entries >= gc_thresh3 ||
++	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
++	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
++		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
+ 			net_info_ratelimited("%s: neighbor table overflow!\n",
+ 					     tbl->id);
+ 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
+@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
+ 
+ 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
+ 		struct neigh_parms *p;
+-		tbl->last_rand = jiffies;
++
++		WRITE_ONCE(tbl->last_rand, jiffies);
+ 		list_for_each_entry(p, &tbl->parms_list, list)
+ 			p->reachable_time =
+ 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ 	}
+ 
+-	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
++	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
+ 		goto out;
+ 
+ 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
+@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 	ndtmsg->ndtm_pad2   = 0;
+ 
+ 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+-	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
+-	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+-	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+-	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
++	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
++			  NDTA_PAD) ||
++	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
++	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
++	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
+ 		goto nla_put_failure;
+ 	{
+ 		unsigned long now = jiffies;
+-		long flush_delta = now - tbl->last_flush;
+-		long rand_delta = now - tbl->last_rand;
++		long flush_delta = now - READ_ONCE(tbl->last_flush);
++		long rand_delta = now - READ_ONCE(tbl->last_rand);
+ 		struct neigh_hash_table *nht;
+ 		struct ndt_config ndc = {
+ 			.ndtc_key_len		= tbl->key_len,
+@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 			.ndtc_entries		= atomic_read(&tbl->entries),
+ 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
+ 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
+-			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
++			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
+ 		};
+ 
+ 		rcu_read_lock();
+@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 			struct neigh_statistics	*st;
+ 
+ 			st = per_cpu_ptr(tbl->stats, cpu);
+-			ndst.ndts_allocs		+= st->allocs;
+-			ndst.ndts_destroys		+= st->destroys;
+-			ndst.ndts_hash_grows		+= st->hash_grows;
+-			ndst.ndts_res_failed		+= st->res_failed;
+-			ndst.ndts_lookups		+= st->lookups;
+-			ndst.ndts_hits			+= st->hits;
+-			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
+-			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
+-			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
+-			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
+-			ndst.ndts_table_fulls		+= st->table_fulls;
++			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
++			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
++			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
++			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
++			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
++			ndst.ndts_hits			+= READ_ONCE(st->hits);
++			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
++			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
++			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
++			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
++			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
+ 		}
+ 
+ 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
+@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		goto errout_tbl_lock;
+ 
+ 	if (tb[NDTA_THRESH1])
+-		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
++		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
+ 
+ 	if (tb[NDTA_THRESH2])
+-		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
++		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
+ 
+ 	if (tb[NDTA_THRESH3])
+-		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
++		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
+ 
+ 	if (tb[NDTA_GC_INTERVAL])
+-		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
++		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
+ 
+ 	err = 0;
+ 
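
Each conversion above is one half of a READ_ONCE()/WRITE_ONCE() pair: neightbl_set() updates the thresholds under tbl->lock while neigh_alloc(), neigh_forced_gc() and neigh_periodic_work() read them locklessly, so both sides are annotated to keep the compiler from tearing or re-loading the values. The minimal pairing:

    /* writer (neightbl_set) */
    WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));

    /* lockless reader: load once, then use the loaded value consistently */
    if (atomic_read(&tbl->gc_entries) >= READ_ONCE(tbl->gc_thresh2))
        neigh_forced_gc(tbl);
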
+diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
+index d0bc1dd8e65a8..80c7302692c74 100644
+--- a/net/handshake/netlink.c
++++ b/net/handshake/netlink.c
+@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
+ }
+ EXPORT_SYMBOL(handshake_genl_put);
+ 
+-/*
+- * dup() a kernel socket for use as a user space file descriptor
+- * in the current process. The kernel socket must have an
+- * instatiated struct file.
+- *
+- * Implicit argument: "current()"
+- */
+-static int handshake_dup(struct socket *sock)
+-{
+-	struct file *file;
+-	int newfd;
+-
+-	file = get_file(sock->file);
+-	newfd = get_unused_fd_flags(O_CLOEXEC);
+-	if (newfd < 0) {
+-		fput(file);
+-		return newfd;
+-	}
+-
+-	fd_install(newfd, file);
+-	return newfd;
+-}
+-
+ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct net *net = sock_net(skb->sk);
+@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
+ 		goto out_status;
+ 
+ 	sock = req->hr_sk->sk_socket;
+-	fd = handshake_dup(sock);
++	fd = get_unused_fd_flags(O_CLOEXEC);
+ 	if (fd < 0) {
+ 		err = fd;
+ 		goto out_complete;
+ 	}
++
+ 	err = req->hr_proto->hp_accept(req, info, fd);
+ 	if (err) {
+-		fput(sock->file);
++		put_unused_fd(fd);
+ 		goto out_complete;
+ 	}
+ 
++	fd_install(fd, get_file(sock->file));
++
+ 	trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
+ 	return 0;
+ 
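
The reorder is the point of the fix: the removed handshake_dup() installed the file into the fd table while hp_accept() could still fail, and an installed descriptor cannot safely be taken back. Reserving the number first and installing the file only after the last failure point is the canonical pattern; do_setup() below is an illustrative stand-in for hp_accept():

    fd = get_unused_fd_flags(O_CLOEXEC);    /* reserve a number, no file yet */
    if (fd < 0)
        return fd;

    err = do_setup(fd);                     /* may still fail */
    if (err) {
        put_unused_fd(fd);                  /* nothing was installed */
        return err;
    }

    fd_install(fd, get_file(sock->file));   /* point of no return */
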
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 9bdc1b2eaf734..a0a87446f827c 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -925,10 +925,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+ 	return mss_now;
+ }
+ 
+-/* In some cases, both sendmsg() could have added an skb to the write queue,
+- * but failed adding payload on it.  We need to remove it to consume less
++/* In some cases, sendmsg() could have added an skb to the write queue,
++ * but failed adding payload on it. We need to remove it to consume less
+  * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
+- * epoll() users.
++ * epoll() users. Another reason is that tcp_write_xmit() does not like
++ * finding an empty skb in the write queue.
+  */
+ void tcp_remove_empty_skb(struct sock *sk)
+ {
+@@ -1286,6 +1287,7 @@ new_segment:
+ 
+ wait_for_space:
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
++		tcp_remove_empty_skb(sk);
+ 		if (copied)
+ 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
+ 				 TCP_NAGLE_PUSH, size_goal);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a5781f86ac375..7d544f965b264 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2202,16 +2202,17 @@ void tcp_enter_loss(struct sock *sk)
+  * restore sanity to the SACK scoreboard. If the apparent reneging
+  * persists until this RTO then we'll clear the SACK scoreboard.
+  */
+-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
++static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
+ {
+-	if (flag & FLAG_SACK_RENEGING &&
+-	    flag & FLAG_SND_UNA_ADVANCED) {
++	if (*ack_flag & FLAG_SACK_RENEGING &&
++	    *ack_flag & FLAG_SND_UNA_ADVANCED) {
+ 		struct tcp_sock *tp = tcp_sk(sk);
+ 		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
+ 					  msecs_to_jiffies(10));
+ 
+ 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ 					  delay, TCP_RTO_MAX);
++		*ack_flag &= ~FLAG_SET_XMIT_TIMER;
+ 		return true;
+ 	}
+ 	return false;
+@@ -2981,7 +2982,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 		tp->prior_ssthresh = 0;
+ 
+ 	/* B. In all the states check for reneging SACKs. */
+-	if (tcp_check_sack_reneging(sk, flag))
++	if (tcp_check_sack_reneging(sk, ack_flag))
+ 		return;
+ 
+ 	/* C. Check consistency of the current state. */
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index e751cda5eef69..8f6b6f56b65b4 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
+ 
+ 		/* drop unicast public action frames when using MPF */
+ 		if (is_unicast_ether_addr(mgmt->da) &&
+-		    ieee80211_is_public_action((void *)rx->skb->data,
+-					       rx->skb->len))
++		    ieee80211_is_protected_dual_of_public_action(rx->skb))
+ 			return -EACCES;
+ 	}
+ 
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 1d34d700bd09b..920a5a29ae1dc 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_refresh);
+ 
+-static bool nf_flow_is_outdated(const struct flow_offload *flow)
+-{
+-	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+-		!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+-}
+-
+ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+ {
+ 	return nf_flow_timeout_delta(flow->timeout) <= 0;
+@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ 	return err;
+ }
+ 
++static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
++			      const struct flow_offload *flow)
++{
++	return flow_table->type->gc && flow_table->type->gc(flow);
++}
++
+ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+ 				    struct flow_offload *flow, void *data)
+ {
+ 	if (nf_flow_has_expired(flow) ||
+ 	    nf_ct_is_dying(flow->ct) ||
+-	    nf_flow_is_outdated(flow))
++	    nf_flow_custom_gc(flow_table, flow))
+ 		flow_offload_teardown(flow);
+ 
+ 	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index abc71a06d634a..ad7c955453782 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -278,7 +278,16 @@ err_nat:
+ 	return err;
+ }
+ 
++static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
++{
++	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
++	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
++	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
++	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
++}
++
+ static struct nf_flowtable_type flowtable_ct = {
++	.gc		= tcf_ct_flow_is_outdated,
+ 	.action		= tcf_ct_flow_table_fill_actions,
+ 	.owner		= THIS_MODULE,
+ };
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index e95df847176b6..b80bf681327bd 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 
+ 	virtio_device_ready(vdev);
+ 
++	return 0;
++}
++
++static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
++{
+ 	mutex_lock(&vsock->tx_lock);
+ 	vsock->tx_run = true;
+ 	mutex_unlock(&vsock->tx_lock);
+@@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 	vsock->event_run = true;
+ 	mutex_unlock(&vsock->event_lock);
+ 
+-	return 0;
++	/* virtio_transport_send_pkt() can queue packets once
++	 * the_virtio_vsock is set, but they won't be processed until
++	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
++	 * when initialization finishes to send those packets queued
++	 * earlier.
++	 * We don't need to queue the other workers (rx, event) because
++	 * as long as we don't fill the queues with empty buffers, the
++	 * host can't send us any notification.
++	 */
++	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
+ 
+ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+@@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 		goto out;
+ 
+ 	rcu_assign_pointer(the_virtio_vsock, vsock);
++	virtio_vsock_vqs_start(vsock);
+ 
+ 	mutex_unlock(&the_virtio_vsock_mutex);
+ 
+@@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
+ 		goto out;
+ 
+ 	rcu_assign_pointer(the_virtio_vsock, vsock);
++	virtio_vsock_vqs_start(vsock);
+ 
+ out:
+ 	mutex_unlock(&the_virtio_vsock_mutex);
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 3e2c398abddcc..55a1d3633853f 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
+ 
+ 	for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+ 		cr.links[link_id].status = data->links[link_id].status;
++		cr.links[link_id].bss = data->links[link_id].bss;
++
+ 		WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
+ 			     (!cr.ap_mld_addr || !cr.links[link_id].bss));
+ 
+-		cr.links[link_id].bss = data->links[link_id].bss;
+ 		if (!cr.links[link_id].bss)
+ 			continue;
+ 		cr.links[link_id].bssid = data->links[link_id].bss->bssid;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 939deecf0bbef..8210a6090ac16 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ 	if (!res)
+ 		goto drop;
+ 
+-	rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
++	rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
+ 
+ 	if (data->bss_source == BSS_SOURCE_MBSSID) {
+ 		/* this is a nontransmitting bss, we need to add it to
+diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
+new file mode 100644
+index 0000000000000..83971b3cbfced
+--- /dev/null
++++ b/tools/include/linux/rwsem.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++#ifndef _TOOLS__RWSEM_H
++#define _TOOLS__RWSEM_H
++
++#include <pthread.h>
++
++struct rw_semaphore {
++	pthread_rwlock_t lock;
++};
++
++static inline int init_rwsem(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_init(&sem->lock, NULL);
++}
++
++static inline int exit_rwsem(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_destroy(&sem->lock);
++}
++
++static inline int down_read(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_rdlock(&sem->lock);
++}
++
++static inline int up_read(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_unlock(&sem->lock);
++}
++
++static inline int down_write(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_wrlock(&sem->lock);
++}
++
++static inline int up_write(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_unlock(&sem->lock);
++}
++#endif /* _TOOLS_RWSEM_H */
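
The shim lets kernel test code that takes an rw_semaphore build unchanged in the userspace test harness, mapping each primitive 1:1 onto a pthread rwlock. A standalone usage sketch, assuming tools/include is on the include path:

    #include <assert.h>
    #include <linux/rwsem.h>    /* the shim above */

    int main(void)
    {
        struct rw_semaphore sem;

        assert(init_rwsem(&sem) == 0);
        down_write(&sem);    /* exclusive section */
        up_write(&sem);
        down_read(&sem);     /* shared section, may run concurrently */
        up_read(&sem);
        return exit_rwsem(&sem);
    }
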
+diff --git a/tools/testing/selftests/mm/mremap_dontunmap.c b/tools/testing/selftests/mm/mremap_dontunmap.c
+index ca2359835e751..a06e73ec85682 100644
+--- a/tools/testing/selftests/mm/mremap_dontunmap.c
++++ b/tools/testing/selftests/mm/mremap_dontunmap.c
+@@ -7,6 +7,7 @@
+  */
+ #define _GNU_SOURCE
+ #include <sys/mman.h>
++#include <linux/mman.h>
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-25 11:35 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-25 11:35 UTC (permalink / raw
  To: gentoo-commits

commit:     faeab2754ec5d68c53edb5980435be933c285186
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 25 11:34:55 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 25 11:34:55 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=faeab275

Linux patch 6.5.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-6.5.9.patch | 8349 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8353 insertions(+)

diff --git a/0000_README b/0000_README
index 0f2b2d88..63f98435 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.5.8.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.8
 
+Patch:  1008_linux-6.5.9.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-6.5.9.patch b/1008_linux-6.5.9.patch
new file mode 100644
index 00000000..6ce021b0
--- /dev/null
+++ b/1008_linux-6.5.9.patch
@@ -0,0 +1,8349 @@
+diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+index 6da28e6305778..30a2c1e36b970 100644
+--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
++++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+@@ -69,7 +69,7 @@ properties:
+     maxItems: 4
+ 
+   clocks:
+-    minItems: 3
++    minItems: 2
+     items:
+       - description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
+       - description: SDC MMC clock, MCLK
+diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
+index ee1f5cd54496e..decb39c19b9ed 100644
+--- a/Documentation/networking/representors.rst
++++ b/Documentation/networking/representors.rst
+@@ -162,9 +162,11 @@ How are representors identified?
+ The representor netdevice should *not* directly refer to a PCIe device (e.g.
+ through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
+ representee or of the switchdev function.
+-Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
+-the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
+-nodes.  (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
++Instead, the driver should use the ``SET_NETDEV_DEVLINK_PORT`` macro to
++assign a devlink port instance to the netdevice before registering the
++netdevice; the kernel uses the devlink port to provide the ``phys_switch_id``
++and ``phys_port_name`` sysfs nodes.
++(Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+ ``ndo_get_phys_port_name()`` directly, but this is deprecated.)  See
+ :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>` for the
+ details of this API.
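
The documentation stays prose-only, so as a hedged sketch of the recommended call in driver code (the names are illustrative and the devlink port is assumed to be registered already):

    /* after devlink_port_register() has succeeded: */
    SET_NETDEV_DEVLINK_PORT(netdev, &rep->dl_port);

    err = register_netdev(netdev);    /* phys_switch_id / phys_port_name
                                         now derive from the devlink port */
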
+diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
+index 49029ee82e559..081397827a7ea 100644
+--- a/Documentation/rust/general-information.rst
++++ b/Documentation/rust/general-information.rst
+@@ -29,7 +29,7 @@ target with the same invocation used for compilation, e.g.::
+ 
+ To read the docs locally in your web browser, run e.g.::
+ 
+-	xdg-open rust/doc/kernel/index.html
++	xdg-open Documentation/output/rust/rustdoc/kernel/index.html
+ 
+ To learn about how to write the documentation, please see coding-guidelines.rst.
+ 
+diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
+index 4ae8c66b94faf..e599be2cec9ba 100644
+--- a/Documentation/rust/index.rst
++++ b/Documentation/rust/index.rst
+@@ -6,6 +6,14 @@ Rust
+ Documentation related to Rust within the kernel. To start using Rust
+ in the kernel, please read the quick-start.rst guide.
+ 
++.. only:: rustdoc and html
++
++	You can also browse `rustdoc documentation <rustdoc/kernel/index.html>`_.
++
++.. only:: not rustdoc and html
++
++	This documentation does not include rustdoc generated information.
++
+ .. toctree::
+     :maxdepth: 1
+ 
+diff --git a/Makefile b/Makefile
+index a687c9a0646cb..fc83c079c4716 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -1595,7 +1595,7 @@ endif
+ # Directories & files removed with 'make clean'
+ CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
+ 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
+-	       compile_commands.json .thinlto-cache rust/test rust/doc \
++	       compile_commands.json .thinlto-cache rust/test \
+ 	       rust-project.json .vmlinux.objs .vmlinux.export.c
+ 
+ # Directories & files removed with 'make mrproper'
+diff --git a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
+index d69f0f4b4990d..d2d516d113baa 100644
+--- a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
++++ b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
+@@ -640,6 +640,7 @@
+ &uart3 {
+ 	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+ 			       &omap4_pmx_core 0x17c>;
++	overrun-throttle-ms = <500>;
+ };
+ 
+ &uart4 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 43011bc41da77..54c674c45b49a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -2958,7 +2958,7 @@
+ 			clock-names = "merge","merge_async";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xc000 0x1000>;
+-			mediatek,merge-mute = <1>;
++			mediatek,merge-mute;
+ 			resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC>;
+ 		};
+ 
+@@ -2971,7 +2971,7 @@
+ 			clock-names = "merge","merge_async";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xd000 0x1000>;
+-			mediatek,merge-mute = <1>;
++			mediatek,merge-mute;
+ 			resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC>;
+ 		};
+ 
+@@ -2984,7 +2984,7 @@
+ 			clock-names = "merge","merge_async";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xe000 0x1000>;
+-			mediatek,merge-mute = <1>;
++			mediatek,merge-mute;
+ 			resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC>;
+ 		};
+ 
+@@ -2997,7 +2997,7 @@
+ 			clock-names = "merge","merge_async";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xf000 0x1000>;
+-			mediatek,merge-mute = <1>;
++			mediatek,merge-mute;
+ 			resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC>;
+ 		};
+ 
+@@ -3010,7 +3010,7 @@
+ 			clock-names = "merge","merge_async";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c11XXXX 0x0000 0x1000>;
+-			mediatek,merge-fifo-en = <1>;
++			mediatek,merge-fifo-en;
+ 			resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC>;
+ 		};
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 0b1172cbeccb3..b3fdb3d268367 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -917,7 +917,7 @@ config ARCH_FORCE_MAX_ORDER
+ 	default "6" if PPC32 && PPC_64K_PAGES
+ 	range 4 10 if PPC32 && PPC_256K_PAGES
+ 	default "4" if PPC32 && PPC_256K_PAGES
+-	range 10 10
++	range 10 12
+ 	default "10"
+ 	help
+ 	  The kernel page allocator limits the size of maximal physically
+diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
+index 253620979d0cd..6dd2f46bd3ef6 100644
+--- a/arch/powerpc/lib/qspinlock.c
++++ b/arch/powerpc/lib/qspinlock.c
+@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
+ 	if ((yield_count & 1) == 0)
+ 		goto yield_prev; /* owner vcpu is running */
+ 
++	if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
++		goto yield_prev; /* re-sample lock owner */
++
+ 	spin_end();
+ 
+ 	preempted = true;
+diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
+index 2d9b01d7ca4c5..99209085c75bc 100644
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -564,6 +564,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ 		s->dma_length = 0;
+ 	}
+ }
++
++static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
++{
++	size_t n = BITS_TO_LONGS(bits);
++	size_t bytes;
++
++	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
++		return NULL;
++
++	return vzalloc(bytes);
++}
+ 	
+ int zpci_dma_init_device(struct zpci_dev *zdev)
+ {
+@@ -604,13 +615,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 				zdev->end_dma - zdev->start_dma + 1);
+ 	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
+ 	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+-	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
++	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ 	if (!zdev->iommu_bitmap) {
+ 		rc = -ENOMEM;
+ 		goto free_dma_table;
+ 	}
+ 	if (!s390_iommu_strict) {
+-		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
++		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ 		if (!zdev->lazy_bitmap) {
+ 			rc = -ENOMEM;
+ 			goto free_bitmap;
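
bitmap_vzalloc() fixes two problems at once: vzalloc(iommu_pages / 8) sized the bitmap in bytes instead of whole longs, under-allocating the final word when the bit count is not long-aligned, and the long-count multiplication is now checked for overflow. A userspace analogue of the guarded sizing, for illustration only:

    #include <stdint.h>
    #include <stdlib.h>

    static uint64_t *bitmap_alloc_checked(size_t bits)
    {
        size_t n = (bits + 63) / 64;    /* BITS_TO_LONGS on a 64-bit host */
        size_t bytes;

        if (__builtin_mul_overflow(n, sizeof(uint64_t), &bytes))
            return NULL;                /* size computation would wrap */
        return calloc(1, bytes);        /* zeroed, like vzalloc() */
    }
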
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index c3e343bd4760e..45dd3ca2c737d 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ 	return ES_OK;
+ }
+ 
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++	return ES_OK;
++}
++
++static bool fault_in_kernel_space(unsigned long address)
++{
++	return false;
++}
++
+ #undef __init
+ #define __init
+ 
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index b475d9a582b88..e829fa4c6788e 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -148,7 +148,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
+ static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
+ #endif
+ 
+-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
++extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
++					   unsigned int size, u64 xfeatures, u32 pkru);
+ extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
+ 
+ static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index e7c7379d6ac7b..3aa23b250d5e1 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -268,6 +268,7 @@ enum avic_ipi_failure_cause {
+ 	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ 	AVIC_IPI_FAILURE_INVALID_TARGET,
+ 	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
++	AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
+ };
+ 
+ #define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(8, 0)
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 98e507cc7d34c..b582325b9c374 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
+ EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
+ 
+ void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+-				    unsigned int size, u32 pkru)
++				    unsigned int size, u64 xfeatures, u32 pkru)
+ {
+ 	struct fpstate *kstate = gfpu->fpstate;
+ 	union fpregs_state *ustate = buf;
+ 	struct membuf mb = { .p = buf, .left = size };
+ 
+ 	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+-		__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
++		__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
++					  XSTATE_COPY_XSAVE);
+ 	} else {
+ 		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
+ 		       sizeof(ustate->fxsave));
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 1afbc4866b100..ebe698f8af73b 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1053,6 +1053,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+  * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
+  * @to:		membuf descriptor
+  * @fpstate:	The fpstate buffer from which to copy
++ * @xfeatures:	The mask of xfeatures to save (XSAVE mode only)
+  * @pkru_val:	The PKRU value to store in the PKRU component
+  * @copy_mode:	The requested copy mode
+  *
+@@ -1063,7 +1064,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+  * It supports partial copy but @to.pos always starts from zero.
+  */
+ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+-			       u32 pkru_val, enum xstate_copy_mode copy_mode)
++			       u64 xfeatures, u32 pkru_val,
++			       enum xstate_copy_mode copy_mode)
+ {
+ 	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ 	struct xregs_state *xinit = &init_fpstate.regs.xsave;
+@@ -1087,7 +1089,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 		break;
+ 
+ 	case XSTATE_COPY_XSAVE:
+-		header.xfeatures &= fpstate->user_xfeatures;
++		header.xfeatures &= fpstate->user_xfeatures & xfeatures;
+ 		break;
+ 	}
+ 
+@@ -1189,6 +1191,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ 			     enum xstate_copy_mode copy_mode)
+ {
+ 	__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
++				  tsk->thread.fpu.fpstate->user_xfeatures,
+ 				  tsk->thread.pkru, copy_mode);
+ }
+ 
+@@ -1540,10 +1543,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
+ 		fpregs_restore_userregs();
+ 
+ 	newfps->xfeatures = curfps->xfeatures | xfeatures;
+-
+-	if (!guest_fpu)
+-		newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+-
++	newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+ 	newfps->xfd = curfps->xfd & ~xfeatures;
+ 
+ 	/* Do the final updates within the locked region */
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index a4ecb04d8d646..3518fb26d06b0 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -43,7 +43,8 @@ enum xstate_copy_mode {
+ 
+ struct membuf;
+ extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+-				      u32 pkru_val, enum xstate_copy_mode copy_mode);
++				      u64 xfeatures, u32 pkru_val,
++				      enum xstate_copy_mode copy_mode);
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ 				    enum xstate_copy_mode mode);
+ extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index dcf325b7b0229..ccb0915e84e10 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -632,6 +632,23 @@ fail:
+ 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+ }
+ 
++static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
++					   unsigned long address,
++					   bool write)
++{
++	if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
++		ctxt->fi.vector     = X86_TRAP_PF;
++		ctxt->fi.error_code = X86_PF_USER;
++		ctxt->fi.cr2        = address;
++		if (write)
++			ctxt->fi.error_code |= X86_PF_WRITE;
++
++		return ES_EXCEPTION;
++	}
++
++	return ES_OK;
++}
++
+ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ 					  void *src, char *buf,
+ 					  unsigned int data_size,
+@@ -639,7 +656,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ 					  bool backwards)
+ {
+ 	int i, b = backwards ? -1 : 1;
+-	enum es_result ret = ES_OK;
++	unsigned long address = (unsigned long)src;
++	enum es_result ret;
++
++	ret = vc_insn_string_check(ctxt, address, false);
++	if (ret != ES_OK)
++		return ret;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		void *s = src + (i * data_size * b);
+@@ -660,7 +682,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ 					   bool backwards)
+ {
+ 	int i, s = backwards ? -1 : 1;
+-	enum es_result ret = ES_OK;
++	unsigned long address = (unsigned long)dst;
++	enum es_result ret;
++
++	ret = vc_insn_string_check(ctxt, address, true);
++	if (ret != ES_OK)
++		return ret;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		void *d = dst + (i * data_size * s);
+@@ -696,6 +723,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ {
+ 	struct insn *insn = &ctxt->insn;
++	size_t size;
++	u64 port;
++
+ 	*exitinfo = 0;
+ 
+ 	switch (insn->opcode.bytes[0]) {
+@@ -704,7 +734,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0x6d:
+ 		*exitinfo |= IOIO_TYPE_INS;
+ 		*exitinfo |= IOIO_SEG_ES;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* OUTS opcodes */
+@@ -712,41 +742,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0x6f:
+ 		*exitinfo |= IOIO_TYPE_OUTS;
+ 		*exitinfo |= IOIO_SEG_DS;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* IN immediate opcodes */
+ 	case 0xe4:
+ 	case 0xe5:
+ 		*exitinfo |= IOIO_TYPE_IN;
+-		*exitinfo |= (u8)insn->immediate.value << 16;
++		port	   = (u8)insn->immediate.value & 0xffff;
+ 		break;
+ 
+ 	/* OUT immediate opcodes */
+ 	case 0xe6:
+ 	case 0xe7:
+ 		*exitinfo |= IOIO_TYPE_OUT;
+-		*exitinfo |= (u8)insn->immediate.value << 16;
++		port	   = (u8)insn->immediate.value & 0xffff;
+ 		break;
+ 
+ 	/* IN register opcodes */
+ 	case 0xec:
+ 	case 0xed:
+ 		*exitinfo |= IOIO_TYPE_IN;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* OUT register opcodes */
+ 	case 0xee:
+ 	case 0xef:
+ 		*exitinfo |= IOIO_TYPE_OUT;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	default:
+ 		return ES_DECODE_FAILED;
+ 	}
+ 
++	*exitinfo |= port << 16;
++
+ 	switch (insn->opcode.bytes[0]) {
+ 	case 0x6c:
+ 	case 0x6e:
+@@ -756,12 +788,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0xee:
+ 		/* Single byte opcodes */
+ 		*exitinfo |= IOIO_DATA_8;
++		size       = 1;
+ 		break;
+ 	default:
+ 		/* Length determined by instruction parsing */
+ 		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ 						     : IOIO_DATA_32;
++		size       = (insn->opnd_bytes == 2) ? 2 : 4;
+ 	}
++
+ 	switch (insn->addr_bytes) {
+ 	case 2:
+ 		*exitinfo |= IOIO_ADDR_16;
+@@ -777,7 +812,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	if (insn_has_rep_prefix(insn))
+ 		*exitinfo |= IOIO_REP;
+ 
+-	return ES_OK;
++	return vc_ioio_check(ctxt, (u16)port, size);
+ }
+ 
+ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
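+
+The refactor above is easier to follow knowing that the I/O port number
+occupies bits 31:16 of the IOIO exit information word: collecting the port
+in a local variable and OR-ing it in once is equivalent to the per-opcode
+shifts it replaces, and it leaves a (u16) port handy for the new
+vc_ioio_check() call. A minimal standalone sketch of just that encoding
+(names are illustrative, not the kernel's):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* Port number lives in bits 31:16 of the IOIO exit info word. */
+	static uint64_t encode_port(uint64_t exitinfo, uint64_t port)
+	{
+		return exitinfo | ((port & 0xffff) << 16);
+	}
+
+	int main(void)
+	{
+		/* IN AL, imm8: the port comes from the 8-bit immediate. */
+		uint64_t a = encode_port(0, (uint8_t)0x60);
+		/* IN AL, DX: the port comes from the low 16 bits of DX. */
+		uint64_t b = encode_port(0, 0xbeef);
+
+		printf("%#llx %#llx\n", (unsigned long long)a,
+		       (unsigned long long)b);	/* 0x600000 0xbeef0000 */
+		return 0;
+	}
+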
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 2b0342a03c1ba..60ea3f1a9653e 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -524,6 +524,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
+ 	return ES_OK;
+ }
+ 
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++	BUG_ON(size > 4);
++
++	if (user_mode(ctxt->regs)) {
++		struct thread_struct *t = &current->thread;
++		struct io_bitmap *iobm = t->io_bitmap;
++		size_t idx;
++
++		if (!iobm)
++			goto fault;
++
++		for (idx = port; idx < port + size; ++idx) {
++			if (test_bit(idx, iobm->bitmap))
++				goto fault;
++		}
++	}
++
++	return ES_OK;
++
++fault:
++	ctxt->fi.vector = X86_TRAP_GP;
++	ctxt->fi.error_code = 0;
++
++	return ES_EXCEPTION;
++}
++
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-shared.c"
+ 
+@@ -1508,6 +1535,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ 			return ES_DECODE_FAILED;
+ 	}
+ 
++	if (user_mode(ctxt->regs))
++		return ES_UNSUPPORTED;
++
+ 	switch (mmio) {
+ 	case INSN_MMIO_WRITE:
+ 		memcpy(ghcb->shared_buffer, reg_data, bytes);
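+
+The vc_ioio_check() added above mirrors what the CPU's TSS I/O permission
+bitmap enforces natively: a user-mode task with no bitmap gets no port
+access, a set bit denies the port, and every port touched by a multi-byte
+access must be allowed. A userspace sketch of the same walk, with test_bit()
+reduced to plain array indexing (names here are hypothetical):
+
+	#include <stdbool.h>
+	#include <stddef.h>
+	#include <stdint.h>
+
+	#define BITS_PER_LONG (8 * sizeof(unsigned long))
+
+	static bool port_denied(const unsigned long *bitmap,
+				uint16_t port, size_t size)
+	{
+		size_t idx;
+
+		for (idx = port; idx < (size_t)port + size; idx++) {
+			if (bitmap[idx / BITS_PER_LONG] &
+			    (1UL << (idx % BITS_PER_LONG)))
+				return true;	/* set bit == access denied */
+		}
+		return false;
+	}
+
+	int main(void)
+	{
+		static unsigned long bitmap[65536 / BITS_PER_LONG];
+
+		bitmap[0x61 / BITS_PER_LONG] |= 1UL << (0x61 % BITS_PER_LONG);
+
+		/* A 2-byte access at 0x60 touches the denied port 0x61. */
+		return port_denied(bitmap, 0x60, 2) ? 0 : 1;
+	}
+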
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index d3432687c9e63..7bdc66abfc92f 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -326,14 +326,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.guest_supported_xcr0 =
+ 		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+ 
+-	/*
+-	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+-	 * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
+-	 * supported by the host.
+-	 */
+-	vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+-						       XFEATURE_MASK_FPSSE;
+-
+ 	kvm_update_pv_runtime(vcpu);
+ 
+ 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index a983a16163b13..e74e223f46aa3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2738,13 +2738,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+ {
+ 	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
+ 	int vector, mode, trig_mode;
++	int r;
+ 
+ 	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
+ 		vector = reg & APIC_VECTOR_MASK;
+ 		mode = reg & APIC_MODE_MASK;
+ 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
+-		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
+-					NULL);
++
++		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
++		if (r && lvt_type == APIC_LVTPC)
++			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
++		return r;
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 7d9ba301c0906..1d64113de4883 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
+ 	return counter & pmc_bitmask(pmc);
+ }
+ 
++static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
++{
++	pmc->counter += val - pmc_read_counter(pmc);
++	pmc->counter &= pmc_bitmask(pmc);
++}
++
+ static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
+ {
+ 	if (pmc->perf_event) {
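+
+The new pmc_write_counter() helper centralizes the write-as-delta idiom that
+the SVM and VMX PMU code open-coded (both are converted further down in this
+patch), and it additionally clamps the result to the counter's width. A
+standalone sketch of that behavior, with hypothetical stand-ins for the
+kvm_pmc fields:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	struct pmc { uint64_t counter; uint64_t bitmask; };
+
+	static uint64_t pmc_read(const struct pmc *p)
+	{
+		return p->counter & p->bitmask;
+	}
+
+	static void pmc_write(struct pmc *p, uint64_t val)
+	{
+		p->counter += val - pmc_read(p);
+		p->counter &= p->bitmask;	/* clamp to counter width */
+	}
+
+	int main(void)
+	{
+		struct pmc p = { .counter = 0, .bitmask = (1ULL << 48) - 1 };
+
+		pmc_write(&p, 1ULL << 48);	/* write beyond 48 bits */
+		printf("%#llx\n", (unsigned long long)p.counter);	/* 0 */
+		return 0;
+	}
+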
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 2092db892d7d0..4b74ea91f4e6b 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -529,8 +529,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ 		WARN_ONCE(1, "Invalid backing page\n");
+ 		break;
++	case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
++		/* Invalid IPI with vector < 16 */
++		break;
+ 	default:
+-		pr_err("Unknown IPI interception\n");
++		vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
+ 	}
+ 
+ 	return 1;
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index c66c823ae222a..36482780a42f5 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1243,6 +1243,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
+ 
+ 		nested_svm_uninit_mmu_context(vcpu);
+ 		vmcb_mark_all_dirty(svm->vmcb);
++
++		if (kvm_apicv_activated(vcpu->kvm))
++			kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ 	}
+ 
+ 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index cef5a3d0abd09..373ff6a6687b3 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	/* MSR_PERFCTRn */
+ 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
+ 	if (pmc) {
+-		pmc->counter += data - pmc_read_counter(pmc);
++		pmc_write_counter(pmc, data);
+ 		pmc_update_sample_period(pmc);
+ 		return 0;
+ 	}
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index e3d92670c1115..c8466bc64b873 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -829,8 +829,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
+ 	if (intercept == svm->x2avic_msrs_intercepted)
+ 		return;
+ 
+-	if (!x2avic_enabled ||
+-	    !apic_x2apic_mode(svm->vcpu.arch.apic))
++	if (!x2avic_enabled)
+ 		return;
+ 
+ 	for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 80c769c58a876..18a658aa2a8d2 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -406,11 +406,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			if (!msr_info->host_initiated &&
+ 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
+ 				data = (s64)(s32)data;
+-			pmc->counter += data - pmc_read_counter(pmc);
++			pmc_write_counter(pmc, data);
+ 			pmc_update_sample_period(pmc);
+ 			break;
+ 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
+-			pmc->counter += data - pmc_read_counter(pmc);
++			pmc_write_counter(pmc, data);
+ 			pmc_update_sample_period(pmc);
+ 			break;
+ 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e24bbc8d1fc19..7bcf1a76a6abc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5385,26 +5385,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ 	return 0;
+ }
+ 
+-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+-					 struct kvm_xsave *guest_xsave)
+-{
+-	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+-		return;
+-
+-	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+-				       guest_xsave->region,
+-				       sizeof(guest_xsave->region),
+-				       vcpu->arch.pkru);
+-}
+ 
+ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+ 					  u8 *state, unsigned int size)
+ {
++	/*
++	 * Only copy state for features that are enabled for the guest.  The
++	 * state itself isn't problematic, but setting bits in the header for
++	 * features that are supported in *this* host but not exposed to the
++	 * guest can result in KVM_SET_XSAVE failing when live migrating to a
++	 * compatible host without the features that are NOT exposed to the
++	 * guest.
++	 *
++	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
++	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
++	 * supported by the host.
++	 */
++	u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
++			     XFEATURE_MASK_FPSSE;
++
+ 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+ 		return;
+ 
+-	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+-				       state, size, vcpu->arch.pkru);
++	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
++				       supported_xcr0, vcpu->arch.pkru);
++}
++
++static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
++					 struct kvm_xsave *guest_xsave)
++{
++	return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
++					     sizeof(guest_xsave->region));
+ }
+ 
+ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
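+
+Taken together, the cpuid.c and x86.c hunks above move the xfeature
+filtering from vCPU state into the KVM_GET_XSAVE{,2} path: the saved header
+is masked with what the guest can actually use (plus FP+SSE), so a host-only
+feature bit can no longer leak into the migration stream and break
+KVM_SET_XSAVE on the destination. A minimal sketch of that masking with
+made-up feature sets (bit positions follow the x86 XCR0 layout):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define XFEATURE_MASK_FPSSE	0x3ULL		/* x87 + SSE */
+	#define XFEATURE_MASK_YMM	(1ULL << 2)	/* AVX */
+	#define XFEATURE_MASK_XTILE	(3ULL << 17)	/* AMX tile state */
+
+	int main(void)
+	{
+		/* Header bits as saved on a host with AVX and AMX. */
+		uint64_t header = XFEATURE_MASK_FPSSE | XFEATURE_MASK_YMM |
+				  XFEATURE_MASK_XTILE;
+		/* This guest was only given AVX; FP+SSE is always allowed. */
+		uint64_t supported = XFEATURE_MASK_FPSSE | XFEATURE_MASK_YMM;
+
+		/* What the ioctl now reports: the AMX bits are stripped. */
+		printf("%#llx\n", (unsigned long long)(header & supported));
+		return 0;	/* prints 0x7 */
+	}
+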
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index 773e159dbbcb8..a957304304be3 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -81,14 +81,13 @@ software_key_determine_akcipher(const struct public_key *pkey,
+ 		 * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
+ 		 */
+ 		if (strcmp(encoding, "pkcs1") == 0) {
++			*sig = op == kernel_pkey_sign ||
++			       op == kernel_pkey_verify;
+ 			if (!hash_algo) {
+-				*sig = false;
+ 				n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+ 					     "pkcs1pad(%s)",
+ 					     pkey->pkey_algo);
+ 			} else {
+-				*sig = op == kernel_pkey_sign ||
+-				       op == kernel_pkey_verify;
+ 				n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+ 					     "pkcs1pad(%s,%s)",
+ 					     pkey->pkey_algo, hash_algo);
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 8396db2b52030..09dee5be20700 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -303,7 +303,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
+ 	}
+ 
+ 	if (!ret)
+-		ivpu_info(vdev, "VPU ready message received successfully\n");
++		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
+ 	else
+ 		ivpu_hw_diagnose_failure(vdev);
+ 
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index 93c69aaa6218d..34e4026c77589 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -195,8 +195,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
+-					 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
++	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ 	if (!fw->mem) {
+ 		ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ 		return -ENOMEM;
+@@ -273,7 +272,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
+ 		memset(start, 0, size);
+ 	}
+ 
+-	clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
++	wmb(); /* Flush WC buffers after writing fw->mem */
+ 
+ 	return 0;
+ }
+@@ -375,7 +374,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 	if (!ivpu_fw_is_cold_boot(vdev)) {
+ 		boot_params->save_restore_ret_address = 0;
+ 		vdev->pm->is_warmboot = true;
+-		clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
++		wmb(); /* Flush WC buffers after writing save_restore_ret_address */
+ 		return;
+ 	}
+ 
+@@ -430,7 +429,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 	boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
+ 	boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+ 
+-	clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
++	wmb(); /* Flush WC buffers after writing bootparams */
+ 
+ 	ivpu_fw_boot_params_print(vdev, boot_params);
+ }
+diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
+index f4130586ff1b2..6b0ceda5f2537 100644
+--- a/drivers/accel/ivpu/ivpu_gem.h
++++ b/drivers/accel/ivpu/ivpu_gem.h
+@@ -8,8 +8,6 @@
+ #include <drm/drm_gem.h>
+ #include <drm/drm_mm.h>
+ 
+-#define DRM_IVPU_BO_NOSNOOP       0x10000000
+-
+ struct dma_buf;
+ struct ivpu_bo_ops;
+ struct ivpu_file_priv;
+@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+ 
+ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+ {
+-	if (bo->flags & DRM_IVPU_BO_NOSNOOP)
+-		return false;
+-
+ 	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+ }
+ 
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 2fc2b43a4ed38..6d3b878a13588 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1387,10 +1387,10 @@ static int __init acpi_init(void)
+ 	acpi_init_ffh();
+ 
+ 	pci_mmcfg_late_init();
+-	acpi_arm_init();
+ 	acpi_viot_early_init();
+ 	acpi_hest_init();
+ 	acpi_ghes_init();
++	acpi_arm_init();
+ 	acpi_scan_init();
+ 	acpi_ec_init();
+ 	acpi_debugfs_init();
+diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
+index c2c786eb95abc..1687483ff319e 100644
+--- a/drivers/acpi/irq.c
++++ b/drivers/acpi/irq.c
+@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ 		      int polarity)
+ {
+ 	struct irq_fwspec fwspec;
++	unsigned int irq;
+ 
+ 	fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
+ 	if (WARN_ON(!fwspec.fwnode)) {
+@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ 	fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+ 	fwspec.param_count = 2;
+ 
+-	return irq_create_fwspec_mapping(&fwspec);
++	irq = irq_create_fwspec_mapping(&fwspec);
++	if (!irq)
++		return -EINVAL;
++
++	return irq;
+ }
+ EXPORT_SYMBOL_GPL(acpi_register_gsi);
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 2a21f4d9500db..97a842b57a751 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2624,7 +2624,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
+ {
+ 	const u16 *id = dev->id;
+ 	const char *lba_desc;
+-	char ncq_desc[24];
++	char ncq_desc[32];
+ 	int ret;
+ 
+ 	dev->flags |= ATA_DFLAG_LBA;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 150e7ab62d1ae..e7c4edb04f8ed 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2366,7 +2366,7 @@ static void ata_eh_link_report(struct ata_link *link)
+ 	struct ata_eh_context *ehc = &link->eh_context;
+ 	struct ata_queued_cmd *qc;
+ 	const char *frozen, *desc;
+-	char tries_buf[6] = "";
++	char tries_buf[16] = "";
+ 	int tag, nr_failed = 0;
+ 
+ 	if (ehc->i.flags & ATA_EHI_QUIET)
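+
+Both libata hunks above simply enlarge stack buffers that are filled with
+snprintf(). snprintf() never overflows, but it silently truncates (and newer
+compilers warn when the worst-case expansion cannot fit). A tiny standalone
+illustration; the format string only resembles the one in
+ata_eh_link_report():
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char small[6], big[16];
+		int n;
+
+		n = snprintf(small, sizeof(small), " t%d", -2147483647);
+		printf("needed %d, got \"%s\"\n", n, small);	/* " t-21" */
+
+		n = snprintf(big, sizeof(big), " t%d", -2147483647);
+		printf("needed %d, got \"%s\"\n", n, big);	/* fits */
+		return 0;
+	}
+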
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 1bfd1727b4da7..bb2f41043f602 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1572,7 +1572,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+ 
+ 	/* If the user didn't specify a name match any */
+ 	if (data)
+-		return !strcmp((*r)->name, data);
++		return (*r)->name && !strcmp((*r)->name, data);
+ 	else
+ 		return 1;
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index dfdfb72d350fe..ca9e2a210fff2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -4348,6 +4348,7 @@ static int btusb_probe(struct usb_interface *intf,
+ 
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
++		hdev->shutdown = btusb_shutdown_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ 		hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ 		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 40e2b9fa11a26..f3892e9ce800f 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 	struct vhci_data *data = hci_get_drvdata(hdev);
+ 
+ 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
++
++	mutex_lock(&data->open_mutex);
+ 	skb_queue_tail(&data->readq, skb);
++	mutex_unlock(&data->open_mutex);
+ 
+ 	wake_up_interruptible(&data->read_wait);
+ 	return 0;
+diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
+index 853f7dc3c21d8..135278ddaf627 100644
+--- a/drivers/firmware/efi/unaccepted_memory.c
++++ b/drivers/firmware/efi/unaccepted_memory.c
+@@ -5,9 +5,17 @@
+ #include <linux/spinlock.h>
+ #include <asm/unaccepted_memory.h>
+ 
+-/* Protects unaccepted memory bitmap */
++/* Protects unaccepted memory bitmap and accepting_list */
+ static DEFINE_SPINLOCK(unaccepted_memory_lock);
+ 
++struct accept_range {
++	struct list_head list;
++	unsigned long start;
++	unsigned long end;
++};
++
++static LIST_HEAD(accepting_list);
++
+ /*
+  * accept_memory() -- Consult bitmap and accept the memory if needed.
+  *
+@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+ 	struct efi_unaccepted_memory *unaccepted;
+ 	unsigned long range_start, range_end;
++	struct accept_range range, *entry;
+ 	unsigned long flags;
+ 	u64 unit_size;
+ 
+@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
+ 	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
+ 		end = unaccepted->size * unit_size * BITS_PER_BYTE;
+ 
+-	range_start = start / unit_size;
+-
++	range.start = start / unit_size;
++	range.end = DIV_ROUND_UP(end, unit_size);
++retry:
+ 	spin_lock_irqsave(&unaccepted_memory_lock, flags);
++
++	/*
++	 * Check if anybody works on accepting the same range of the memory.
++	 *
++	 * The check is done with unit_size granularity. It is crucial to catch
++	 * all accept requests to the same unit_size block, even if they don't
++	 * overlap at the physical address level.
++	 */
++	list_for_each_entry(entry, &accepting_list, list) {
++		if (entry->end < range.start)
++			continue;
++		if (entry->start >= range.end)
++			continue;
++
++		/*
++		 * Somebody else is accepting the range, or at least part of it.
++		 *
++		 * Drop the lock and retry until it is complete.
++		 */
++		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
++		goto retry;
++	}
++
++	/*
++	 * Register that the range is about to be accepted.
++	 * Make sure nobody else will accept it.
++	 */
++	list_add(&range.list, &accepting_list);
++
++	range_start = range.start;
+ 	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
+-				   DIV_ROUND_UP(end, unit_size)) {
++				   range.end) {
+ 		unsigned long phys_start, phys_end;
+ 		unsigned long len = range_end - range_start;
+ 
+ 		phys_start = range_start * unit_size + unaccepted->phys_base;
+ 		phys_end = range_end * unit_size + unaccepted->phys_base;
+ 
++		/*
++		 * Keep interrupts disabled until the accept operation is
++		 * complete in order to prevent deadlocks.
++		 *
++		 * Enabling interrupts before calling arch_accept_memory()
++		 * creates an opportunity for an interrupt handler to request
++		 * acceptance for the same memory. The handler will continuously
++		 * spin with interrupts disabled, preventing other tasks from
++		 * making progress with the acceptance process.
++		 */
++		spin_unlock(&unaccepted_memory_lock);
++
+ 		arch_accept_memory(phys_start, phys_end);
++
++		spin_lock(&unaccepted_memory_lock);
+ 		bitmap_clear(unaccepted->bitmap, range_start, len);
+ 	}
++
++	list_del(&range.list);
+ 	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ }
+ 
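+
+The list walk above is a conflict check over half-open ranges measured in
+unit_size granules; on a hit the caller drops the lock and retries via the
+goto until the competing acceptance finishes. A standalone sketch of the
+predicate (names are illustrative; like the kernel's check, it also flags an
+entry that ends exactly where the new range begins):
+
+	#include <assert.h>
+	#include <stdbool.h>
+
+	/* conflict test for entry [a_start, a_end) vs. range [b_start, b_end) */
+	static bool ranges_conflict(unsigned long a_start, unsigned long a_end,
+				    unsigned long b_start, unsigned long b_end)
+	{
+		if (a_end < b_start)	/* entry entirely below the range */
+			return false;
+		if (a_start >= b_end)	/* entry entirely above the range */
+			return false;
+		return true;
+	}
+
+	int main(void)
+	{
+		assert(!ranges_conflict(0, 4, 8, 12));	/* disjoint */
+		assert(ranges_conflict(0, 9, 8, 12));	/* partial overlap */
+		assert(ranges_conflict(4, 8, 8, 12));	/* touching counts */
+		return 0;
+	}
+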
+diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
+index de14949a3fe5a..92c1f2baa4bff 100644
+--- a/drivers/gpio/gpio-timberdale.c
++++ b/drivers/gpio/gpio-timberdale.c
+@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ 	unsigned offset, bool enabled)
+ {
+ 	struct timbgpio *tgpio = gpiochip_get_data(gpio);
++	unsigned long flags;
+ 	u32 reg;
+ 
+-	spin_lock(&tgpio->lock);
++	spin_lock_irqsave(&tgpio->lock, flags);
+ 	reg = ioread32(tgpio->membase + offset);
+ 
+ 	if (enabled)
+@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ 		reg &= ~(1 << index);
+ 
+ 	iowrite32(reg, tgpio->membase + offset);
+-	spin_unlock(&tgpio->lock);
++	spin_unlock_irqrestore(&tgpio->lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index d3f3a69d49077..314dfaa633857 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -127,14 +127,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ 	unsigned long mask = BIT(gpio);
+ 	u32 val;
+ 
++	vf610_gpio_set(chip, gpio, value);
++
+ 	if (port->sdata && port->sdata->have_paddr) {
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val |= mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ 	}
+ 
+-	vf610_gpio_set(chip, gpio, value);
+-
+ 	return pinctrl_gpio_direction_output(chip->base + gpio);
+ }
+ 
+@@ -247,7 +247,8 @@ static const struct irq_chip vf610_irqchip = {
+ 	.irq_unmask = vf610_gpio_irq_unmask,
+ 	.irq_set_type = vf610_gpio_irq_set_type,
+ 	.irq_set_wake = vf610_gpio_irq_set_wake,
+-	.flags = IRQCHIP_IMMUTABLE,
++	.flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND
++			| IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
+ 	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 97496c0f91330..a775d2bdac94f 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -951,6 +951,7 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
+ 	if (!propname)
+ 		return ERR_PTR(-EINVAL);
+ 
++	memset(&lookup, 0, sizeof(lookup));
+ 	lookup.index = index;
+ 
+ 	ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index ec1ec08d40584..7a67bb1490159 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1094,7 +1094,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ 			struct drm_gem_object *gobj = dma_buf->priv;
+ 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+ 
+-			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
++			if (abo->tbo.resource &&
++			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
+ 				bo = gem_to_amdgpu_bo(gobj);
+ 		}
+ 		mem = bo->tbo.resource;
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index d68fe5474676b..7f7a476b6829c 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2077,6 +2077,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+ 		case IP_VERSION(11, 0, 0):
+ 		case IP_VERSION(11, 0, 1):
+ 		case IP_VERSION(11, 0, 2):
++		case IP_VERSION(11, 0, 3):
+ 			*states = ATTR_STATE_SUPPORTED;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index f448b903e1907..84148a79414b7 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
+ 	return container_of(bridge, struct ti_sn65dsi86, bridge);
+ }
+ 
+-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
++static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
+ {
+ 	int val;
+ 	struct mipi_dsi_host *host;
+@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+ 	if (!host)
+ 		return -EPROBE_DEFER;
+ 
+-	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
++	dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
+ 	if (IS_ERR(dsi))
+ 		return PTR_ERR(dsi);
+ 
+@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+ 
+ 	pdata->dsi = dsi;
+ 
+-	return devm_mipi_dsi_attach(dev, dsi);
++	return devm_mipi_dsi_attach(&adev->dev, dsi);
+ }
+ 
+ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 	struct device_node *np = pdata->dev->of_node;
+ 	int ret;
+ 
+-	pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
++	pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
+ 	if (IS_ERR(pdata->next_bridge))
+-		return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
++		return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
+ 				     "failed to create panel bridge\n");
+ 
+ 	ti_sn_bridge_parse_lanes(pdata, np);
+@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 
+ 	drm_bridge_add(&pdata->bridge);
+ 
+-	ret = ti_sn_attach_host(pdata);
++	ret = ti_sn_attach_host(adev, pdata);
+ 	if (ret) {
+-		dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
++		dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
+ 		goto err_remove_bridge;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 9271e47d66572..69d855123d3e3 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -123,6 +123,9 @@ static const struct edid_quirk {
+ 	/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ 	EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ 
++	/* BenQ GW2765 */
++	EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
++
+ 	/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
+ 	EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 0cb646cb04ee1..d5c15292ae937 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
++	.width = 1200,
++	.height = 1920,
++	.bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
++		"03/04/2019", NULL },
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
+ 	.width = 1200,
+ 	.height = 1920,
+@@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* One Mix 2S (generic strings, also match on bios date) */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
++		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
++		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
++		},
++		.driver_data = (void *)&gpd_onemix2s,
+ 	},
+ 	{}
+ };
+diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+index 719447ce86e70..974dd52e720c1 100644
+--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+@@ -2554,8 +2554,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
+ 		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
+ 			 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+ 
+-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+-		     XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
++	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
+ 		     lane_pipe_reset);
+ 
+ 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index aa4d842d4c5a8..310654542b42c 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
+ 	case 0:
+ 	case -EAGAIN:
+ 	case -ENOSPC: /* transient failure to evict? */
++	case -ENOBUFS: /* temporarily out of fences? */
+ 	case -ERESTARTSYS:
+ 	case -EINTR:
+ 	case -EBUSY:
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 9f364df52478d..0e0a41b2f57f0 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 	npages = obj->size >> PAGE_SHIFT;
+ 	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+ 	if (!mtk_gem->pages) {
++		sg_free_table(sgt);
+ 		kfree(sgt);
+ 		return -ENOMEM;
+ 	}
+@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ 			       pgprot_writecombine(PAGE_KERNEL));
+ 	if (!mtk_gem->kvaddr) {
++		sg_free_table(sgt);
+ 		kfree(sgt);
+ 		kfree(mtk_gem->pages);
+ 		return -ENOMEM;
+ 	}
+-out:
++	sg_free_table(sgt);
+ 	kfree(sgt);
++
++out:
+ 	iosys_map_set_vaddr(map, mtk_gem->kvaddr);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+index 46b057fe1412e..3249e5c1c8930 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
+ 	return object->client->event(token, &args, sizeof(args.v0));
+ }
+ 
++static bool
++nvkm_connector_is_dp_dms(u8 type)
++{
++	switch (type) {
++	case DCB_CONNECTOR_DMS59_DP0:
++	case DCB_CONNECTOR_DMS59_DP1:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static int
+ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
+ {
+@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
+ 	if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
+ 	if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
+ 		/* TODO: support DP IRQ on ANX9805 and remove this hack. */
+-		if (!outp->info.location)
++		if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index 403506b9697e7..b346d68a06f5a 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ 		return -ENODEV;
+ 
+ 	boot_hid = usb_get_intfdata(boot_interface);
++	if (list_empty(&boot_hid->inputs)) {
++		hid_err(hid, "no inputs found\n");
++		return -ENODEV;
++	}
+ 	boot_hid_input = list_first_entry(&boot_hid->inputs,
+ 		struct hid_input, list);
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 8a310f8ff20f5..cc0d0186a0d95 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -425,6 +425,7 @@
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100	0x29F5
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1	0x2BED
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2	0x2BEE
++#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG		0x2D02
+ 
+ #define USB_VENDOR_ID_ELECOM		0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 40a5645f8fe81..5e2f721855e59 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -406,6 +406,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 1c00e4121c7ef..08b68f8476dbb 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4676,6 +4676,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+ 	{ /* MX Master mouse over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
++	{ /* M720 Triathlon mouse over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) },
+ 	{ /* MX Ergo trackball over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 521b2ffb42449..8db4ae05febc8 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2144,6 +2144,10 @@ static const struct hid_device_id mt_devices[] = {
+ 			USB_DEVICE_ID_MTP_STM)},
+ 
+ 	/* Synaptics devices */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
++
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 			USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
+index 250f5d2f888ab..10468f727e5bb 100644
+--- a/drivers/hid/hid-nintendo.c
++++ b/drivers/hid/hid-nintendo.c
+@@ -2088,7 +2088,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
+ 	struct joycon_input_report *report;
+ 
+ 	req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
++	mutex_lock(&ctlr->output_mutex);
+ 	ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
++	mutex_unlock(&ctlr->output_mutex);
+ 	if (ret) {
+ 		hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
+ 		return ret;
+@@ -2117,6 +2119,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
+ 	return 0;
+ }
+ 
++static int joycon_init(struct hid_device *hdev)
++{
++	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
++	int ret = 0;
++
++	mutex_lock(&ctlr->output_mutex);
++	/* if handshake command fails, assume ble pro controller */
++	if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
++	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
++		hid_dbg(hdev, "detected USB controller\n");
++		/* set baudrate for improved latency */
++		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
++		if (ret) {
++			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
++			goto out_unlock;
++		}
++		/* handshake */
++		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
++		if (ret) {
++			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
++			goto out_unlock;
++		}
++		/*
++		 * Set no timeout (to keep controller in USB mode).
++		 * This doesn't send a response, so ignore the timeout.
++		 */
++		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
++	} else if (jc_type_is_chrggrip(ctlr)) {
++		hid_err(hdev, "Failed charging grip handshake\n");
++		ret = -ETIMEDOUT;
++		goto out_unlock;
++	}
++
++	/* get controller calibration data, and parse it */
++	ret = joycon_request_calibration(ctlr);
++	if (ret) {
++		/*
++		 * We can function with default calibration, but it may be
++		 * inaccurate. Provide a warning, and continue on.
++		 */
++		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
++	}
++
++	/* get IMU calibration data, and parse it */
++	ret = joycon_request_imu_calibration(ctlr);
++	if (ret) {
++		/*
++		 * We can function with default calibration, but it may be
++		 * inaccurate. Provide a warning, and continue on.
++		 */
++		hid_warn(hdev, "Unable to read IMU calibration data\n");
++	}
++
++	/* Set the reporting mode to 0x30, which is the full report mode */
++	ret = joycon_set_report_mode(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++	/* Enable rumble */
++	ret = joycon_enable_rumble(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++	/* Enable the IMU */
++	ret = joycon_enable_imu(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++out_unlock:
++	mutex_unlock(&ctlr->output_mutex);
++	return ret;
++}
++
+ /* Common handler for parsing inputs */
+ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data,
+ 							      int size)
+@@ -2248,85 +2329,19 @@ static int nintendo_hid_probe(struct hid_device *hdev,
+ 
+ 	hid_device_io_start(hdev);
+ 
+-	/* Initialize the controller */
+-	mutex_lock(&ctlr->output_mutex);
+-	/* if handshake command fails, assume ble pro controller */
+-	if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
+-	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
+-		hid_dbg(hdev, "detected USB controller\n");
+-		/* set baudrate for improved latency */
+-		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
+-		if (ret) {
+-			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
+-			goto err_mutex;
+-		}
+-		/* handshake */
+-		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
+-		if (ret) {
+-			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
+-			goto err_mutex;
+-		}
+-		/*
+-		 * Set no timeout (to keep controller in USB mode).
+-		 * This doesn't send a response, so ignore the timeout.
+-		 */
+-		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
+-	} else if (jc_type_is_chrggrip(ctlr)) {
+-		hid_err(hdev, "Failed charging grip handshake\n");
+-		ret = -ETIMEDOUT;
+-		goto err_mutex;
+-	}
+-
+-	/* get controller calibration data, and parse it */
+-	ret = joycon_request_calibration(ctlr);
++	ret = joycon_init(hdev);
+ 	if (ret) {
+-		/*
+-		 * We can function with default calibration, but it may be
+-		 * inaccurate. Provide a warning, and continue on.
+-		 */
+-		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
+-	}
+-
+-	/* get IMU calibration data, and parse it */
+-	ret = joycon_request_imu_calibration(ctlr);
+-	if (ret) {
+-		/*
+-		 * We can function with default calibration, but it may be
+-		 * inaccurate. Provide a warning, and continue on.
+-		 */
+-		hid_warn(hdev, "Unable to read IMU calibration data\n");
+-	}
+-
+-	/* Set the reporting mode to 0x30, which is the full report mode */
+-	ret = joycon_set_report_mode(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
+-		goto err_mutex;
+-	}
+-
+-	/* Enable rumble */
+-	ret = joycon_enable_rumble(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
+-		goto err_mutex;
+-	}
+-
+-	/* Enable the IMU */
+-	ret = joycon_enable_imu(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
+-		goto err_mutex;
++		hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret);
++		goto err_close;
+ 	}
+ 
+ 	ret = joycon_read_info(ctlr);
+ 	if (ret) {
+ 		hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
+ 			ret);
+-		goto err_mutex;
++		goto err_close;
+ 	}
+ 
+-	mutex_unlock(&ctlr->output_mutex);
+-
+ 	/* Initialize the leds */
+ 	ret = joycon_leds_create(ctlr);
+ 	if (ret) {
+@@ -2352,8 +2367,6 @@ static int nintendo_hid_probe(struct hid_device *hdev,
+ 	hid_dbg(hdev, "probe - success\n");
+ 	return 0;
+ 
+-err_mutex:
+-	mutex_unlock(&ctlr->output_mutex);
+ err_close:
+ 	hid_hw_close(hdev);
+ err_stop:
+@@ -2383,6 +2396,20 @@ static void nintendo_hid_remove(struct hid_device *hdev)
+ 	hid_hw_stop(hdev);
+ }
+ 
++#ifdef CONFIG_PM
++
++static int nintendo_hid_resume(struct hid_device *hdev)
++{
++	int ret = joycon_init(hdev);
++
++	if (ret)
++		hid_err(hdev, "Failed to restore controller after resume\n");
++
++	return ret;
++}
++
++#endif
++
+ static const struct hid_device_id nintendo_hid_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+ 			 USB_DEVICE_ID_NINTENDO_PROCON) },
+@@ -2404,6 +2431,10 @@ static struct hid_driver nintendo_hid_driver = {
+ 	.probe		= nintendo_hid_probe,
+ 	.remove		= nintendo_hid_remove,
+ 	.raw_event	= nintendo_hid_event,
++
++#ifdef CONFIG_PM
++	.resume		= nintendo_hid_resume,
++#endif
+ };
+ module_hid_driver(nintendo_hid_driver);
+ 
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 313904be5f3bd..57ff09f18c371 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+ 		priv->adap.lock_ops = &i2c_parent_lock_ops;
+ 
+ 	/* Sanity check on class */
+-	if (i2c_mux_parent_classes(parent) & class)
++	if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
+ 		dev_err(&parent->dev,
+ 			"Segment %d behind mux can't share classes with ancestors\n",
+ 			chan_id);
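+
+The extra mask above keeps the sanity check from firing when the only class
+bit a mux segment shares with its ancestors is the deprecation marker, which
+flags legacy class-based probing rather than a real device class. A trivial
+illustration (the bit values mirror include/linux/i2c.h, but treat them as
+assumptions):
+
+	#include <stdio.h>
+
+	#define I2C_CLASS_HWMON		(1U << 0)
+	#define I2C_CLASS_DEPRECATED	(1U << 8)
+
+	int main(void)
+	{
+		unsigned int parent = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+		unsigned int child  = I2C_CLASS_DEPRECATED;
+
+		printf("old: %u\n", !!(parent & child));	/* 1: warns */
+		printf("new: %u\n",
+		       !!(parent & child & ~I2C_CLASS_DEPRECATED));	/* 0 */
+		return 0;
+	}
+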
+diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
+index 7c7362e288213..66433886b7b03 100644
+--- a/drivers/iio/light/vcnl4000.c
++++ b/drivers/iio/light/vcnl4000.c
+@@ -994,7 +994,6 @@ static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
+ 
+ out:
+ 	mutex_unlock(&data->vcnl4000_lock);
+-	data->chip_spec->set_power_state(data, data->ps_int != 0);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index b6f4be25b31b0..b66aa5de2ddec 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -179,6 +179,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ 			       struct mmc_queue *mq);
+ static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
+ static int mmc_spi_err_check(struct mmc_card *card);
++static int mmc_blk_busy_cb(void *cb_data, bool *busy);
+ 
+ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+ {
+@@ -470,7 +471,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	struct mmc_data data = {};
+ 	struct mmc_request mrq = {};
+ 	struct scatterlist sg;
+-	bool r1b_resp, use_r1b_resp = false;
++	bool r1b_resp;
+ 	unsigned int busy_timeout_ms;
+ 	int err;
+ 	unsigned int target_part;
+@@ -551,8 +552,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
+ 	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
+ 	if (r1b_resp)
+-		use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
+-						    busy_timeout_ms);
++		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
+ 
+ 	mmc_wait_for_req(card->host, &mrq);
+ 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+@@ -605,19 +605,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	if (idata->ic.postsleep_min_us)
+ 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+ 
+-	/* No need to poll when using HW busy detection. */
+-	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+-		return 0;
+-
+ 	if (mmc_host_is_spi(card->host)) {
+ 		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
+ 			return mmc_spi_err_check(card);
+ 		return err;
+ 	}
+-	/* Ensure RPMB/R1B command has completed by polling with CMD13. */
+-	if (idata->rpmb || r1b_resp)
+-		err = mmc_poll_for_busy(card, busy_timeout_ms, false,
+-					MMC_BUSY_IO);
++
++	/*
++	 * Ensure RPMB, writes and R1B responses are completed by polling with
++	 * CMD13. Note that, usually we don't need to poll when using HW busy
++	 * detection, but here it's needed since some commands may indicate the
++	 * error through the R1 status bits.
++	 */
++	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
++		struct mmc_blk_busy_data cb_data = {
++			.card = card,
++		};
++
++		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
++					  &mmc_blk_busy_cb, &cb_data);
++
++		idata->ic.response[0] = cb_data.status;
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 89cd48fcec79f..4a4bab9aa7263 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ 	case 3: /* MMC v3.1 - v3.3 */
+ 	case 4: /* MMC v4 */
+ 		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
+-		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
++		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 8);
+ 		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
+ 		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
+ 		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
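+
+The one-byte change above matches the eMMC CID layout, where the OID field
+is 8 bits wide (bits 111:104); reading 16 bits from bit 104 also swallowed
+the neighboring CBX/reserved byte, producing bogus OEM ids. A userspace
+re-creation of the extraction, simplified from the driver's UNSTUFF_BITS()
+macro (the CID value is made up):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* resp[0] holds CID bits 127:96, resp[3] holds bits 31:0. */
+	static uint32_t unstuff_bits(const uint32_t resp[4], int start, int size)
+	{
+		uint32_t mask = (size < 32 ? 1U << size : 0U) - 1;
+		int off = 3 - start / 32;
+		int shft = start & 31;
+		uint32_t res = resp[off] >> shft;
+
+		if (size + shft > 32)
+			res |= resp[off - 1] << ((32 - shft) % 32);
+		return res & mask;
+	}
+
+	int main(void)
+	{
+		/* MID 0x13, CBX/reserved byte 0x01, OID 0x4e. */
+		uint32_t resp[4] = { 0x13014e00, 0, 0, 0 };
+
+		printf("16-bit read: %#x\n", unstuff_bits(resp, 104, 16));
+		printf(" 8-bit read: %#x\n", unstuff_bits(resp, 104, 8));
+		return 0;	/* 0x14e (wrong) vs. 0x4e (the OID) */
+	}
+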
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index f64b9ac76a5cd..5914516df2f7f 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
+ 		}
+ 		err = mmc_sdio_reinit_card(host);
+ 	} else if (mmc_card_wake_sdio_irq(host)) {
+-		/* We may have switched to 1-bit mode during suspend */
++		/*
++		 * We may have switched to 1-bit mode during suspend, so
++		 * hold retuning, because tuning only supports 4-bit or
++		 * 8-bit mode.
++		 */
++		mmc_retune_hold_now(host);
+ 		err = sdio_enable_4bit_bus(host->card);
++		mmc_retune_release(host);
+ 	}
+ 
+ 	if (err)
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 02403ff99e0d4..41111c5502777 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -671,11 +671,11 @@ static void msdc_reset_hw(struct msdc_host *host)
+ 	u32 val;
+ 
+ 	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
+-	readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
++	readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+ 
+ 	sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
+-	readl_poll_timeout(host->base + MSDC_FIFOCS, val,
+-			   !(val & MSDC_FIFOCS_CLR), 0, 0);
++	readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
++				  !(val & MSDC_FIFOCS_CLR), 0, 0);
+ 
+ 	val = readl(host->base + MSDC_INT);
+ 	writel(val, host->base + MSDC_INT);
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index ae8c307b7aa7b..109d4b010f978 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -1144,42 +1144,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
+ 	return value;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-
+-	pci_free_irq_vectors(slot->chip->pdev);
+-	gli_pcie_enable_msi(slot);
+-
+-	return sdhci_pci_resume_host(chip);
+-}
+-
+-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-	int ret;
+-
+-	ret = sdhci_pci_gli_resume(chip);
+-	if (ret)
+-		return ret;
+-
+-	return cqhci_resume(slot->host->mmc);
+-}
+-
+-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-	int ret;
+-
+-	ret = cqhci_suspend(slot->host->mmc);
+-	if (ret)
+-		return ret;
+-
+-	return sdhci_suspend_host(slot->host);
+-}
+-#endif
+-
+ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ 					  struct mmc_ios *ios)
+ {
+@@ -1420,6 +1384,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
+ }
+ #endif
+ 
++#ifdef CONFIG_PM_SLEEP
++static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++
++	pci_free_irq_vectors(slot->chip->pdev);
++	gli_pcie_enable_msi(slot);
++
++	return sdhci_pci_resume_host(chip);
++}
++
++static int gl9763e_resume(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++	int ret;
++
++	ret = sdhci_pci_gli_resume(chip);
++	if (ret)
++		return ret;
++
++	ret = cqhci_resume(slot->host->mmc);
++	if (ret)
++		return ret;
++
++	/*
++	 * Disable LPM negotiation to bring device back in sync
++	 * with its runtime_pm state.
++	 */
++	gl9763e_set_low_power_negotiation(slot, false);
++
++	return 0;
++}
++
++static int gl9763e_suspend(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++	int ret;
++
++	/*
++	 * Certain SoCs can suspend only with the bus in low-
++	 * power state, notably x86 SoCs when using S0ix.
++	 * Re-enable LPM negotiation to allow entering L1 state
++	 * and entering system suspend.
++	 */
++	gl9763e_set_low_power_negotiation(slot, true);
++
++	ret = cqhci_suspend(slot->host->mmc);
++	if (ret)
++		goto err_suspend;
++
++	ret = sdhci_suspend_host(slot->host);
++	if (ret)
++		goto err_suspend_host;
++
++	return 0;
++
++err_suspend_host:
++	cqhci_resume(slot->host->mmc);
++err_suspend:
++	gl9763e_set_low_power_negotiation(slot, false);
++	return ret;
++}
++#endif
++
+ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
+ {
+ 	struct pci_dev *pdev = slot->chip->pdev;
+@@ -1527,8 +1555,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
+ 	.probe_slot	= gli_probe_slot_gl9763e,
+ 	.ops            = &sdhci_gl9763e_ops,
+ #ifdef CONFIG_PM_SLEEP
+-	.resume		= sdhci_cqhci_gli_resume,
+-	.suspend	= sdhci_cqhci_gli_suspend,
++	.resume		= gl9763e_resume,
++	.suspend	= gl9763e_suspend,
+ #endif
+ #ifdef CONFIG_PM
+ 	.runtime_suspend = gl9763e_runtime_suspend,
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index c73854da51363..19dad5a23f944 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -552,6 +552,17 @@ static int physmap_flash_probe(struct platform_device *dev)
+ 		if (info->probe_type) {
+ 			info->mtds[i] = do_map_probe(info->probe_type,
+ 						     &info->maps[i]);
++
++			/* Fall back to mapping region as ROM */
++			if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
++			    strcmp(info->probe_type, "map_rom")) {
++				dev_warn(&dev->dev,
++					 "map_probe() failed for type %s\n",
++					 info->probe_type);
++
++				info->mtds[i] = do_map_probe("map_rom",
++							     &info->maps[i]);
++			}
+ 		} else {
+ 			int j;
+ 
+diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
+index 906eef70cb6d9..487c139316fea 100644
+--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
+@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
+ 	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ 	dma_addr_t dma_addr;
++	u8 status;
+ 	int ret;
+ 	struct anfc_op nfc_op = {
+ 		.pkt_reg =
+@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	}
+ 
+ 	/* Spare data is not protected */
+-	if (oob_required)
++	if (oob_required) {
+ 		ret = nand_write_oob_std(chip, page);
++		if (ret)
++			return ret;
++	}
+ 
+-	return ret;
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
++	return 0;
+ }
+ 
+ static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
+index 30c15e4e1cc0d..576441095012c 100644
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -1162,6 +1162,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ 	};
+ 	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
++	u8 status;
+ 	int ret;
+ 
+ 	/* NFCv2 needs more information about the operation being executed */
+@@ -1195,7 +1196,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+ 				  PSEC_TO_MSEC(sdr->tPROG_max));
+-	return ret;
++	if (ret)
++		return ret;
++
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
++	return 0;
+ }
+ 
+ static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
+@@ -1624,6 +1636,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ 	int data_len = lt->data_bytes;
+ 	int spare_len = lt->spare_bytes;
+ 	int chunk, ret;
++	u8 status;
+ 
+ 	marvell_nfc_select_target(chip, chip->cur_cs);
+ 
+@@ -1660,6 +1673,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ 	if (ret)
+ 		return ret;
+ 
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index a6af521832aa4..458ff93eb88ae 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -5109,6 +5109,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
+ 
++	if (!chip->parameters.supports_read_cache)
++		return;
++
+ 	if (chip->read_retries)
+ 		return;
+ 
+diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
+index 836757717660b..b3cc8f3605291 100644
+--- a/drivers/mtd/nand/raw/nand_jedec.c
++++ b/drivers/mtd/nand/raw/nand_jedec.c
+@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
+ 		goto free_jedec_param_page;
+ 	}
+ 
++	if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
++		chip->parameters.supports_read_cache = true;
++
+ 	memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ 	mtd->writesize = memorg->pagesize;
+ 
+diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
+index f15ef90aec8cd..861975e44b552 100644
+--- a/drivers/mtd/nand/raw/nand_onfi.c
++++ b/drivers/mtd/nand/raw/nand_onfi.c
+@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
+ 			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ 	}
+ 
++	if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
++		chip->parameters.supports_read_cache = true;
++
+ 	onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ 	if (!onfi) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
+index 28b7bd7e22eb4..9dd06eeb021e1 100644
+--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
++++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
+@@ -513,6 +513,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ 	u32 addr1 = 0, addr2 = 0, row;
+ 	u32 cmd_addr;
+ 	int i, ret;
++	u8 status;
+ 
+ 	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ 	if (ret)
+@@ -565,6 +566,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ 	if (ret)
+ 		goto disable_ecc_engine;
+ 
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		goto disable_ecc_engine;
++
++	if (status & NAND_STATUS_FAIL)
++		ret = -EIO;
++
+ disable_ecc_engine:
+ 	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+ 
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 72d6168d8a1be..ce8b539de900d 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -3309,7 +3309,7 @@ err_nandc_alloc:
+ err_aon_clk:
+ 	clk_disable_unprepare(nandc->core_clk);
+ err_core_clk:
+-	dma_unmap_resource(dev, res->start, resource_size(res),
++	dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
+ 			   DMA_BIDIRECTIONAL, 0);
+ 	return ret;
+ }
+diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
+index 50b7295bc9222..12601bc4227a7 100644
+--- a/drivers/mtd/nand/spi/micron.c
++++ b/drivers/mtd/nand/spi/micron.c
+@@ -12,7 +12,7 @@
+ 
+ #define SPINAND_MFR_MICRON		0x2c
+ 
+-#define MICRON_STATUS_ECC_MASK		GENMASK(7, 4)
++#define MICRON_STATUS_ECC_MASK		GENMASK(6, 4)
+ #define MICRON_STATUS_ECC_NO_BITFLIPS	(0 << 4)
+ #define MICRON_STATUS_ECC_1TO3_BITFLIPS	(1 << 4)
+ #define MICRON_STATUS_ECC_4TO6_BITFLIPS	(3 << 4)
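
For context on the mask change: the encodings visible above (0 << 4, 1 << 4, 3 << 4) all live in bits [6:4], so the old GENMASK(7, 4) also captured status bit 7, which this fix treats as outside the ECC field; a stray bit 7 could push the decode into the wrong case. A hypothetical decode in the style of a SPI-NAND ->get_status() callback, assuming the usual convention of returning the worst-case bitflip count or -EBADMSG:

	switch (status & MICRON_STATUS_ECC_MASK) {	/* bits [6:4] only */
	case MICRON_STATUS_ECC_NO_BITFLIPS:
		return 0;
	case MICRON_STATUS_ECC_1TO3_BITFLIPS:
		return 3;	/* report worst case of the range */
	case MICRON_STATUS_ECC_4TO6_BITFLIPS:
		return 6;
	default:
		return -EBADMSG;	/* uncorrectable or unknown encoding */
	}
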
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 447b06ea4fc9c..a64ebb7f5b712 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4022,7 +4022,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
+ 	if (likely(n <= hlen))
+ 		return data;
+ 	else if (skb && likely(pskb_may_pull(skb, n)))
+-		return skb->head;
++		return skb->data;
+ 
+ 	return NULL;
+ }
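
The bonding one-liner matters because pskb_may_pull() may reallocate and linearize the head buffer: skb->head is the start of the allocation, not the packet data, and any pointer computed before the call is stale. The safe shape (a sketch, not part of the patch):

	if (!pskb_may_pull(skb, n))
		return NULL;
	return skb->data;	/* re-read; skb->head is the buffer start */
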
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 72374b066f64a..cd1f240c90f39 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ 	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ 	priv->master_mii_bus = of_mdio_find_bus(dn);
+ 	if (!priv->master_mii_bus) {
+-		of_node_put(dn);
+-		return -EPROBE_DEFER;
++		err = -EPROBE_DEFER;
++		goto err_of_node_put;
+ 	}
+ 
+-	get_device(&priv->master_mii_bus->dev);
+ 	priv->master_mii_dn = dn;
+ 
+ 	priv->slave_mii_bus = mdiobus_alloc();
+ 	if (!priv->slave_mii_bus) {
+-		of_node_put(dn);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto err_put_master_mii_bus_dev;
+ 	}
+ 
+ 	priv->slave_mii_bus->priv = priv;
+@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ 	}
+ 
+ 	err = mdiobus_register(priv->slave_mii_bus);
+-	if (err && dn) {
+-		mdiobus_free(priv->slave_mii_bus);
+-		of_node_put(dn);
+-	}
++	if (err && dn)
++		goto err_free_slave_mii_bus;
+ 
++	return 0;
++
++err_free_slave_mii_bus:
++	mdiobus_free(priv->slave_mii_bus);
++err_put_master_mii_bus_dev:
++	put_device(&priv->master_mii_bus->dev);
++err_of_node_put:
++	of_node_put(dn);
+ 	return err;
+ }
+ 
+@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+ {
+ 	mdiobus_unregister(priv->slave_mii_bus);
+ 	mdiobus_free(priv->slave_mii_bus);
++	put_device(&priv->master_mii_bus->dev);
+ 	of_node_put(priv->master_mii_dn);
+ }
+ 
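
The bcm_sf2 rework replaces scattered per-site cleanup with the standard kernel unwind ladder: release in reverse order of acquisition, one label per resource. The new put_device() calls balance the reference of_mdio_find_bus() takes, which is also why the explicit get_device() is dropped as redundant. A generic sketch of the idiom (all helper names hypothetical):

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		err = -ENOMEM;
		goto err_release_a;
	}

	err = register_c(b);
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(b);
err_release_a:
	release_a(a);
	return err;
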
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+index 5fc64e47568a9..d567e42e17601 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
+ 			   struct sock *sk, long *timeo_p)
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+-	int err = 0;
++	int ret, err = 0;
+ 	long current_timeo;
+ 	long vm_wait = 0;
+ 	bool noblock;
+@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
+ 
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+-		sk_wait_event(sk, &current_timeo, sk->sk_err ||
+-			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
+-			      (csk_mem_free(cdev, sk) && !vm_wait), &wait);
++		ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
++				    (sk->sk_shutdown & SEND_SHUTDOWN) ||
++				    (csk_mem_free(cdev, sk) && !vm_wait),
++				    &wait);
+ 		sk->sk_write_pending--;
++		if (ret < 0)
++			goto do_error;
+ 
+ 		if (vm_wait) {
+ 			vm_wait -= current_timeo;
+@@ -1348,6 +1351,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	int copied = 0;
+ 	int target;
+ 	long timeo;
++	int ret;
+ 
+ 	buffers_freed = 0;
+ 
+@@ -1423,7 +1427,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		if (copied >= target)
+ 			break;
+ 		chtls_cleanup_rbuf(sk, copied);
+-		sk_wait_data(sk, &timeo, NULL);
++		ret = sk_wait_data(sk, &timeo, NULL);
++		if (ret < 0) {
++			copied = copied ? : ret;
++			goto unlock;
++		}
+ 		continue;
+ found_ok_skb:
+ 		if (!skb->len) {
+@@ -1518,6 +1526,8 @@ skip_copy:
+ 
+ 	if (buffers_freed)
+ 		chtls_cleanup_rbuf(sk, copied);
++
++unlock:
+ 	release_sock(sk);
+ 	return copied;
+ }
+@@ -1534,6 +1544,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ 	int copied = 0;
+ 	size_t avail;          /* amount of available data in current skb */
+ 	long timeo;
++	int ret;
+ 
+ 	lock_sock(sk);
+ 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+@@ -1585,7 +1596,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ 			release_sock(sk);
+ 			lock_sock(sk);
+ 		} else {
+-			sk_wait_data(sk, &timeo, NULL);
++			ret = sk_wait_data(sk, &timeo, NULL);
++			if (ret < 0) {
++				/* here 'copied' is 0 due to previous checks */
++				copied = ret;
++				break;
++			}
+ 		}
+ 
+ 		if (unlikely(peek_seq != tp->copied_seq)) {
+@@ -1656,6 +1672,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	int copied = 0;
+ 	long timeo;
+ 	int target;             /* Read at least this many bytes */
++	int ret;
+ 
+ 	buffers_freed = 0;
+ 
+@@ -1747,7 +1764,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		if (copied >= target)
+ 			break;
+ 		chtls_cleanup_rbuf(sk, copied);
+-		sk_wait_data(sk, &timeo, NULL);
++		ret = sk_wait_data(sk, &timeo, NULL);
++		if (ret < 0) {
++			copied = copied ? : ret;
++			goto unlock;
++		}
+ 		continue;
+ 
+ found_ok_skb:
+@@ -1816,6 +1837,7 @@ skip_copy:
+ 	if (buffers_freed)
+ 		chtls_cleanup_rbuf(sk, copied);
+ 
++unlock:
+ 	release_sock(sk);
+ 	return copied;
+ }
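
The chtls changes all begin checking the return of sk_wait_data()/sk_wait_event(), which in this series can report a negative error rather than only a timeout. The `copied = copied ? : ret;` form is GNU C's conditional with the middle operand omitted: keep the byte count if any data was already copied, otherwise surface the error. Expanded (sketch):

	ret = sk_wait_data(sk, &timeo, NULL);
	if (ret < 0) {
		/* prefer the byte count if anything was already copied */
		copied = copied ? copied : ret;
		goto unlock;
	}
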
+diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
+index d1da7413dc4de..e84a066aa1a40 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx.c
++++ b/drivers/net/ethernet/google/gve/gve_rx.c
+@@ -146,7 +146,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+ 		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
+ 					  &rx->data.data_ring[i]);
+ 		if (err)
+-			goto alloc_err;
++			goto alloc_err_rda;
+ 	}
+ 
+ 	if (!rx->data.raw_addressing) {
+@@ -171,12 +171,26 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+ 	return slots;
+ 
+ alloc_err_qpl:
++	/* Fully free the copy pool pages. */
+ 	while (j--) {
+ 		page_ref_sub(rx->qpl_copy_pool[j].page,
+ 			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
+ 		put_page(rx->qpl_copy_pool[j].page);
+ 	}
+-alloc_err:
++
++	/* Do not fully free QPL pages - only remove the bias added in this
++	 * function with gve_setup_rx_buffer.
++	 */
++	while (i--)
++		page_ref_sub(rx->data.page_info[i].page,
++			     rx->data.page_info[i].pagecnt_bias - 1);
++
++	gve_unassign_qpl(priv, rx->data.qpl->id);
++	rx->data.qpl = NULL;
++
++	return err;
++
++alloc_err_rda:
+ 	while (i--)
+ 		gve_rx_free_buffer(&priv->pdev->dev,
+ 				   &rx->data.page_info[i],
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index ed88e38d488b2..95843f0f1a3e8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
+ 		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+-	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
++	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
+ 		num_queues = (j - base_queue) + 1;
+ 	else
+ 		num_queues = 0;
+@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
+ 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+-	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
++	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
+ 		num_vfs = (j - i) + 1;
+ 	else
+ 		num_vfs = 0;
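
Both i40e guards close the same unsigned-arithmetic trap: the counts are computed as (last - first) + 1 from register readouts, so a bogus readout with last < first would wrap to a huge unsigned value instead of going negative. The guarded shape (names hypothetical):

	/* first/last read back from hardware; treat last < first as empty */
	if ((val & VALID_MASK) && last >= first)
		count = (last - first) + 1;	/* inclusive range */
	else
		count = 0;			/* empty or bogus readout */
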
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 0054d7e64ec31..40234ddf10733 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+ 
+ 	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ 				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+-				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+-				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
++				(hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index c2cdc79308dc1..5c940da8717b4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6,6 +6,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <generated/utsrelease.h>
++#include <linux/crash_dump.h>
+ #include "ice.h"
+ #include "ice_base.h"
+ #include "ice_lib.h"
+@@ -4632,6 +4633,9 @@ static void ice_init_features(struct ice_pf *pf)
+ 
+ static void ice_deinit_features(struct ice_pf *pf)
+ {
++	if (ice_is_safe_mode(pf))
++		return;
++
+ 	ice_deinit_lag(pf);
+ 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ 		ice_cfg_lldp_mib_change(&pf->hw, false);
+@@ -4963,6 +4967,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ 		return -EINVAL;
+ 	}
+ 
++	/* when under a kdump kernel, initiate a reset before enabling the
++	 * device in order to clear out any pending DMA transactions. These
++	 * transactions can cause some systems to machine check when doing
++	 * the pcim_enable_device() below.
++	 */
++	if (is_kdump_kernel()) {
++		pci_save_state(pdev);
++		pci_clear_master(pdev);
++		err = pcie_flr(pdev);
++		if (err)
++			return err;
++		pci_restore_state(pdev);
++	}
++
+ 	/* this driver uses devres, see
+ 	 * Documentation/driver-api/driver-model/devres.rst
+ 	 */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index dbc518ff82768..5b46ca47c8e59 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -715,20 +715,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ 		hw_desc->dptr = tx_buffer->sglist_dma;
+ 	}
+ 
+-	/* Flush the hw descriptor before writing to doorbell */
+-	wmb();
+-
+-	/* Ring Doorbell to notify the NIC there is a new packet */
+-	writel(1, iq->doorbell_reg);
++	netdev_tx_sent_queue(iq->netdev_q, skb->len);
++	skb_tx_timestamp(skb);
+ 	atomic_inc(&iq->instr_pending);
+ 	wi++;
+ 	if (wi == iq->max_count)
+ 		wi = 0;
+ 	iq->host_write_index = wi;
++	/* Flush the hw descriptor before writing to doorbell */
++	wmb();
+ 
+-	netdev_tx_sent_queue(iq->netdev_q, skb->len);
++	/* Ring Doorbell to notify the NIC there is a new packet */
++	writel(1, iq->doorbell_reg);
+ 	iq->stats.instr_posted++;
+-	skb_tx_timestamp(skb);
+ 	return NETDEV_TX_OK;
+ 
+ dma_map_sg_err:
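
The octep reorder does two things: bookkeeping (netdev_tx_sent_queue(), timestamping, write-index update) now happens before the device is notified, and the wmb() now sits between the last descriptor/index store and the MMIO doorbell, so the NIC cannot observe the doorbell before the descriptor it announces. The essential ordering, as the hunk leaves it:

	iq->host_write_index = wi;	/* last CPU store for this packet */
	wmb();				/* order stores before the doorbell */
	writel(1, iq->doorbell_reg);	/* device may now fetch descriptors */
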
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index ddec1627f1a7b..8d0bacf4e49cc 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2195,7 +2195,7 @@ struct rx_ring_info {
+ 	struct sk_buff	*skb;
+ 	dma_addr_t	data_addr;
+ 	DEFINE_DMA_UNMAP_LEN(data_size);
+-	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
++	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
+ };
+ 
+ enum flow_control {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 7c0f2adbea000..ad789349c06e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -848,7 +848,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+ 
+ 	mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ 	if (tracer->owner) {
+-		tracer->owner = false;
++		mlx5_fw_tracer_ownership_acquire(tracer);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 1730f6a716eea..b10e40e1a9c14 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -24,7 +24,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
+ 
+ 	route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
+ 
+-	if (!route_dev || !netif_is_ovs_master(route_dev))
++	if (!route_dev || !netif_is_ovs_master(route_dev) ||
++	    attr->parse_attr->filter_dev == e->out_dev)
+ 		goto out;
+ 
+ 	err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+index 40589cebb7730..4fd4c9febab95 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+@@ -873,11 +873,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ 	}
+ 
+ out:
+-	if (flags & XDP_XMIT_FLUSH) {
+-		if (sq->mpwqe.wqe)
+-			mlx5e_xdp_mpwqe_complete(sq);
++	if (sq->mpwqe.wqe)
++		mlx5e_xdp_mpwqe_complete(sq);
++
++	if (flags & XDP_XMIT_FLUSH)
+ 		mlx5e_xmit_xdp_doorbell(sq);
+-	}
+ 
+ 	return nxmit;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 99b3843396f33..0cd44ef190058 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -704,7 +704,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 
+ 	/* update HW stats in background for next time */
+ 	mlx5e_queue_update_stats(priv);
+-	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
++	mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
+ }
+ 
+ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
+@@ -772,6 +772,7 @@ static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
+ 
+ static void mlx5e_build_rep_params(struct net_device *netdev)
+ {
++	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+@@ -797,8 +798,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+ 	/* RQ */
+ 	mlx5e_build_rq_params(mdev, params);
+ 
++	/* If netdev is already registered (e.g. move from nic profile to uplink),
++	 * RTNL lock must be held before triggering netdev notifiers.
++	 */
++	if (take_rtnl)
++		rtnl_lock();
+ 	/* update XDP supported features */
+ 	mlx5e_set_xdp_feature(netdev);
++	if (take_rtnl)
++		rtnl_unlock();
+ 
+ 	/* CQ moderation params */
+ 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 41d37159e027b..5df970e6e29d5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -457,26 +457,41 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+ static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+ {
+ 	int remaining = wqe_bulk;
+-	int i = 0;
++	int total_alloc = 0;
++	int refill_alloc;
++	int refill;
+ 
+ 	/* The WQE bulk is split into smaller bulks that are sized
+ 	 * according to the page pool cache refill size to avoid overflowing
+ 	 * the page pool cache due to too many page releases at once.
+ 	 */
+ 	do {
+-		int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
+-		int alloc_count;
++		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
+ 
+-		mlx5e_free_rx_wqes(rq, ix + i, refill);
+-		alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
+-		i += alloc_count;
+-		if (unlikely(alloc_count != refill))
+-			break;
++		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
++		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
++		if (unlikely(refill_alloc != refill))
++			goto err_free;
+ 
++		total_alloc += refill_alloc;
+ 		remaining -= refill;
+ 	} while (remaining);
+ 
+-	return i;
++	return total_alloc;
++
++err_free:
++	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
++
++	for (int i = 0; i < total_alloc + refill; i++) {
++		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
++		struct mlx5e_wqe_frag_info *frag;
++
++		frag = get_frag(rq, j);
++		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
++			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
++	}
++
++	return 0;
+ }
+ 
+ static void
+@@ -816,6 +831,8 @@ err_unmap:
+ 		mlx5e_page_release_fragmented(rq, frag_page);
+ 	}
+ 
++	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
++
+ err:
+ 	rq->stats->buff_alloc_err++;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+index 1ff8a06027dcf..67938b4ea1b90 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -475,11 +475,20 @@ struct mlx5e_stats {
+ 	struct mlx5e_vnic_env_stats vnic;
+ 	struct mlx5e_vport_stats vport;
+ 	struct mlx5e_pport_stats pport;
+-	struct rtnl_link_stats64 vf_vport;
+ 	struct mlx5e_pcie_stats pcie;
+ 	struct mlx5e_rep_stats rep_stats;
+ };
+ 
++static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
++					      struct mlx5e_rep_stats *rep_stats)
++{
++	memset(vf_vport, 0, sizeof(*vf_vport));
++	vf_vport->rx_packets = rep_stats->vport_rx_packets;
++	vf_vport->tx_packets = rep_stats->vport_tx_packets;
++	vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
++	vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
++}
++
+ extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+ unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 4b22a91482cec..5797d8607633e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4931,7 +4931,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
+ 			if (err)
+ 				return err;
+ 
+-			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
++			mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
++						   &priv->stats.rep_stats);
+ 			break;
+ 		default:
+ 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
+@@ -4971,7 +4972,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ 	u64 dbytes;
+ 	u64 dpkts;
+ 
+-	cur_stats = priv->stats.vf_vport;
++	mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
+ 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ 	rpriv->prev_vf_vport_stats = cur_stats;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 6e9b1b183190d..51afb97b9e452 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1022,11 +1022,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+ 	return ERR_PTR(err);
+ }
+ 
+-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
++static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
+ {
+-	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+-	mlx5_eq_notifier_register(esw->dev, &esw->nb);
+-
+ 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+ 		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+ 			     ESW_FUNCTIONS_CHANGED);
+@@ -1034,13 +1031,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+ 	}
+ }
+ 
+-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
++static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
+ {
+ 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+ 		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+ 
+-	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+-
+ 	flush_workqueue(esw->work_queue);
+ }
+ 
+@@ -1419,6 +1414,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ 
+ 	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+ 
++	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
++	mlx5_eq_notifier_register(esw->dev, &esw->nb);
++
+ 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
+ 		err = esw_legacy_enable(esw);
+ 	} else {
+@@ -1431,7 +1429,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ 
+ 	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+ 
+-	mlx5_eswitch_event_handlers_register(esw);
++	mlx5_eswitch_event_handler_register(esw);
+ 
+ 	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
+ 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+@@ -1558,7 +1556,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+ 	 */
+ 	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
+ 
+-	mlx5_eswitch_event_handlers_unregister(esw);
++	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
++	mlx5_eswitch_event_handler_unregister(esw);
+ 
+ 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
+ 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 717a0b3f89bd5..ab5ef254a7483 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
+ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ 				u8 **data, dma_addr_t *phys_addr)
+ {
+-	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
++	size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
++		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
++	*data = kmalloc(size, GFP_ATOMIC);
+ 	if (!(*data)) {
+ 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ 		return -ENOMEM;
+@@ -2589,7 +2592,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+ 	INIT_LIST_HEAD(&cdev->ll2->list);
+ 	spin_lock_init(&cdev->ll2->lock);
+ 
+-	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
++	cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
+ 			     L1_CACHE_BYTES + params->mtu;
+ 
+ 	/* Allocate memory for LL2.
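
The qed_ll2 sizing fix separates the DMA payload area (rx_size, now padded with PRM_DMA_PAD_BYTES_NUM rather than NET_SKB_PAD) from the skb overhead added at allocation time. A sketch of the general rule for a raw buffer that will later be wrapped into an skb (payload_len is hypothetical):

	/* headroom for the stack, tail for the shared info the skb needs */
	size_t size = payload_len + NET_SKB_PAD +
		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u8 *data = kmalloc(size, GFP_ATOMIC);
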
+diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
+index a881e35233283..bef4cce71287c 100644
+--- a/drivers/net/mdio/mdio-mux.c
++++ b/drivers/net/mdio/mdio-mux.c
+@@ -55,6 +55,27 @@ out:
+ 	return r;
+ }
+ 
++static int mdio_mux_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
++			     int regnum)
++{
++	struct mdio_mux_child_bus *cb = bus->priv;
++	struct mdio_mux_parent_bus *pb = cb->parent;
++	int r;
++
++	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
++	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
++	if (r)
++		goto out;
++
++	pb->current_child = cb->bus_number;
++
++	r = pb->mii_bus->read_c45(pb->mii_bus, phy_id, dev_addr, regnum);
++out:
++	mutex_unlock(&pb->mii_bus->mdio_lock);
++
++	return r;
++}
++
+ /*
+  * The parent bus' lock is used to order access to the switch_fn.
+  */
+@@ -80,6 +101,28 @@ out:
+ 	return r;
+ }
+ 
++static int mdio_mux_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
++			      int regnum, u16 val)
++{
++	struct mdio_mux_child_bus *cb = bus->priv;
++	struct mdio_mux_parent_bus *pb = cb->parent;
++
++	int r;
++
++	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
++	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
++	if (r)
++		goto out;
++
++	pb->current_child = cb->bus_number;
++
++	r = pb->mii_bus->write_c45(pb->mii_bus, phy_id, dev_addr, regnum, val);
++out:
++	mutex_unlock(&pb->mii_bus->mdio_lock);
++
++	return r;
++}
++
+ static int parent_count;
+ 
+ static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+@@ -173,6 +216,10 @@ int mdio_mux_init(struct device *dev,
+ 		cb->mii_bus->parent = dev;
+ 		cb->mii_bus->read = mdio_mux_read;
+ 		cb->mii_bus->write = mdio_mux_write;
++		if (parent_bus->read_c45)
++			cb->mii_bus->read_c45 = mdio_mux_read_c45;
++		if (parent_bus->write_c45)
++			cb->mii_bus->write_c45 = mdio_mux_write_c45;
+ 		r = of_mdiobus_register(cb->mii_bus, child_bus_node);
+ 		if (r) {
+ 			mdiobus_free(cb->mii_bus);
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index f8c17a253f8b3..efe2542d08815 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -894,6 +894,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
+ 	.name		= _name,					\
+ 	/* PHY_BASIC_FEATURES */					\
+ 	.flags		= PHY_IS_INTERNAL,				\
++	.get_sset_count	= bcm_phy_get_sset_count,			\
++	.get_strings	= bcm_phy_get_strings,				\
++	.get_stats	= bcm7xxx_28nm_get_phy_stats,			\
+ 	.probe		= bcm7xxx_28nm_probe,				\
+ 	.config_init	= bcm7xxx_16nm_ephy_config_init,		\
+ 	.config_aneg	= genphy_config_aneg,				\
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 100339bc8b04a..bd3fc4b9d61af 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3068,10 +3068,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 	struct net *net = sock_net(&tfile->sk);
+ 	struct tun_struct *tun;
+ 	void __user* argp = (void __user*)arg;
+-	unsigned int ifindex, carrier;
++	unsigned int carrier;
+ 	struct ifreq ifr;
+ 	kuid_t owner;
+ 	kgid_t group;
++	int ifindex;
+ 	int sndbuf;
+ 	int vnet_hdr_sz;
+ 	int le;
+@@ -3127,7 +3128,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 		ret = -EFAULT;
+ 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+ 			goto unlock;
+-
++		ret = -EINVAL;
++		if (ifindex < 0)
++			goto unlock;
+ 		ret = 0;
+ 		tfile->ifindex = ifindex;
+ 		goto unlock;
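
The tun fix makes ifindex signed so the TUNSETIFINDEX input can be range-checked: userspace supplies 32 bits, and a negative value must be rejected before being stored in tfile->ifindex. The shape of the validation (sketch):

	int ifindex;	/* signed on purpose: negatives are invalid */

	if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
		return -EFAULT;
	if (ifindex < 0)
		return -EINVAL;
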
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 563ecd27b93ea..17da42fe605c3 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -897,7 +897,7 @@ static int smsc95xx_reset(struct usbnet *dev)
+ 
+ 	if (timeout >= 100) {
+ 		netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
+-		return ret;
++		return -ETIMEDOUT;
+ 	}
+ 
+ 	ret = smsc95xx_set_mac_address(dev);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 36d70d589aedd..898dca3936435 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ 
+ 		memset(&info->status, 0, sizeof(info->status));
++		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+ 
+ 		/* inform mac80211 about what happened with the frame */
+ 		switch (status & TX_STATUS_MSK) {
+@@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ 		 */
+ 		if (!is_flush)
+ 			info->flags |= IEEE80211_TX_STAT_ACK;
++		else
++			info->flags &= ~IEEE80211_TX_STAT_ACK;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index d1d3632a3ed7b..4ab3a14567b65 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -921,6 +921,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 	while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ 		tlv_type = le16_to_cpu(tlv_rxba->header.type);
+ 		tlv_len  = le16_to_cpu(tlv_rxba->header.len);
++		if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
++			mwifiex_dbg(priv->adapter, WARN,
++				    "TLV size (%zu) overflows event_buf buf_left=%d\n",
++				    size_add(sizeof(tlv_rxba->header), tlv_len),
++				    tlv_buf_left);
++			return;
++		}
++
+ 		if (tlv_type != TLV_TYPE_RXBA_SYNC) {
+ 			mwifiex_dbg(priv->adapter, ERROR,
+ 				    "Wrong TLV id=0x%x\n", tlv_type);
+@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 
+ 		tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
+ 		tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
++		if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
++			mwifiex_dbg(priv->adapter, WARN,
++				    "TLV size (%zu) overflows event_buf buf_left=%d\n",
++				    size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
++				    tlv_buf_left);
++			return;
++		}
++
+ 		mwifiex_dbg(priv->adapter, INFO,
+ 			    "%pM tid=%d seq_num=%d bitmap_len=%d\n",
+ 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
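
Both new mwifiex checks lean on size_add() from <linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping, so attacker-controlled TLV lengths cannot overflow the sum and slip under the remaining-bytes test. Sketch (hdr and payload_len hypothetical):

	/* a saturated sum can never compare <= buf_left */
	if (size_add(sizeof(*hdr), payload_len) > buf_left)
		return;		/* malformed or truncated TLV stream */
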
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+index 635301d677e18..829515a601b37 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+@@ -4,7 +4,6 @@
+  */
+ 
+ #include <linux/delay.h>
+-#include <linux/pm_runtime.h>
+ 
+ #include "iosm_ipc_chnl_cfg.h"
+ #include "iosm_ipc_devlink.h"
+@@ -632,11 +631,6 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
+ 	/* Complete all memory stores after setting bit */
+ 	smp_mb__after_atomic();
+ 
+-	if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
+-		pm_runtime_mark_last_busy(ipc_imem->dev);
+-		pm_runtime_put_autosuspend(ipc_imem->dev);
+-	}
+-
+ 	return;
+ 
+ err_ipc_mux_deinit:
+@@ -1240,7 +1234,6 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
+ 
+ 	/* forward MDM_NOT_READY to listeners */
+ 	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
+-	pm_runtime_get_sync(ipc_imem->dev);
+ 
+ 	hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ 	hrtimer_cancel(&ipc_imem->tdupdate_timer);
+@@ -1426,16 +1419,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ 
+ 		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
+ 	}
+-
+-	if (!pm_runtime_enabled(ipc_imem->dev))
+-		pm_runtime_enable(ipc_imem->dev);
+-
+-	pm_runtime_set_autosuspend_delay(ipc_imem->dev,
+-					 IPC_MEM_AUTO_SUSPEND_DELAY_MS);
+-	pm_runtime_use_autosuspend(ipc_imem->dev);
+-	pm_runtime_allow(ipc_imem->dev);
+-	pm_runtime_mark_last_busy(ipc_imem->dev);
+-
+ 	return ipc_imem;
+ devlink_channel_fail:
+ 	ipc_devlink_deinit(ipc_imem->ipc_devlink);
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
+index 0144b45e2afb3..5664ac507c902 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
+@@ -103,8 +103,6 @@ struct ipc_chnl_cfg;
+ #define FULLY_FUNCTIONAL 0
+ #define IOSM_DEVLINK_INIT 1
+ 
+-#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000
+-
+ /* List of the supported UL/DL pipes. */
+ enum ipc_mem_pipes {
+ 	IPC_MEM_PIPE_0 = 0,
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+index 3a259c9abefdf..04517bd3325a2 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+@@ -6,7 +6,6 @@
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
+ #include <linux/module.h>
+-#include <linux/pm_runtime.h>
+ #include <net/rtnetlink.h>
+ 
+ #include "iosm_ipc_imem.h"
+@@ -438,8 +437,7 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
+ 	return 0;
+ }
+ 
+-static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
+-				 ipc_pcie_resume_cb, NULL);
++static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
+ 
+ static struct pci_driver iosm_ipc_driver = {
+ 	.name = KBUILD_MODNAME,
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
+index 2ba1ddca3945b..5d5b4183e14a3 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
+@@ -3,8 +3,6 @@
+  * Copyright (C) 2020-21 Intel Corporation.
+  */
+ 
+-#include <linux/pm_runtime.h>
+-
+ #include "iosm_ipc_chnl_cfg.h"
+ #include "iosm_ipc_imem_ops.h"
+ #include "iosm_ipc_port.h"
+@@ -15,16 +13,12 @@ static int ipc_port_ctrl_start(struct wwan_port *port)
+ 	struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+ 	int ret = 0;
+ 
+-	pm_runtime_get_sync(ipc_port->ipc_imem->dev);
+ 	ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
+ 						   ipc_port->chl_id,
+ 						   IPC_HP_CDEV_OPEN);
+ 	if (!ipc_port->channel)
+ 		ret = -EIO;
+ 
+-	pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
+-
+ 	return ret;
+ }
+ 
+@@ -33,24 +27,15 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
+ {
+ 	struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+ 
+-	pm_runtime_get_sync(ipc_port->ipc_imem->dev);
+ 	ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
+-	pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
+ }
+ 
+ /* transfer control data to modem */
+ static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+ {
+ 	struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+-	int ret;
+ 
+-	pm_runtime_get_sync(ipc_port->ipc_imem->dev);
+-	ret = ipc_imem_sys_cdev_write(ipc_port, skb);
+-	pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
+-
+-	return ret;
++	return ipc_imem_sys_cdev_write(ipc_port, skb);
+ }
+ 
+ static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
+index 4368373797b69..eeecfa3d10c5a 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_trace.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
+@@ -3,9 +3,7 @@
+  * Copyright (C) 2020-2021 Intel Corporation.
+  */
+ 
+-#include <linux/pm_runtime.h>
+ #include <linux/wwan.h>
+-
+ #include "iosm_ipc_trace.h"
+ 
+ /* sub buffer size and number of sub buffer */
+@@ -99,8 +97,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
+ 	if (ret)
+ 		return ret;
+ 
+-	pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
+-
+ 	mutex_lock(&ipc_trace->trc_mutex);
+ 	if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
+ 		ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
+@@ -121,10 +117,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
+ 	ret = count;
+ unlock:
+ 	mutex_unlock(&ipc_trace->trc_mutex);
+-
+-	pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+index 93d17de08786c..ff747fc79aaf8 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+@@ -6,7 +6,6 @@
+ #include <linux/etherdevice.h>
+ #include <linux/if_arp.h>
+ #include <linux/if_link.h>
+-#include <linux/pm_runtime.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/wwan.h>
+ #include <net/pkt_sched.h>
+@@ -52,13 +51,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
+ 	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+ 	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ 	int if_id = priv->if_id;
+-	int ret = 0;
+ 
+ 	if (if_id < IP_MUX_SESSION_START ||
+ 	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ 		return -EINVAL;
+ 
+-	pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
+ 	/* get channel id */
+ 	priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
+ 
+@@ -66,8 +63,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
+ 		dev_err(ipc_wwan->dev,
+ 			"cannot connect wwan0 & id %d to the IPC mem layer",
+ 			if_id);
+-		ret = -ENODEV;
+-		goto err_out;
++		return -ENODEV;
+ 	}
+ 
+ 	/* enable tx path, DL data may follow */
+@@ -76,11 +72,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
+ 	dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
+ 		priv->ch_id, priv->if_id);
+ 
+-err_out:
+-	pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ /* Bring-down the wwan net link */
+@@ -90,12 +82,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
+ 
+ 	netif_stop_queue(netdev);
+ 
+-	pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
+ 	ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
+ 				priv->ch_id);
+ 	priv->ch_id = -1;
+-	pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
+ 
+ 	return 0;
+ }
+@@ -117,7 +106,6 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ 	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ 		return -EINVAL;
+ 
+-	pm_runtime_get(ipc_wwan->ipc_imem->dev);
+ 	/* Send the SKB to device for transmission */
+ 	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
+ 					 if_id, priv->ch_id, skb);
+@@ -131,14 +119,9 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ 		ret = NETDEV_TX_BUSY;
+ 		dev_err(ipc_wwan->dev, "unable to push packets");
+ 	} else {
+-		pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
+-		pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
+ 		goto exit;
+ 	}
+ 
+-	pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
+-	pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
+-
+ 	return ret;
+ 
+ exit:
+diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
+index daf5d144a8eaf..064592a5d546a 100644
+--- a/drivers/nvme/host/auth.c
++++ b/drivers/nvme/host/auth.c
+@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ 	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ 	size_t size = sizeof(*data);
+ 
+-	if (chap->ctrl_key)
++	if (chap->s2)
+ 		size += chap->hash_len;
+ 
+ 	if (size > CHAP_BUF_SIZE) {
+@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
+ 		goto fail2;
+ 	}
+ 
+-	if (chap->ctrl_key) {
++	if (chap->s2) {
+ 		/* DH-HMAC-CHAP Step 5: send success2 */
+ 		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ 			__func__, chap->qid);
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index d39f3219358b9..09a223642bc12 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ 	if (!buf)
+ 		goto out;
+ 
+-	ret = -EFAULT;
+-	if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
+-		goto out_free_meta;
++	if (req_op(req) == REQ_OP_DRV_OUT) {
++		ret = -EFAULT;
++		if (copy_from_user(buf, ubuf, len))
++			goto out_free_meta;
++	} else {
++		memset(buf, 0, len);
++	}
+ 
+ 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ 	if (IS_ERR(bip)) {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 347cb5daebc3c..3f0c9ee09a12b 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES |
+-				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++				NVME_QUIRK_IGNORE_DEV_SUBNQN |
++				NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 337a624a537ce..a7fea4cbacd75 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+ 
+ static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+ {
++	if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
++		return;
++
+ 	mutex_lock(&queue->queue_lock);
+ 	if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ 		__nvme_rdma_stop_queue(queue);
+diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
+index 586458f765f17..1d9854484e2e8 100644
+--- a/drivers/nvme/target/fabrics-cmd-auth.c
++++ b/drivers/nvme/target/fabrics-cmd-auth.c
+@@ -333,19 +333,21 @@ done:
+ 			 __func__, ctrl->cntlid, req->sq->qid,
+ 			 status, req->error_loc);
+ 	req->cqe->result.u64 = 0;
+-	nvmet_req_complete(req, status);
+ 	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ 	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ 		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+ 
+ 		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ 				 auth_expire_secs * HZ);
+-		return;
++		goto complete;
+ 	}
+ 	/* Final states, clear up variables */
+ 	nvmet_auth_sq_free(req->sq);
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ 		nvmet_ctrl_fatal_error(ctrl);
++
++complete:
++	nvmet_req_complete(req, status);
+ }
+ 
+ static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
+ 	kfree(d);
+ done:
+ 	req->cqe->result.u64 = 0;
+-	nvmet_req_complete(req, status);
++
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ 		nvmet_auth_sq_free(req->sq);
+ 	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ 		nvmet_auth_sq_free(req->sq);
+ 		nvmet_ctrl_fatal_error(ctrl);
+ 	}
++	nvmet_req_complete(req, status);
+ }
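
Both nvmet-auth hunks move nvmet_req_complete() to the very end of the handler: completing a request may free or recycle it, so the old code's later accesses to req->sq were a potential use-after-free. The rule the fix enforces (sketch):

	/* touch req and req->sq only while we still own the request ... */
	nvmet_auth_sq_free(req->sq);
	/* ... then hand it back; it must not be dereferenced afterwards */
	nvmet_req_complete(req, status);
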
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index cd92d7ddf5ed1..197fc2ecb164d 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
+ 
+ static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+ {
++	queue->rcv_state = NVMET_TCP_RECV_ERR;
+ 	if (status == -EPIPE || status == -ECONNRESET)
+ 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ 	else
+@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ 	iov.iov_len = sizeof(*icresp);
+ 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ 	if (ret < 0)
+-		goto free_crypto;
++		return ret; /* queue removal will cleanup */
+ 
+ 	queue->state = NVMET_TCP_Q_LIVE;
+ 	nvmet_prepare_receive_pdu(queue);
+ 	return 0;
+-free_crypto:
+-	if (queue->hdr_digest || queue->data_digest)
+-		nvmet_tcp_free_crypto(queue);
+-	return ret;
+ }
+ 
+ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+index 1d567604b650d..376d023a0aa90 100644
+--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
+ {
+ 	struct phy_mdm6600 *ddata = phy_get_drvdata(x);
+ 	struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
+-	int error;
+ 
+ 	if (!ddata->enabled)
+ 		return -ENODEV;
+ 
+-	error = pinctrl_pm_select_default_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with default_state: %i\n",
+-			 __func__, error);
+-
+ 	gpiod_set_value_cansleep(enable_gpio, 1);
+ 
+ 	/* Allow aggressive PM for USB, it's only needed for n_gsm port */
+@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
+ 
+ 	gpiod_set_value_cansleep(enable_gpio, 0);
+ 
+-	error = pinctrl_pm_select_sleep_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+-			 __func__, error);
+-
+ 	return 0;
+ }
+ 
+@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
+ {
+ 	struct gpio_desc *reset_gpio =
+ 		ddata->ctrl_gpios[PHY_MDM6600_RESET];
++	int error;
+ 
+ 	ddata->enabled = false;
+ 	phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
+@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
+ 	} else {
+ 		dev_err(ddata->dev, "Timed out powering down\n");
+ 	}
++
++	/*
++	 * Keep reset gpio high with padconf internal pull-up resistor to
++	 * prevent modem from waking up during deeper SoC idle states. The
++	 * gpio bank lines can have glitches if not in the always-on wkup
++	 * domain.
++	 */
++	error = pinctrl_pm_select_sleep_state(ddata->dev);
++	if (error)
++		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
++			 __func__, error);
+ }
+ 
+ static void phy_mdm6600_deferred_power_on(struct work_struct *work)
+@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
+ 	ddata->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, ddata);
+ 
+-	/* Active state selected in phy_mdm6600_power_on() */
+-	error = pinctrl_pm_select_sleep_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+-			 __func__, error);
+-
+ 	error = phy_mdm6600_init_lines(ddata);
+ 	if (error)
+ 		return error;
+@@ -627,10 +622,12 @@ idle:
+ 	pm_runtime_put_autosuspend(ddata->dev);
+ 
+ cleanup:
+-	if (error < 0)
++	if (error < 0) {
+ 		phy_mdm6600_device_power_off(ddata);
+-	pm_runtime_disable(ddata->dev);
+-	pm_runtime_dont_use_autosuspend(ddata->dev);
++		pm_runtime_disable(ddata->dev);
++		pm_runtime_dont_use_autosuspend(ddata->dev);
++	}
++
+ 	return error;
+ }
+ 
+@@ -639,6 +636,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev)
+ 	struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
+ 	struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ 
++	pm_runtime_get_noresume(ddata->dev);
+ 	pm_runtime_dont_use_autosuspend(ddata->dev);
+ 	pm_runtime_put_sync(ddata->dev);
+ 	pm_runtime_disable(ddata->dev);
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index bebce8c591a30..3e6bec4c4d6ce 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -772,10 +772,10 @@ static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
+-	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
+ };
+ 
+ static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
++	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ 	QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+@@ -2649,6 +2649,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
+ 	void __iomem *tx2 = qmp->tx2;
+ 	void __iomem *rx2 = qmp->rx2;
+ 	void __iomem *pcs = qmp->pcs;
++	void __iomem *pcs_usb = qmp->pcs_usb;
+ 	void __iomem *status;
+ 	unsigned int val;
+ 	int ret;
+@@ -2670,6 +2671,9 @@ static int qmp_combo_usb_power_on(struct phy *phy)
+ 
+ 	qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ 
++	if (pcs_usb)
++		qmp_combo_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
++
+ 	if (cfg->has_pwrdn_delay)
+ 		usleep_range(10, 20);
+ 
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+index 9510e63ba9d8a..c38530d6776b4 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+@@ -12,7 +12,7 @@
+ #define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3		0xcc
+ #define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6		0xd8
+ #define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1		0xdc
+-#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1	0x90
++#define QPHY_USB_V6_PCS_POWER_STATE_CONFIG1		0x90
+ #define QPHY_USB_V6_PCS_RX_SIGDET_LVL			0x188
+ #define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L		0x190
+ #define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H		0x194
+@@ -23,6 +23,7 @@
+ #define QPHY_USB_V6_PCS_EQ_CONFIG1			0x1dc
+ #define QPHY_USB_V6_PCS_EQ_CONFIG5			0x1ec
+ 
++#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1	0x00
+ #define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL	0x18
+ #define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2	0x3c
+ #define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L		0x40
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+index 466f0a56c82e1..575329004b901 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+@@ -1480,8 +1480,6 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+@@ -1490,6 +1488,11 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ };
+ 
++static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_usb_tbl[] = {
++	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
++	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
++};
++
+ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
+@@ -1499,9 +1502,6 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+@@ -1510,6 +1510,12 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ };
+ 
++static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
++	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
++	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
++	QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
++};
++
+ struct qmp_usb_offsets {
+ 	u16 serdes;
+ 	u16 pcs;
+@@ -1788,6 +1794,8 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
+ 	.rx_tbl_num		= ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ 	.pcs_tbl		= sa8775p_usb3_uniphy_pcs_tbl,
+ 	.pcs_tbl_num		= ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
++	.pcs_usb_tbl		= sa8775p_usb3_uniphy_pcs_usb_tbl,
++	.pcs_usb_tbl_num	= ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl),
+ 	.clk_list		= qmp_v4_phy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(qmp_v4_phy_clk_l),
+ 	.reset_list		= qcm2290_usb3phy_reset_l,
+@@ -1833,6 +1841,8 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
+ 	.rx_tbl_num		= ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ 	.pcs_tbl		= sc8280xp_usb3_uniphy_pcs_tbl,
+ 	.pcs_tbl_num		= ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
++	.pcs_usb_tbl		= sc8280xp_usb3_uniphy_pcs_usb_tbl,
++	.pcs_usb_tbl_num	= ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_usb_tbl),
+ 	.clk_list		= qmp_v4_phy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(qmp_v4_phy_clk_l),
+ 	.reset_list		= qcm2290_usb3phy_reset_l,
+@@ -2233,6 +2243,7 @@ static int qmp_usb_power_on(struct phy *phy)
+ 	void __iomem *tx = qmp->tx;
+ 	void __iomem *rx = qmp->rx;
+ 	void __iomem *pcs = qmp->pcs;
++	void __iomem *pcs_usb = qmp->pcs_usb;
+ 	void __iomem *status;
+ 	unsigned int val;
+ 	int ret;
+@@ -2256,6 +2267,9 @@ static int qmp_usb_power_on(struct phy *phy)
+ 
+ 	qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ 
++	if (pcs_usb)
++		qmp_usb_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
++
+ 	if (cfg->has_pwrdn_delay)
+ 		usleep_range(10, 20);
+ 
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index b84781cfc2596..401886c813449 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1012,20 +1012,17 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ 
+ static struct pinctrl *find_pinctrl(struct device *dev)
+ {
+-	struct pinctrl *entry, *p = NULL;
++	struct pinctrl *p;
+ 
+ 	mutex_lock(&pinctrl_list_mutex);
+-
+-	list_for_each_entry(entry, &pinctrl_list, node) {
+-		if (entry->dev == dev) {
+-			p = entry;
+-			kref_get(&p->users);
+-			break;
++	list_for_each_entry(p, &pinctrl_list, node)
++		if (p->dev == dev) {
++			mutex_unlock(&pinctrl_list_mutex);
++			return p;
+ 		}
+-	}
+ 
+ 	mutex_unlock(&pinctrl_list_mutex);
+-	return p;
++	return NULL;
+ }
+ 
+ static void pinctrl_free(struct pinctrl *p, bool inlist);
+@@ -1133,6 +1130,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
+ 	p = find_pinctrl(dev);
+ 	if (p) {
+ 		dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
++		kref_get(&p->users);
+ 		return p;
+ 	}
+ 
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index fdb6585a92346..65088972dc942 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -31,7 +31,8 @@ struct lpi_pinctrl {
+ 	char __iomem *tlmm_base;
+ 	char __iomem *slew_base;
+ 	struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
+-	struct mutex slew_access_lock;
++	/* Protects from concurrent register updates */
++	struct mutex lock;
+ 	DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
+ 	const struct lpi_pinctrl_variant_data *data;
+ };
+@@ -102,6 +103,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ 	if (WARN_ON(i == g->nfuncs))
+ 		return -EINVAL;
+ 
++	mutex_lock(&pctrl->lock);
+ 	val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
+ 
+ 	/*
+@@ -127,6 +129,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ 
+ 	u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
+ 	lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
++	mutex_unlock(&pctrl->lock);
+ 
+ 	return 0;
+ }
+@@ -232,14 +235,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 			if (slew_offset == LPI_NO_SLEW)
+ 				break;
+ 
+-			mutex_lock(&pctrl->slew_access_lock);
++			mutex_lock(&pctrl->lock);
+ 
+ 			sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+ 			sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
+ 			sval |= arg << slew_offset;
+ 			iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+ 
+-			mutex_unlock(&pctrl->slew_access_lock);
++			mutex_unlock(&pctrl->lock);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+@@ -255,6 +258,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+ 	}
+ 
++	mutex_lock(&pctrl->lock);
+ 	val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
+ 
+ 	u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
+@@ -263,6 +267,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 	u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
+ 
+ 	lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
++	mutex_unlock(&pctrl->lock);
+ 
+ 	return 0;
+ }
+@@ -464,7 +469,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
+ 	pctrl->chip.label = dev_name(dev);
+ 	pctrl->chip.can_sleep = false;
+ 
+-	mutex_init(&pctrl->slew_access_lock);
++	mutex_init(&pctrl->lock);
+ 
+ 	pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
+ 	if (IS_ERR(pctrl->ctrl)) {
+@@ -486,7 +491,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_pinctrl:
+-	mutex_destroy(&pctrl->slew_access_lock);
++	mutex_destroy(&pctrl->lock);
+ 	clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+ 
+ 	return ret;
+@@ -498,7 +503,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
+ 	struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
+ 	int i;
+ 
+-	mutex_destroy(&pctrl->slew_access_lock);
++	mutex_destroy(&pctrl->lock);
+ 	clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+ 
+ 	for (i = 0; i < pctrl->data->npins; i++)
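
Editor's note: the hunks above funnel every read-modify-write of the shared config registers through the one renamed lock, so a mux change cannot interleave with a config or slew update. A minimal sketch of that idiom with a simulated register (names illustrative):

#include <pthread.h>
#include <stdint.h>

static uint32_t cfg_reg;		/* simulated shared register */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Replace one field without losing concurrent updates to the others:
 * the whole read-modify-write must sit under a single lock. */
static void cfg_update_field(uint32_t mask, unsigned int shift, uint32_t val)
{
	uint32_t v;

	pthread_mutex_lock(&cfg_lock);
	v = cfg_reg;			/* read            */
	v &= ~mask;			/* clear the field */
	v |= (val << shift) & mask;	/* set the field   */
	cfg_reg = v;			/* write back      */
	pthread_mutex_unlock(&cfg_lock);
}
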
+diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
+index f433a13c3689a..a5a3941b3f43a 100644
+--- a/drivers/platform/surface/surface_platform_profile.c
++++ b/drivers/platform/surface/surface_platform_profile.c
+@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
+ 	set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+ 	set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+ 
+-	platform_profile_register(&tpd->handler);
+-	return 0;
++	return platform_profile_register(&tpd->handler);
+ }
+ 
+ static void surface_platform_profile_remove(struct ssam_device *sdev)
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index cadbb557a108b..1417e230edbd8 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -105,6 +105,8 @@ struct apple_gmux_config {
+ #define GMUX_BRIGHTNESS_MASK		0x00ffffff
+ #define GMUX_MAX_BRIGHTNESS		GMUX_BRIGHTNESS_MASK
+ 
++# define MMIO_GMUX_MAX_BRIGHTNESS	0xffff
++
+ static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
+ {
+ 	return inb(gmux_data->iostart + port);
+@@ -857,7 +859,17 @@ get_version:
+ 
+ 	memset(&props, 0, sizeof(props));
+ 	props.type = BACKLIGHT_PLATFORM;
+-	props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
++
++	/*
++	 * All MMIO gmuxes have 0xffff as max brightness, but some iMacs incorrectly
++	 * report 0x03ff, despite the firmware being happy to set 0xffff as the brightness
++	 * at boot. Force 0xffff for all MMIO gmuxes so they all have the correct brightness
++	 * range.
++	 */
++	if (type == APPLE_GMUX_TYPE_MMIO)
++		props.max_brightness = MMIO_GMUX_MAX_BRIGHTNESS;
++	else
++		props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
+ 
+ #if IS_REACHABLE(CONFIG_ACPI_VIDEO)
+ 	register_bdev = acpi_video_get_backlight_type() == acpi_backlight_apple_gmux;
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index d85d895fee894..df1db54d4e183 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ 	{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
++	{ KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
++	{ KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
++	{ KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
+ 	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ 	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ 	{ KE_KEY, 0x32, { KEY_MUTE } },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 8bef66a2f0ce7..66b9fa1d76985 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3268,7 +3268,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
+ {
+ 	unsigned int key_value = 1;
+ 	bool autorelease = 1;
+-	int orig_code = code;
+ 
+ 	if (asus->driver->key_filter) {
+ 		asus->driver->key_filter(asus->driver, &code, &key_value,
+@@ -3277,16 +3276,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
+ 			return;
+ 	}
+ 
+-	if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+-		code = ASUS_WMI_BRN_UP;
+-	else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+-		code = ASUS_WMI_BRN_DOWN;
+-
+-	if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
+-		if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+-			asus_wmi_backlight_notify(asus, orig_code);
+-			return;
+-		}
++	if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
++	    code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
++		asus_wmi_backlight_notify(asus, code);
++		return;
+ 	}
+ 
+ 	if (code == NOTIFY_KBD_BRTUP) {
+diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
+index a478ebfd34dfa..fc41d1b1bb7f8 100644
+--- a/drivers/platform/x86/asus-wmi.h
++++ b/drivers/platform/x86/asus-wmi.h
+@@ -18,7 +18,7 @@
+ #include <linux/i8042.h>
+ 
+ #define ASUS_WMI_KEY_IGNORE (-1)
+-#define ASUS_WMI_BRN_DOWN	0x20
++#define ASUS_WMI_BRN_DOWN	0x2e
+ #define ASUS_WMI_BRN_UP		0x2f
+ 
+ struct module;
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index 1152deaa0078e..33ab207493e3e 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -176,7 +176,7 @@ show_uncore_data(initial_max_freq_khz);
+ 
+ static int create_attr_group(struct uncore_data *data, char *name)
+ {
+-	int ret, index = 0;
++	int ret, freq, index = 0;
+ 
+ 	init_attribute_rw(max_freq_khz);
+ 	init_attribute_rw(min_freq_khz);
+@@ -197,7 +197,11 @@ static int create_attr_group(struct uncore_data *data, char *name)
+ 	data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
+ 	data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
+ 	data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+-	data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++
++	ret = uncore_read_freq(data, &freq);
++	if (!ret)
++		data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++
+ 	data->uncore_attrs[index] = NULL;
+ 
+ 	data->uncore_attr_group.name = name;
+diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c
+index f26a3121092f9..492eb383ee7a9 100644
+--- a/drivers/platform/x86/msi-ec.c
++++ b/drivers/platform/x86/msi-ec.c
+@@ -276,14 +276,13 @@ static struct msi_ec_conf CONF2 __initdata = {
+ 
+ static const char * const ALLOWED_FW_3[] __initconst = {
+ 	"1592EMS1.111",
+-	"E1592IMS.10C",
+ 	NULL
+ };
+ 
+ static struct msi_ec_conf CONF3 __initdata = {
+ 	.allowed_fw = ALLOWED_FW_3,
+ 	.charge_control = {
+-		.address      = 0xef,
++		.address      = 0xd7,
+ 		.offset_start = 0x8a,
+ 		.offset_end   = 0x80,
+ 		.range_min    = 0x8a,
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index f9301a9382e74..0c67337726984 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -42,6 +42,21 @@ static const struct ts_dmi_data archos_101_cesium_educ_data = {
+ 	.properties     = archos_101_cesium_educ_props,
+ };
+ 
++static const struct property_entry bush_bush_windows_tablet_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"),
++	{ }
++};
++
++static const struct ts_dmi_data bush_bush_windows_tablet_data = {
++	.acpi_name      = "MSSL1680:00",
++	.properties     = bush_bush_windows_tablet_props,
++};
++
+ static const struct property_entry chuwi_hi8_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+@@ -756,6 +771,21 @@ static const struct ts_dmi_data pipo_w11_data = {
+ 	.properties	= pipo_w11_props,
+ };
+ 
++static const struct property_entry positivo_c4128b_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	{ }
++};
++
++static const struct ts_dmi_data positivo_c4128b_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= positivo_c4128b_props,
++};
++
+ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
+@@ -1070,6 +1100,13 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 Cesium Educ"),
+ 		},
+ 	},
++	{
++		/* Bush Windows tablet */
++		.driver_data = (void *)&bush_bush_windows_tablet_data,
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_NAME, "Bush Windows tablet"),
++		},
++	},
+ 	{
+ 		/* Chuwi Hi8 */
+ 		.driver_data = (void *)&chuwi_hi8_data,
+@@ -1480,6 +1517,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ 		},
+ 	},
++	{
++		/* Positivo C4128B */
++		.driver_data = (void *)&positivo_c4128b_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"),
++		},
++	},
+ 	{
+ 		/* Point of View mobii wintab p800w (v2.0) */
+ 		.driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
+diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
+index fff07b2bd77b9..62f592e617339 100644
+--- a/drivers/power/reset/Kconfig
++++ b/drivers/power/reset/Kconfig
+@@ -307,7 +307,7 @@ config NVMEM_REBOOT_MODE
+ 
+ config POWER_MLXBF
+ 	tristate "Mellanox BlueField power handling driver"
+-	depends on (GPIO_MLXBF2 && ACPI)
++	depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI
+ 	help
+ 	  This driver supports reset or low power mode handling for Mellanox BlueField.
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 2820badc7a126..3137e40fcd3e0 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5724,15 +5724,11 @@ wash:
+ 	mutex_lock(&regulator_list_mutex);
+ 	regulator_ena_gpio_free(rdev);
+ 	mutex_unlock(&regulator_list_mutex);
+-	put_device(&rdev->dev);
+-	rdev = NULL;
+ clean:
+ 	if (dangling_of_gpiod)
+ 		gpiod_put(config->ena_gpiod);
+-	if (rdev && rdev->dev.of_node)
+-		of_node_put(rdev->dev.of_node);
+-	kfree(rdev);
+ 	kfree(config);
++	put_device(&rdev->dev);
+ rinse:
+ 	if (dangling_cfg_gpiod)
+ 		gpiod_put(cfg->ena_gpiod);
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index 3ef636935a547..3ff46fc694f85 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ 	 */
+ 	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+ 	if (ret)
+-		goto err;
++		goto err_lock;
+ 	/*
+ 	 * But we don't have such restrictions imposed on the stuff that
+ 	 * is handled by the streaming API.
+ 	 */
+ 	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+ 	if (ret)
+-		goto err;
++		goto err_lock;
+ 
+ 	return sch;
+ 
++err_lock:
++	kfree(sch->lock);
+ err:
+ 	kfree(sch);
+ 	return ERR_PTR(ret);
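
Editor's note: the css_alloc_subchannel() fix adds an err_lock label so failures after the lock allocation free sch->lock before freeing sch itself. A generic sketch of this staged goto unwind, with malloc/free and a placeholder do_setup() standing in for the kernel allocations (all names illustrative):

#include <stdlib.h>

struct lock { int placeholder; };
struct sub  { struct lock *lock; };

static int do_setup(struct sub *s)
{
	(void)s;
	return 0;			/* stands in for dma_set_mask() etc. */
}

static struct sub *sub_alloc(void)
{
	struct sub *s = malloc(sizeof(*s));

	if (!s)
		return NULL;

	s->lock = malloc(sizeof(*s->lock));
	if (!s->lock)
		goto err_sub;

	if (do_setup(s))		/* failing here must free both pieces */
		goto err_lock;

	return s;

err_lock:				/* the later label unwinds the later step */
	free(s->lock);
err_sub:
	free(s);
	return NULL;
}
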
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 3fb4553a6442b..7c0c46be3fdb5 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1907,14 +1907,14 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+ 	in = &sw->ports[ev->port];
+ 	if (!tb_port_is_dpin(in)) {
+ 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
+-		goto unlock;
++		goto put_sw;
+ 	}
+ 
+ 	tb_port_dbg(in, "handling bandwidth allocation request\n");
+ 
+ 	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
+ 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+-		goto unlock;
++		goto put_sw;
+ 	}
+ 
+ 	ret = usb4_dp_port_requested_bandwidth(in);
+@@ -1923,7 +1923,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+ 			tb_port_dbg(in, "no bandwidth request active\n");
+ 		else
+ 			tb_port_warn(in, "failed to read requested bandwidth\n");
+-		goto unlock;
++		goto put_sw;
+ 	}
+ 	requested_bw = ret;
+ 
+@@ -1932,7 +1932,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+ 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ 	if (!tunnel) {
+ 		tb_port_warn(in, "failed to find tunnel\n");
+-		goto unlock;
++		goto put_sw;
+ 	}
+ 
+ 	out = tunnel->dst_port;
+@@ -1959,6 +1959,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+ 		tb_recalc_estimated_bandwidth(tb);
+ 	}
+ 
++put_sw:
++	tb_switch_put(sw);
+ unlock:
+ 	mutex_unlock(&tb->lock);
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7994a4549a6c8..45dcfaadaf98e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM			0x81e4
+ #define DELL_PRODUCT_5829E			0x81e6
+ 
++#define DELL_PRODUCT_FM101R			0x8213
++#define DELL_PRODUCT_FM101R_ESIM		0x8215
++
+ #define KYOCERA_VENDOR_ID			0x0c88
+ #define KYOCERA_PRODUCT_KPC650			0x17da
+ #define KYOCERA_PRODUCT_KPC680			0x180a
+@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(6) },
+ 	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ 	  .driver_info = RSVD(0) | RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = {
+ 	 .driver_info = NCTRL(0) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff),	/* Telit LE910C1-EUX (ECM) */
+ 	 .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index a4cb4b6429870..da519c1b6ad08 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -682,18 +682,30 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	u64 search_start;
+ 	int ret;
+ 
+-	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
+-		btrfs_err(fs_info,
+-			"COW'ing blocks on a fs root that's being dropped");
+-
+-	if (trans->transaction != fs_info->running_transaction)
+-		WARN(1, KERN_CRIT "trans %llu running %llu\n",
+-		       trans->transid,
+-		       fs_info->running_transaction->transid);
++	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++		   "attempt to COW block %llu on root %llu that is being deleted",
++			   buf->start, btrfs_root_id(root));
++		return -EUCLEAN;
++	}
+ 
+-	if (trans->transid != fs_info->generation)
+-		WARN(1, KERN_CRIT "trans %llu running %llu\n",
+-		       trans->transid, fs_info->generation);
++	/*
++	 * COWing must happen through a running transaction, which always
++	 * matches the current fs generation (it's a transaction with a state
++	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
++	 * into error state to prevent the commit of any transaction.
++	 */
++	if (unlikely(trans->transaction != fs_info->running_transaction ||
++		     trans->transid != fs_info->generation)) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
++			   buf->start, btrfs_root_id(root), trans->transid,
++			   fs_info->running_transaction->transid,
++			   fs_info->generation);
++		return -EUCLEAN;
++	}
+ 
+ 	if (!should_cow_block(trans, root, buf)) {
+ 		*cow_ret = buf;
+@@ -805,8 +817,22 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+ 	int progress_passed = 0;
+ 	struct btrfs_disk_key disk_key;
+ 
+-	WARN_ON(trans->transaction != fs_info->running_transaction);
+-	WARN_ON(trans->transid != fs_info->generation);
++	/*
++	 * COWing must happen through a running transaction, which always
++	 * matches the current fs generation (it's a transaction with a state
++	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
++	 * into error state to prevent the commit of any transaction.
++	 */
++	if (unlikely(trans->transaction != fs_info->running_transaction ||
++		     trans->transid != fs_info->generation)) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
++			   parent->start, btrfs_root_id(root), trans->transid,
++			   fs_info->running_transaction->transid,
++			   fs_info->generation);
++		return -EUCLEAN;
++	}
+ 
+ 	parent_nritems = btrfs_header_nritems(parent);
+ 	blocksize = fs_info->nodesize;
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 6a13cf00218bc..9fe4ccca50a06 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -103,24 +103,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+  * Transfer bytes to our delayed refs rsv.
+  *
+  * @fs_info:   the filesystem
+- * @src:       source block rsv to transfer from
+  * @num_bytes: number of bytes to transfer
+  *
+- * This transfers up to the num_bytes amount from the src rsv to the
++ * This transfers up to the num_bytes amount, previously reserved, to the
+  * delayed_refs_rsv.  Any extra bytes are returned to the space info.
+  */
+ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+-				       struct btrfs_block_rsv *src,
+ 				       u64 num_bytes)
+ {
+ 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ 	u64 to_free = 0;
+ 
+-	spin_lock(&src->lock);
+-	src->reserved -= num_bytes;
+-	src->size -= num_bytes;
+-	spin_unlock(&src->lock);
+-
+ 	spin_lock(&delayed_refs_rsv->lock);
+ 	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+ 		u64 delta = delayed_refs_rsv->size -
+@@ -163,6 +156,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+ 	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
+ 	u64 num_bytes = 0;
++	u64 refilled_bytes;
++	u64 to_free;
+ 	int ret = -ENOSPC;
+ 
+ 	spin_lock(&block_rsv->lock);
+@@ -178,9 +173,38 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ 	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
+ 	if (ret)
+ 		return ret;
+-	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
+-	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+-				      0, num_bytes, 1);
++
++	/*
++	 * We may have raced with someone else, so check again if we the block
++	 * reserve is still not full and release any excess space.
++	 */
++	spin_lock(&block_rsv->lock);
++	if (block_rsv->reserved < block_rsv->size) {
++		u64 needed = block_rsv->size - block_rsv->reserved;
++
++		if (num_bytes >= needed) {
++			block_rsv->reserved += needed;
++			block_rsv->full = true;
++			to_free = num_bytes - needed;
++			refilled_bytes = needed;
++		} else {
++			block_rsv->reserved += num_bytes;
++			to_free = 0;
++			refilled_bytes = num_bytes;
++		}
++	} else {
++		to_free = num_bytes;
++		refilled_bytes = 0;
++	}
++	spin_unlock(&block_rsv->lock);
++
++	if (to_free > 0)
++		btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
++						    to_free);
++
++	if (refilled_bytes > 0)
++		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
++					      refilled_bytes, 1);
+ 	return 0;
+ }
+ 
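
Editor's note: btrfs_delayed_refs_rsv_refill() now re-checks the reserve under its spinlock after the lockless reservation, keeps only what is still missing, and hands back the excess. A userspace sketch of that reserve/re-check/trim step, with a mutex standing in for the spinlock (names illustrative):

#include <pthread.h>
#include <stdint.h>

struct rsv {
	pthread_mutex_t lock;
	uint64_t size;			/* target amount  */
	uint64_t reserved;		/* current amount */
};

/* 'got' bytes were reserved without the lock held; commit only what is
 * still missing and report how much must be handed back. */
static uint64_t rsv_refill_commit(struct rsv *r, uint64_t got)
{
	uint64_t to_free;

	pthread_mutex_lock(&r->lock);
	if (r->reserved < r->size) {
		uint64_t needed = r->size - r->reserved;

		if (got >= needed) {
			r->reserved += needed;
			to_free = got - needed;	/* excess after the race */
		} else {
			r->reserved += got;
			to_free = 0;
		}
	} else {
		to_free = got;			/* raced: already full */
	}
	pthread_mutex_unlock(&r->lock);
	return to_free;
}

The caller then releases to_free back to the shared pool, as the hunk does via btrfs_space_info_free_bytes_may_use().
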
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index b8e14b0ba5f16..fd9bf2b709c0e 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -407,7 +407,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ 				  enum btrfs_reserve_flush_enum flush);
+ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+-				       struct btrfs_block_rsv *src,
+ 				       u64 num_bytes);
+ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0917c5f39e3d0..2cf8d646085c2 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1715,12 +1715,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ 		parent = ref->parent;
+ 	ref_root = ref->root;
+ 
+-	if (node->ref_mod != 1) {
++	if (unlikely(node->ref_mod != 1)) {
+ 		btrfs_err(trans->fs_info,
+-	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
++	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
+ 			  node->bytenr, node->ref_mod, node->action, ref_root,
+ 			  parent);
+-		return -EIO;
++		return -EUCLEAN;
+ 	}
+ 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
+ 		BUG_ON(!extent_op || !extent_op->update_flags);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index d27b0d86b8e2c..bf35b6fce8f07 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2978,7 +2978,7 @@ static void get_block_group_info(struct list_head *groups_list,
+ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ 				   void __user *arg)
+ {
+-	struct btrfs_ioctl_space_args space_args;
++	struct btrfs_ioctl_space_args space_args = { 0 };
+ 	struct btrfs_ioctl_space_info space;
+ 	struct btrfs_ioctl_space_info *dest;
+ 	struct btrfs_ioctl_space_info *dest_orig;
+@@ -4338,7 +4338,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ 
+ 	if (compat) {
+ #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+-		struct btrfs_ioctl_send_args_32 args32;
++		struct btrfs_ioctl_send_args_32 args32 = { 0 };
+ 
+ 		ret = copy_from_user(&args32, argp, sizeof(args32));
+ 		if (ret)
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 5bbd288b9cb54..0554ca2a8e3ba 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -625,14 +625,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 			reloc_reserved = true;
+ 		}
+ 
+-		ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
++		ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
+ 		if (ret)
+ 			goto reserve_fail;
+ 		if (delayed_refs_bytes) {
+-			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
+-							  delayed_refs_bytes);
++			btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
+ 			num_bytes -= delayed_refs_bytes;
+ 		}
++		btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
+ 
+ 		if (rsv->space_info->force_alloc)
+ 			do_chunk_alloc = true;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 365a1cc0a3c35..a00e7a0bc713d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4722,7 +4722,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *leaf;
+ 	int slot;
+ 	int ins_nr = 0;
+-	int start_slot;
++	int start_slot = 0;
+ 	int ret;
+ 
+ 	if (!(inode->flags & BTRFS_INODE_PREALLOC))
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 51070c0d4141e..5019e9244d2d2 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5125,7 +5125,7 @@ static void init_alloc_chunk_ctl_policy_regular(
+ 	ASSERT(space_info);
+ 
+ 	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
+-	ctl->max_stripe_size = ctl->max_chunk_size;
++	ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
+ 
+ 	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
+ 		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index aca4b48113945..d532a93e980d7 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1535,10 +1535,15 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ 
+ 	if (wbc->pages_skipped) {
+ 		/*
+-		 * writeback is not making progress due to locked
+-		 * buffers. Skip this inode for now.
++		 * Writeback is not making progress due to locked buffers.
++		 * Skip this inode for now. Although having skipped pages
++		 * is odd for clean inodes, it can happen for some
++		 * filesystems so handle that gracefully.
+ 		 */
+-		redirty_tail_locked(inode, wb);
++		if (inode->i_state & I_DIRTY_ALL)
++			redirty_tail_locked(inode, wb);
++		else
++			inode_cgwb_move_to_attached(inode, wb);
+ 		return;
+ 	}
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index 567ee547492bc..94565bd7e73f6 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -188,7 +188,7 @@ getname_flags(const char __user *filename, int flags, int *empty)
+ 		}
+ 	}
+ 
+-	result->refcnt = 1;
++	atomic_set(&result->refcnt, 1);
+ 	/* The empty path is special. */
+ 	if (unlikely(!len)) {
+ 		if (empty)
+@@ -249,7 +249,7 @@ getname_kernel(const char * filename)
+ 	memcpy((char *)result->name, filename, len);
+ 	result->uptr = NULL;
+ 	result->aname = NULL;
+-	result->refcnt = 1;
++	atomic_set(&result->refcnt, 1);
+ 	audit_getname(result);
+ 
+ 	return result;
+@@ -261,9 +261,10 @@ void putname(struct filename *name)
+ 	if (IS_ERR(name))
+ 		return;
+ 
+-	BUG_ON(name->refcnt <= 0);
++	if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
++		return;
+ 
+-	if (--name->refcnt > 0)
++	if (!atomic_dec_and_test(&name->refcnt))
+ 		return;
+ 
+ 	if (name->name != name->iname) {
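
Editor's note: with filename->refcnt now an atomic_t, getname()/putname() no longer rely on outside serialization; only the caller that drops the count to zero frees. A C11 stdatomic sketch of the same get/put pair (the free logic is an illustrative stand-in):

#include <stdatomic.h>
#include <stdlib.h>

struct name {
	atomic_int refcnt;
	char *str;
};

static void name_get(struct name *n)
{
	atomic_fetch_add(&n->refcnt, 1);
}

static void name_put(struct name *n)
{
	/* Only the caller that drops the count from 1 to 0 frees. */
	if (atomic_fetch_sub(&n->refcnt, 1) == 1) {
		free(n->str);
		free(n);
	}
}
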
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index a1dc338649062..ef817a0475ffa 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+ 	return i;
+ }
+ 
+-static int
+-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
++static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+ {
++	struct pnfs_layout_hdr *lo;
+ 	struct nfs4_flexfile_layout *ff_layout;
+ 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
+ 
+@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+ 		return -ENOMEM;
+ 
+ 	spin_lock(&args->inode->i_lock);
+-	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
+-	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+-						       &args->devinfo[0],
+-						       dev_count,
+-						       NFS4_FF_OP_LAYOUTSTATS);
++	lo = NFS_I(args->inode)->layout;
++	if (lo && pnfs_layout_is_valid(lo)) {
++		ff_layout = FF_LAYOUT_FROM_HDR(lo);
++		args->num_dev = ff_layout_mirror_prepare_stats(
++			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
++			NFS4_FF_OP_LAYOUTSTATS);
++	} else
++		args->num_dev = 0;
+ 	spin_unlock(&args->inode->i_lock);
+ 	if (!args->num_dev) {
+ 		kfree(args->devinfo);
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 063e00aff87ed..28704f924612c 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	if (status == 0) {
+ 		if (nfs_should_remove_suid(inode)) {
+ 			spin_lock(&inode->i_lock);
+-			nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
++			nfs_set_cache_invalid(inode,
++				NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
+ 			spin_unlock(&inode->i_lock);
+ 		}
+ 		status = nfs_post_op_update_inode_force_wcc(inode,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 51029e4b60f56..5f088e3eeca1d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8870,8 +8870,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ 	/* Save the EXCHANGE_ID verifier session trunk tests */
+ 	memcpy(clp->cl_confirm.data, argp->verifier.data,
+ 	       sizeof(clp->cl_confirm.data));
+-	if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
+-		set_bit(NFS_CS_DS, &clp->cl_flags);
+ out:
+ 	trace_nfs4_exchange_id(clp, status);
+ 	rpc_put_task(task);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 306cba0b9e69a..84343aefbbd64 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
+ 	return mode == 0;
+ }
+ 
+-static int
+-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
++static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
++					      void *data)
+ {
+ 	const struct pnfs_layout_range *range = data;
++	const struct cred *cred;
+ 	struct pnfs_layout_hdr *lo;
+ 	struct inode *inode;
++	nfs4_stateid stateid;
++	enum pnfs_iomode iomode;
++
+ restart:
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+-		if (!pnfs_layout_can_be_returned(lo) ||
++		inode = lo->plh_inode;
++		if (!inode || !pnfs_layout_can_be_returned(lo) ||
+ 		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ 			continue;
+-		inode = lo->plh_inode;
+ 		spin_lock(&inode->i_lock);
+-		if (!pnfs_should_return_unused_layout(lo, range)) {
++		if (!lo->plh_inode ||
++		    !pnfs_should_return_unused_layout(lo, range)) {
+ 			spin_unlock(&inode->i_lock);
+ 			continue;
+ 		}
++		pnfs_get_layout_hdr(lo);
++		pnfs_set_plh_return_info(lo, range->iomode, 0);
++		if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
++						    range, 0) != 0 ||
++		    !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
++			spin_unlock(&inode->i_lock);
++			rcu_read_unlock();
++			pnfs_put_layout_hdr(lo);
++			cond_resched();
++			goto restart;
++		}
+ 		spin_unlock(&inode->i_lock);
+-		inode = pnfs_grab_inode_layout_hdr(lo);
+-		if (!inode)
+-			continue;
+ 		rcu_read_unlock();
+-		pnfs_mark_layout_for_return(inode, range);
+-		iput(inode);
++		pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
++		pnfs_put_layout_hdr(lo);
+ 		cond_resched();
+ 		goto restart;
+ 	}
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 8c1ee1a1a28f1..9d82d50ce0b12 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -788,6 +788,8 @@ static void nfs_inode_add_request(struct nfs_page *req)
+  */
+ static void nfs_inode_remove_request(struct nfs_page *req)
+ {
++	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
++
+ 	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
+ 		struct folio *folio = nfs_page_to_folio(req->wb_head);
+ 		struct address_space *mapping = folio_file_mapping(folio);
+@@ -802,8 +804,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
+ 	}
+ 
+ 	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
++		atomic_long_dec(&nfsi->nrequests);
+ 		nfs_release_request(req);
+-		atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ 	}
+ }
+ 
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index f69c451018e33..62fe0b679e586 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -1585,16 +1585,25 @@ static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
+ }
+ 
+ /* Check if filesystem can encode a unique fid */
+-static int fanotify_test_fid(struct dentry *dentry)
++static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
+ {
++	unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
++	const struct export_operations *nop = dentry->d_sb->s_export_op;
++
++	/*
++	 * We need to make sure that the filesystem supports encoding of
++	 * file handles so user can use name_to_handle_at() to compare fids
++	 * reported with events to the file handle of watched objects.
++	 */
++	if (!nop)
++		return -EOPNOTSUPP;
++
+ 	/*
+-	 * We need to make sure that the file system supports at least
+-	 * encoding a file handle so user can use name_to_handle_at() to
+-	 * compare fid returned with event to the file handle of watched
+-	 * objects. However, even the relaxed AT_HANDLE_FID flag requires
+-	 * at least empty export_operations for ecoding unique file ids.
++	 * For sb/mount mark, we also need to make sure that the filesystem
++	 * supports decoding file handles, so user has a way to map back the
++	 * reported fids to filesystem objects.
+ 	 */
+-	if (!dentry->d_sb->s_export_op)
++	if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
+ 		return -EOPNOTSUPP;
+ 
+ 	return 0;
+@@ -1812,7 +1821,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
+ 		if (ret)
+ 			goto path_put_and_out;
+ 
+-		ret = fanotify_test_fid(path.dentry);
++		ret = fanotify_test_fid(path.dentry, flags);
+ 		if (ret)
+ 			goto path_put_and_out;
+ 
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 33afee0f55593..9ddb2ab23b954 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -2461,10 +2461,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
+ {
+ 	CLST end, i, zone_len, zlen;
+ 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
++	bool dirty = false;
+ 
+ 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+ 	if (!wnd_is_used(wnd, lcn, len)) {
+-		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++		/* Mark the volume as dirty outside of wnd->rw_lock. */
++		dirty = true;
+ 
+ 		end = lcn + len;
+ 		len = 0;
+@@ -2518,6 +2520,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
+ 
+ out:
+ 	up_write(&wnd->rw_lock);
++	if (dirty)
++		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
+ 
+ /*
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 124c6e822623f..cf92b2433f7a7 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
+ 	u32 total = le32_to_cpu(hdr->total);
+ 	u16 offs[128];
+ 
++	if (unlikely(!cmp))
++		return NULL;
++
+ fill_table:
+ 	if (end > total)
+ 		return NULL;
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 629403ede6e5f..788567d71d939 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -42,9 +42,11 @@ enum utf16_endian;
+ #define MINUS_ONE_T			((size_t)(-1))
+ /* Biggest MFT / smallest cluster */
+ #define MAXIMUM_BYTES_PER_MFT		4096
++#define MAXIMUM_SHIFT_BYTES_PER_MFT	12
+ #define NTFS_BLOCKS_PER_MFT_RECORD	(MAXIMUM_BYTES_PER_MFT / 512)
+ 
+ #define MAXIMUM_BYTES_PER_INDEX		4096
++#define MAXIMUM_SHIFT_BYTES_PER_INDEX	12
+ #define NTFS_BLOCKS_PER_INODE		(MAXIMUM_BYTES_PER_INDEX / 512)
+ 
+ /* NTFS specific error code when fixup failed. */
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 1a02072b6b0e1..d6b5170253a69 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -855,6 +855,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ 
+ check_boot:
+ 	err = -EINVAL;
++
++	/* Corrupted image; do not read OOB */
++	if (bh->b_size - sizeof(*boot) < boot_off)
++		goto out;
++
+ 	boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
+ 
+ 	if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1)) {
+@@ -901,9 +906,17 @@ check_boot:
+ 		goto out;
+ 	}
+ 
+-	sbi->record_size = record_size =
+-		boot->record_size < 0 ? 1 << (-boot->record_size) :
+-					(u32)boot->record_size << cluster_bits;
++	if (boot->record_size >= 0) {
++		record_size = (u32)boot->record_size << cluster_bits;
++	} else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) {
++		record_size = 1u << (-boot->record_size);
++	} else {
++		ntfs_err(sb, "%s: invalid record size %d.", hint,
++			 boot->record_size);
++		goto out;
++	}
++
++	sbi->record_size = record_size;
+ 	sbi->record_bits = blksize_bits(record_size);
+ 	sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
+ 
+@@ -920,9 +933,15 @@ check_boot:
+ 		goto out;
+ 	}
+ 
+-	sbi->index_size = boot->index_size < 0 ?
+-				  1u << (-boot->index_size) :
+-				  (u32)boot->index_size << cluster_bits;
++	if (boot->index_size >= 0) {
++		sbi->index_size = (u32)boot->index_size << cluster_bits;
++	} else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) {
++		sbi->index_size = 1u << (-boot->index_size);
++	} else {
++		ntfs_err(sb, "%s: invalid index size %d.", hint,
++			 boot->index_size);
++		goto out;
++	}
+ 
+ 	/* Check index record size. */
+ 	if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
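
Editor's note: both boot-sector decodes above now bound the negative-exponent form before shifting instead of trusting on-disk bytes. A compact sketch of the decode with an explicit cap, mirroring the idea behind the MAXIMUM_SHIFT_* limits (constants and names illustrative):

#include <stdint.h>

#define MAX_SHIFT 12			/* caps 1 << n at 4096 bytes */

/* On-disk size byte: >= 0 means clusters, negative means 1 << -n bytes.
 * Returns 0 (invalid) rather than shifting out of range. */
static uint32_t decode_size(int8_t raw, unsigned int cluster_bits)
{
	if (raw >= 0)
		return (uint32_t)raw << cluster_bits;
	if (-raw <= MAX_SHIFT)
		return 1u << -raw;
	return 0;			/* corrupt image: caller rejects */
}
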
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 023f314e89501..c59d6f5a725a9 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -211,7 +211,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 	size = le32_to_cpu(info->size);
+ 
+ 	/* Enumerate all xattrs. */
+-	for (ret = 0, off = 0; off < size; off += ea_size) {
++	ret = 0;
++	for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
+ 		ea = Add2Ptr(ea_all, off);
+ 		ea_size = unpacked_ea_size(ea);
+ 
+@@ -219,6 +220,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 			break;
+ 
+ 		if (buffer) {
++			/* Check if we can use field ea->name */
++			if (off + ea_size > size)
++				break;
++
+ 			if (ret + ea->name_len + 1 > bytes_per_buffer) {
+ 				err = -ERANGE;
+ 				goto out;
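
Editor's note: the ntfs_list_ea() loop above checks that the fixed header fits before reading ea_size, and that the whole entry fits before touching ea->name. The same bounds discipline for walking variable-length records in an untrusted buffer, sketched in plain C (the record layout is illustrative):

#include <stddef.h>
#include <stdint.h>

struct rec {				/* illustrative record header */
	uint32_t size;			/* total size incl. header */
	uint8_t payload[];
};

static void walk_records(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	/* The header must fit before rec->size is read ... */
	while (off + sizeof(struct rec) <= len) {
		const struct rec *r = (const void *)(buf + off);

		/* ... and the whole record before payload is used. */
		if (r->size < sizeof(*r) || r->size > len - off)
			break;		/* truncated or corrupt: stop */

		/* consume r->payload here */
		off += r->size;
	}
}
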
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 986d37a4c2750..ab32c6b28d400 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -337,7 +337,7 @@ static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
+ {
+ 	struct iattr attr = {
+ 		.ia_valid =
+-		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
++		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
+ 		.ia_atime = stat->atime,
+ 		.ia_mtime = stat->mtime,
+ 	};
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 562f2623c9c9f..87a21a18d114a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2318,7 +2318,7 @@ struct audit_names;
+ struct filename {
+ 	const char		*name;	/* pointer to actual string */
+ 	const __user char	*uptr;	/* original userland pointer */
+-	int			refcnt;
++	atomic_t		refcnt;
+ 	struct audit_names	*aname;
+ 	const char		iname[];
+ };
+diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
+index 0b6b59f7cfbdc..56047a4e54c9c 100644
+--- a/include/linux/mtd/jedec.h
++++ b/include/linux/mtd/jedec.h
+@@ -21,6 +21,9 @@ struct jedec_ecc_info {
+ /* JEDEC features */
+ #define JEDEC_FEATURE_16_BIT_BUS	(1 << 0)
+ 
++/* JEDEC Optional Commands */
++#define JEDEC_OPT_CMD_READ_CACHE	BIT(1)
++
+ struct nand_jedec_params {
+ 	/* rev info and features block */
+ 	/* 'J' 'E' 'S' 'D'  */
+diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
+index a7376f9beddfd..55ab2e4d62f94 100644
+--- a/include/linux/mtd/onfi.h
++++ b/include/linux/mtd/onfi.h
+@@ -55,6 +55,7 @@
+ #define ONFI_SUBFEATURE_PARAM_LEN	4
+ 
+ /* ONFI optional commands SET/GET FEATURES supported? */
++#define ONFI_OPT_CMD_READ_CACHE		BIT(1)
+ #define ONFI_OPT_CMD_SET_GET_FEATURES	BIT(2)
+ 
+ struct nand_onfi_params {
+diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
+index 5159d692f9ce5..e8864c4f3921d 100644
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -225,6 +225,7 @@ struct gpio_desc;
+  * struct nand_parameters - NAND generic parameters from the parameter page
+  * @model: Model name
+  * @supports_set_get_features: The NAND chip supports setting/getting features
++ * @supports_read_cache: The NAND chip supports read cache operations
+  * @set_feature_list: Bitmap of features that can be set
+  * @get_feature_list: Bitmap of features that can be get
+  * @onfi: ONFI specific parameters
+@@ -233,6 +234,7 @@ struct nand_parameters {
+ 	/* Generic parameters */
+ 	const char *model;
+ 	bool supports_set_get_features;
++	bool supports_read_cache;
+ 	DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ 	DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+ 
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index e657916c9509c..227e9d45f61b6 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -704,6 +704,7 @@ struct perf_event {
+ 	/* The cumulative AND of all event_caps for events in this group. */
+ 	int				group_caps;
+ 
++	unsigned int			group_generation;
+ 	struct perf_event		*group_leader;
+ 	/*
+ 	 * event->pmu will always point to pmu in which this event belongs.
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 7b4dd69555e49..27cc1d4643219 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -3,8 +3,8 @@
+ #define _LINUX_VIRTIO_NET_H
+ 
+ #include <linux/if_vlan.h>
++#include <linux/udp.h>
+ #include <uapi/linux/tcp.h>
+-#include <uapi/linux/udp.h>
+ #include <uapi/linux/virtio_net.h>
+ 
+ static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+@@ -151,9 +151,22 @@ retry:
+ 		unsigned int nh_off = p_off;
+ 		struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 
+-		/* UFO may not include transport header in gso_size. */
+-		if (gso_type & SKB_GSO_UDP)
++		switch (gso_type & ~SKB_GSO_TCP_ECN) {
++		case SKB_GSO_UDP:
++			/* UFO may not include transport header in gso_size. */
+ 			nh_off -= thlen;
++			break;
++		case SKB_GSO_UDP_L4:
++			if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++				return -EINVAL;
++			if (skb->csum_offset != offsetof(struct udphdr, check))
++				return -EINVAL;
++			if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
++				return -EINVAL;
++			if (gso_type != SKB_GSO_UDP_L4)
++				return -EINVAL;
++			break;
++		}
+ 
+ 		/* Kernel has a special handling for GSO_BY_FRAGS. */
+ 		if (gso_size == GSO_BY_FRAGS)
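
Editor's note: for SKB_GSO_UDP_L4 the header fields come from an untrusted guest or userspace, so the new case rejects anything that is not a plain, checksummed UDP GSO request within segment limits. A standalone sketch of the same reject-early checks (the struct layout and constants are illustrative stand-ins, not the uapi definitions):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define F_NEEDS_CSUM	0x1		/* mirrors VIRTIO_NET_HDR_F_NEEDS_CSUM */
#define UDP_CSUM_OFF	6		/* offsetof(struct udphdr, check) */
#define UDP_MAX_SEGS	128		/* assumed stand-in for UDP_MAX_SEGMENTS */

struct gso_hdr {			/* illustrative, not the uapi layout */
	uint8_t flags;
	uint16_t gso_size;
	uint16_t csum_offset;
};

static bool udp_l4_gso_ok(const struct gso_hdr *h, size_t payload_len)
{
	if (!(h->flags & F_NEEDS_CSUM))
		return false;		/* checksum offload is mandatory */
	if (h->csum_offset != UDP_CSUM_OFF)
		return false;		/* must point at udphdr->check */
	if (h->gso_size == 0 ||
	    payload_len > (size_t)h->gso_size * UDP_MAX_SEGS)
		return false;		/* segment count out of bounds */
	return true;
}
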
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c0a87558aea71..abb7cb5db9457 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -350,7 +350,7 @@ struct hci_dev {
+ 	struct list_head list;
+ 	struct mutex	lock;
+ 
+-	char		name[8];
++	const char	*name;
+ 	unsigned long	flags;
+ 	__u16		id;
+ 	__u8		bus;
+@@ -1426,6 +1426,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
+ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
+ 
+ void hci_conn_failed(struct hci_conn *conn, u8 status);
++u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
+ 
+ /*
+  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
+diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
+index 2d5fcda1bcd05..082f89531b889 100644
+--- a/include/net/bluetooth/hci_mon.h
++++ b/include/net/bluetooth/hci_mon.h
+@@ -56,7 +56,7 @@ struct hci_mon_new_index {
+ 	__u8		type;
+ 	__u8		bus;
+ 	bdaddr_t	bdaddr;
+-	char		name[8];
++	char		name[8] __nonstring;
+ } __packed;
+ #define HCI_MON_NEW_INDEX_SIZE 16
+ 
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index b516a0f4a55b8..57eeb07aeb251 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -5,6 +5,9 @@
+  * Copyright (C) 2021 Intel Corporation
+  */
+ 
++#define UINT_PTR(_handle)		((void *)((uintptr_t)_handle))
++#define PTR_UINT(_ptr)			((uintptr_t)((void *)_ptr))
++
+ typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+ typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ 					    int err);
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index f0c13864180e2..15de07d365405 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -154,6 +154,7 @@ struct fib_info {
+ 	int			fib_nhs;
+ 	bool			fib_nh_is_v6;
+ 	bool			nh_updated;
++	bool			pfsrc_removed;
+ 	struct nexthop		*nh;
+ 	struct rcu_head		rcu;
+ 	struct fib_nh		fib_nh[];
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index bd7c3be4af5d7..423b52eca908d 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -50,6 +50,7 @@ struct netns_xfrm {
+ 	struct list_head	policy_all;
+ 	struct hlist_head	*policy_byidx;
+ 	unsigned int		policy_idx_hmask;
++	unsigned int		idx_generator;
+ 	struct hlist_head	policy_inexact[XFRM_POLICY_MAX];
+ 	struct xfrm_policy_hash	policy_bydst[XFRM_POLICY_MAX];
+ 	unsigned int		policy_count[XFRM_POLICY_MAX * 2];
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 4e787285fc66b..fc189910e63fc 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -336,7 +336,7 @@ struct sk_filter;
+   *	@sk_cgrp_data: cgroup data for this cgroup
+   *	@sk_memcg: this socket's memory cgroup association
+   *	@sk_write_pending: a write to stream socket waits to start
+-  *	@sk_wait_pending: number of threads blocked on this socket
++  *	@sk_disconnects: number of disconnect operations performed on this sock
+   *	@sk_state_change: callback to indicate change in the state of the sock
+   *	@sk_data_ready: callback to indicate there is data to be processed
+   *	@sk_write_space: callback to indicate there is bf sending space available
+@@ -429,7 +429,7 @@ struct sock {
+ 	unsigned int		sk_napi_id;
+ #endif
+ 	int			sk_rcvbuf;
+-	int			sk_wait_pending;
++	int			sk_disconnects;
+ 
+ 	struct sk_filter __rcu	*sk_filter;
+ 	union {
+@@ -1189,8 +1189,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ }
+ 
+ #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
+-	({	int __rc;						\
+-		__sk->sk_wait_pending++;				\
++	({	int __rc, __dis = __sk->sk_disconnects;			\
+ 		release_sock(__sk);					\
+ 		__rc = __condition;					\
+ 		if (!__rc) {						\
+@@ -1200,8 +1199,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ 		}							\
+ 		sched_annotate_sleep();					\
+ 		lock_sock(__sk);					\
+-		__sk->sk_wait_pending--;				\
+-		__rc = __condition;					\
++		__rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
+ 		__rc;							\
+ 	})
+ 
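
Editor's note: the reworked sk_wait_event() snapshots sk_disconnects before dropping the socket lock and, after reacquiring it, refuses to trust the wait condition if the counter moved. A compact sketch of that snapshot-and-compare idiom around an unlocked window, with a pthread mutex in place of the socket lock (names illustrative; a real implementation sleeps instead of spinning):

#include <pthread.h>

struct chan {
	pthread_mutex_t lock;
	int disconnects;		/* bumped by disconnect, under lock */
	int ready;
};

/* Returns 1 when 'ready' is seen, -1 if a disconnect raced with the
 * unlocked window (mirrors sk_wait_event() returning -EPIPE). */
static int chan_wait_ready(struct chan *c)
{
	int snap;

	pthread_mutex_lock(&c->lock);
	snap = c->disconnects;		/* snapshot before unlocking */
	while (!c->ready) {
		pthread_mutex_unlock(&c->lock);
		/* wait here without the lock */
		pthread_mutex_lock(&c->lock);
		if (c->disconnects != snap) {
			pthread_mutex_unlock(&c->lock);
			return -1;	/* don't trust 'ready' anymore */
		}
	}
	pthread_mutex_unlock(&c->lock);
	return 1;
}
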
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index b1b1e01c69839..7f684806c2912 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -142,6 +142,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+ #define TCP_RTO_MAX	((unsigned)(120*HZ))
+ #define TCP_RTO_MIN	((unsigned)(HZ/5))
+ #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
++
++#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
++
+ #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
+ #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
+ 						 * used as a fallback RTO for the
+diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
+index 5eaa1fa991715..833143d0992e0 100644
+--- a/include/trace/events/neigh.h
++++ b/include/trace/events/neigh.h
+@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
+ 	),
+ 
+ 	TP_fast_assign(
+-		struct in6_addr *pin6;
+ 		__be32 *p32;
+ 
+ 		__entry->family = tbl->family;
+@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
+ 		__entry->entries = atomic_read(&tbl->gc_entries);
+ 		__entry->created = n != NULL;
+ 		__entry->gc_exempt = exempt_from_gc;
+-		pin6 = (struct in6_addr *)__entry->primary_key6;
+ 		p32 = (__be32 *)__entry->primary_key4;
+ 
+ 		if (tbl->family == AF_INET)
+@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		if (tbl->family == AF_INET6) {
++			struct in6_addr *pin6;
++
+ 			pin6 = (struct in6_addr *)__entry->primary_key6;
+ 			*pin6 = *(struct in6_addr *)pkey;
+ 		}
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 2c03bc881edfd..fbab9b2727fde 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -1130,9 +1130,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ 	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
+ 	if (!wq)
+ 		return ERR_PTR(-ENOMEM);
+-	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+-	if (ret)
+-		goto err_wq;
+ 
+ 	refcount_inc(&data->hash->refs);
+ 	wq->hash = data->hash;
+@@ -1165,13 +1162,14 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ 	wq->task = get_task_struct(data->task);
+ 	atomic_set(&wq->worker_refs, 1);
+ 	init_completion(&wq->worker_done);
++	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
++	if (ret)
++		goto err;
++
+ 	return wq;
+ err:
+ 	io_wq_put_hash(data->hash);
+-	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+-
+ 	free_cpumask_var(wq->cpu_mask);
+-err_wq:
+ 	kfree(wq);
+ 	return ERR_PTR(ret);
+ }
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 9736416136a10..3a55d2be8e25b 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2666,7 +2666,11 @@ static void io_pages_free(struct page ***pages, int npages)
+ 
+ 	if (!pages)
+ 		return;
++
+ 	page_array = *pages;
++	if (!page_array)
++		return;
++
+ 	for (i = 0; i < npages; i++)
+ 		unpin_user_page(page_array[i]);
+ 	kvfree(page_array);
+@@ -2750,7 +2754,9 @@ static void io_rings_free(struct io_ring_ctx *ctx)
+ 		ctx->sq_sqes = NULL;
+ 	} else {
+ 		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
++		ctx->n_ring_pages = 0;
+ 		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
++		ctx->n_sqe_pages = 0;
+ 	}
+ }
+ 
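
Editor's note: io_pages_free() now tolerates a NULL array, and io_rings_free() zeroes the page counts after freeing, so a second teardown cannot walk stale state. The same free-and-reset discipline in a few lines of C (the structure is illustrative):

#include <stdlib.h>

struct page_set {
	void **pages;
	int npages;
};

/* Safe on a never-populated set and safe to call twice. */
static void page_set_free(struct page_set *ps)
{
	int i;

	if (!ps->pages)
		return;			/* nothing was ever allocated */

	for (i = 0; i < ps->npages; i++)
		free(ps->pages[i]);
	free(ps->pages);

	ps->pages = NULL;		/* a second call becomes a no-op */
	ps->npages = 0;			/* and stale counts are harmless */
}
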
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index eae5dfe9b9a01..28ea7411ff66f 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2210,7 +2210,7 @@ __audit_reusename(const __user char *uptr)
+ 		if (!n->name)
+ 			continue;
+ 		if (n->name->uptr == uptr) {
+-			n->name->refcnt++;
++			atomic_inc(&n->name->refcnt);
+ 			return n->name;
+ 		}
+ 	}
+@@ -2239,7 +2239,7 @@ void __audit_getname(struct filename *name)
+ 	n->name = name;
+ 	n->name_len = AUDIT_NAME_FULL;
+ 	name->aname = n;
+-	name->refcnt++;
++	atomic_inc(&name->refcnt);
+ }
+ 
+ static inline int audit_copy_fcaps(struct audit_names *name,
+@@ -2371,7 +2371,7 @@ out_alloc:
+ 		return;
+ 	if (name) {
+ 		n->name = name;
+-		name->refcnt++;
++		atomic_inc(&name->refcnt);
+ 	}
+ 
+ out:
+@@ -2498,7 +2498,7 @@ void __audit_inode_child(struct inode *parent,
+ 		if (found_parent) {
+ 			found_child->name = found_parent->name;
+ 			found_child->name_len = AUDIT_NAME_FULL;
+-			found_child->name->refcnt++;
++			atomic_inc(&found_child->name->refcnt);
+ 		}
+ 	}
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 78ae7b6f90fdb..b3d800738fc5f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1954,6 +1954,7 @@ static void perf_group_attach(struct perf_event *event)
+ 
+ 	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
+ 	group_leader->nr_siblings++;
++	group_leader->group_generation++;
+ 
+ 	perf_event__header_size(group_leader);
+ 
+@@ -2144,6 +2145,7 @@ static void perf_group_detach(struct perf_event *event)
+ 	if (leader != event) {
+ 		list_del_init(&event->sibling_list);
+ 		event->group_leader->nr_siblings--;
++		event->group_leader->group_generation++;
+ 		goto out;
+ 	}
+ 
+@@ -5440,7 +5442,7 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 					u64 read_format, u64 *values)
+ {
+ 	struct perf_event_context *ctx = leader->ctx;
+-	struct perf_event *sub;
++	struct perf_event *sub, *parent;
+ 	unsigned long flags;
+ 	int n = 1; /* skip @nr */
+ 	int ret;
+@@ -5450,6 +5452,33 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 		return ret;
+ 
+ 	raw_spin_lock_irqsave(&ctx->lock, flags);
++	/*
++	 * Verify the grouping between the parent and child (inherited)
++	 * events is still intact.
++	 *
++	 * Specifically:
++	 *  - leader->ctx->lock pins leader->sibling_list
++	 *  - parent->child_mutex pins parent->child_list
++	 *  - parent->ctx->mutex pins parent->sibling_list
++	 *
++	 * Because parent->ctx != leader->ctx (and child_list nests inside
++	 * ctx->mutex), group destruction is not atomic between children, also
++	 * see perf_event_release_kernel(). Additionally, parent can grow the
++	 * group.
++	 *
++	 * Therefore it is possible to have parent and child groups in a
++	 * different configuration and summing over such a beast makes no sense
++	 * whatsoever.
++	 *
++	 * Reject this.
++	 */
++	parent = leader->parent;
++	if (parent &&
++	    (parent->group_generation != leader->group_generation ||
++	     parent->nr_siblings != leader->nr_siblings)) {
++		ret = -ECHILD;
++		goto unlock;
++	}
+ 
+ 	/*
+ 	 * Since we co-schedule groups, {enabled,running} times of siblings
+@@ -5483,8 +5512,9 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 			values[n++] = atomic64_read(&sub->lost_samples);
+ 	}
+ 
++unlock:
+ 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+-	return 0;
++	return ret;
+ }
+ 
+ static int perf_read_group(struct perf_event *event,
+@@ -5503,10 +5533,6 @@ static int perf_read_group(struct perf_event *event,
+ 
+ 	values[0] = 1 + leader->nr_siblings;
+ 
+-	/*
+-	 * By locking the child_mutex of the leader we effectively
+-	 * lock the child list of all siblings.. XXX explain how.
+-	 */
+ 	mutex_lock(&leader->child_mutex);
+ 
+ 	ret = __perf_read_group_add(leader, read_format, values);
+@@ -13357,6 +13383,7 @@ static int inherit_group(struct perf_event *parent_event,
+ 		    !perf_get_aux_event(child_ctr, leader))
+ 			return -EINVAL;
+ 	}
++	leader->group_generation = parent_event->group_generation;
+ 	return 0;
+ }
+ 
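
Editor's note: group_generation gives __perf_read_group_add() a cheap way to prove the parent's sibling list has not been reshaped since the child was cloned: every attach/detach bumps it, and a mismatch aborts the read with -ECHILD. A sketch of the generation-stamp check (names illustrative):

#include <stdbool.h>

struct group {
	int nr_members;
	unsigned int generation;	/* bumped on every add/remove */
};

static void group_add(struct group *g)
{
	g->nr_members++;
	g->generation++;
}

static void group_remove(struct group *g)
{
	g->nr_members--;
	g->generation++;
}

/* A reader that captured both values earlier can prove the group's
 * shape is unchanged before summing per-member data. */
static bool group_unchanged(const struct group *g,
			    unsigned int snap_gen, int snap_members)
{
	return g->generation == snap_gen && g->nr_members == snap_members;
}
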
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 4492608b7d7f1..458d359f5991c 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -350,7 +350,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+ 	 * Except when the rq is capped by uclamp_max.
+ 	 */
+ 	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
+-	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
++	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
++	    !sg_policy->need_freq_update) {
+ 		next_f = sg_policy->next_freq;
+ 
+ 		/* Restore cached freq as next_freq has changed */
+diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
+index 3b21f40632582..881f90f0cbcfa 100644
+--- a/kernel/trace/fprobe.c
++++ b/kernel/trace/fprobe.c
+@@ -189,7 +189,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ {
+ 	int i, size;
+ 
+-	if (num < 0)
++	if (num <= 0)
+ 		return -EINVAL;
+ 
+ 	if (!fp->exit_handler) {
+@@ -202,8 +202,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ 		size = fp->nr_maxactive;
+ 	else
+ 		size = num * num_possible_cpus() * 2;
+-	if (size < 0)
+-		return -E2BIG;
++	if (size <= 0)
++		return -EINVAL;
+ 
+ 	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+ 	if (!fp->rethook)
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 0cf84a7449f5b..9841589b4af7f 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2777,6 +2777,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
+ 				update_event_fields(call, map[i]);
+ 			}
+ 		}
++		cond_resched();
+ 	}
+ 	up_write(&trace_event_sem);
+ }
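
The trace_events fix is a single cond_resched() in the outer loop, so a large eval-map update cannot monopolize the CPU between natural rescheduling points. A loose user-space analog of the same courtesy-yield pattern, with sched_yield() standing in for cond_resched():

#include <sched.h>

/* update_one() is a hypothetical per-item worker; the point is the
 * voluntary yield between iterations. */
static void update_all(void (*update_one)(int idx), int len)
{
	for (int i = 0; i < len; i++) {
		update_one(i);	/* potentially slow per-item work */
		sched_yield();	/* let other runnable tasks in */
	}
}
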
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 23dba01831f79..92dbb21c69616 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -705,6 +705,25 @@ static struct notifier_block trace_kprobe_module_nb = {
+ 	.priority = 1	/* Invoked after kprobe module callback */
+ };
+ 
++static int count_symbols(void *data, unsigned long unused)
++{
++	unsigned int *count = data;
++
++	(*count)++;
++
++	return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++	unsigned int count;
++
++	count = 0;
++	kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
++
++	return count;
++}
++
+ static int __trace_kprobe_create(int argc, const char *argv[])
+ {
+ 	/*
+@@ -836,6 +855,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ 		}
+ 	}
+ 
++	if (symbol && !strchr(symbol, ':')) {
++		unsigned int count;
++
++		count = number_of_same_symbols(symbol);
++		if (count > 1) {
++			/*
++			 * Users should use ADDR to remove the ambiguity of
++			 * using KSYM only.
++			 */
++			trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++			ret = -EADDRNOTAVAIL;
++
++			goto error;
++		} else if (count == 0) {
++			/*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++			 */
++			trace_probe_log_err(0, BAD_PROBE_ADDR);
++			ret = -ENOENT;
++
++			goto error;
++		}
++	}
++
+ 	trace_probe_log_set_index(0);
+ 	if (event) {
+ 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
+@@ -1699,6 +1743,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
+ }
+ 
+ #ifdef CONFIG_PERF_EVENTS
++
+ /* create a trace_kprobe, but don't add it to global lists */
+ struct trace_event_call *
+ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+@@ -1709,6 +1754,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+ 	int ret;
+ 	char *event;
+ 
++	if (func) {
++		unsigned int count;
++
++		count = number_of_same_symbols(func);
++		if (count > 1)
++			/*
++			 * Users should use addr to remove the ambiguity of
++			 * using func only.
++			 */
++			return ERR_PTR(-EADDRNOTAVAIL);
++		else if (count == 0)
++			/*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++			 */
++			return ERR_PTR(-ENOENT);
++	}
++
+ 	/*
+ 	 * local trace_kprobes are not added to dyn_event, so they are never
+ 	 * searched in find_trace_kprobe(). Therefore, there is no concern of
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 01ea148723de2..975161164366f 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -438,6 +438,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ 	C(BAD_MAXACT,		"Invalid maxactive number"),		\
+ 	C(MAXACT_TOO_BIG,	"Maxactive is too big"),		\
+ 	C(BAD_PROBE_ADDR,	"Invalid probed address or symbol"),	\
++	C(NON_UNIQ_SYMBOL,	"The symbol is not unique"),		\
+ 	C(BAD_RETPROBE,		"Retprobe address must be a function entry"), \
+ 	C(NO_TRACEPOINT,	"Tracepoint is not found"),		\
+ 	C(BAD_ADDR_SUFFIX,	"Invalid probed address suffix"), \
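
The new NON_UNIQ_SYMBOL error encodes a simple policy: a name-only kprobe spec is accepted only when the symbol resolves exactly once; zero matches become -ENOENT and multiple matches become -EADDRNOTAVAIL, steering the user toward an address-based spec. A compact sketch of that dispatch over a toy symbol table (types hypothetical):

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct sym { const char *name; unsigned long addr; };	/* toy symbol table */

/* Count entries sharing @name, mirroring the
 * kallsyms_on_each_match_symbol() walk in the hunk above. */
static unsigned int count_matches(const struct sym *tab, size_t n,
				  const char *name)
{
	unsigned int count = 0;

	for (size_t i = 0; i < n; i++)
		if (strcmp(tab[i].name, name) == 0)
			count++;
	return count;
}

static int check_symbol(const struct sym *tab, size_t n, const char *name)
{
	switch (count_matches(tab, n, name)) {
	case 0:
		return -ENOENT;		/* nothing to probe */
	case 1:
		return 0;		/* unambiguous, accept */
	default:
		return -EADDRNOTAVAIL;	/* ambiguous: probe by address */
	}
}
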
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 5658da50a2d07..e2701dbb88721 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -864,11 +864,13 @@ void __init setup_kmalloc_cache_index_table(void)
+ 
+ static unsigned int __kmalloc_minalign(void)
+ {
++	unsigned int minalign = dma_get_cache_alignment();
++
+ #ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
+ 	if (io_tlb_default_mem.nslabs)
+-		return ARCH_KMALLOC_MINALIGN;
++		minalign = ARCH_KMALLOC_MINALIGN;
+ #endif
+-	return dma_get_cache_alignment();
++	return max(minalign, arch_slab_minalign());
+ }
+ 
+ void __init
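
The rewritten __kmalloc_minalign() combines two constraints: the DMA-derived alignment may be relaxed when a SWIOTLB bounce buffer is available, but the result must never fall below the architecture's own slab minimum. A self-contained sketch with placeholder constants:

/* Placeholder values; the real ones are architecture-specific. */
#define DMA_CACHE_ALIGN		128
#define ARCH_KMALLOC_MINALIGN	8
#define ARCH_SLAB_MINALIGN	64

static unsigned int kmalloc_minalign(int swiotlb_usable)
{
	unsigned int minalign = DMA_CACHE_ALIGN;

	/* A usable bounce buffer lets unaligned DMA be fixed up later,
	 * so the DMA constraint can be relaxed... */
	if (swiotlb_usable)
		minalign = ARCH_KMALLOC_MINALIGN;

	/* ...but never below what the slab allocator itself requires. */
	return minalign > ARCH_SLAB_MINALIGN ? minalign : ARCH_SLAB_MINALIGN;
}
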
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ce76931d11d86..6d6192f514d0f 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -874,7 +874,7 @@ static void bis_cleanup(struct hci_conn *conn)
+ 
+ static int remove_cig_sync(struct hci_dev *hdev, void *data)
+ {
+-	u8 handle = PTR_ERR(data);
++	u8 handle = PTR_UINT(data);
+ 
+ 	return hci_le_remove_cig_sync(hdev, handle);
+ }
+@@ -883,7 +883,8 @@ static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
+ {
+ 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
+ 
+-	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
++	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
++				  NULL);
+ }
+ 
+ static void find_cis(struct hci_conn *conn, void *data)
+@@ -1248,9 +1249,41 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
+ 	hci_conn_del(conn);
+ }
+ 
++/* This function requires the caller holds hdev->lock */
++u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
++{
++	struct hci_dev *hdev = conn->hdev;
++
++	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
++
++	if (conn->handle == handle)
++		return 0;
++
++	if (handle > HCI_CONN_HANDLE_MAX) {
++		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
++			   handle, HCI_CONN_HANDLE_MAX);
++		return HCI_ERROR_INVALID_PARAMETERS;
++	}
++
++	/* If abort_reason has been set it means the connection is being
++	 * aborted and the handle shall not be changed.
++	 */
++	if (conn->abort_reason)
++		return conn->abort_reason;
++
++	conn->handle = handle;
++
++	return 0;
++}
++
+ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+ {
+-	struct hci_conn *conn = data;
++	struct hci_conn *conn;
++	u16 handle = PTR_UINT(data);
++
++	conn = hci_conn_hash_lookup_handle(hdev, handle);
++	if (!conn)
++		return;
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+@@ -1275,10 +1308,17 @@ done:
+ 
+ static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
+ {
+-	struct hci_conn *conn = data;
++	struct hci_conn *conn;
++	u16 handle = PTR_UINT(data);
++
++	conn = hci_conn_hash_lookup_handle(hdev, handle);
++	if (!conn)
++		return 0;
+ 
+ 	bt_dev_dbg(hdev, "conn %p", conn);
+ 
++	conn->state = BT_CONNECT;
++
+ 	return hci_le_create_conn_sync(hdev, conn);
+ }
+ 
+@@ -1348,10 +1388,10 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 	conn->sec_level = BT_SECURITY_LOW;
+ 	conn->conn_timeout = conn_timeout;
+ 
+-	conn->state = BT_CONNECT;
+ 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
+ 
+-	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
++	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
++				 UINT_PTR(conn->handle),
+ 				 create_le_conn_complete);
+ 	if (err) {
+ 		hci_conn_del(conn);
+@@ -1589,6 +1629,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 	}
+ 
++	/* Reject outgoing connection to device with same BD ADDR against
++	 * CVE-2020-26555
++	 */
++	if (!bacmp(&hdev->bdaddr, dst)) {
++		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
++			   dst);
++		return ERR_PTR(-ECONNREFUSED);
++	}
++
+ 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ 	if (!acl) {
+ 		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+@@ -1719,7 +1768,7 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 
+ static int set_cig_params_sync(struct hci_dev *hdev, void *data)
+ {
+-	u8 cig_id = PTR_ERR(data);
++	u8 cig_id = PTR_UINT(data);
+ 	struct hci_conn *conn;
+ 	struct bt_iso_qos *qos;
+ 	struct iso_cig_params pdu;
+@@ -1829,7 +1878,7 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 
+ done:
+ 	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
+-			       ERR_PTR(qos->ucast.cig), NULL) < 0)
++			       UINT_PTR(qos->ucast.cig), NULL) < 0)
+ 		return false;
+ 
+ 	return true;
+@@ -2364,34 +2413,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+ 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
+ 		goto auth;
+ 
+-	/* An authenticated FIPS approved combination key has sufficient
+-	 * security for security level 4. */
+-	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
+-	    sec_level == BT_SECURITY_FIPS)
+-		goto encrypt;
+-
+-	/* An authenticated combination key has sufficient security for
+-	   security level 3. */
+-	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
+-	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
+-	    sec_level == BT_SECURITY_HIGH)
+-		goto encrypt;
+-
+-	/* An unauthenticated combination key has sufficient security for
+-	   security level 1 and 2. */
+-	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+-	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
+-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
+-		goto encrypt;
+-
+-	/* A combination key has always sufficient security for the security
+-	   levels 1 or 2. High security level requires the combination key
+-	   is generated using maximum PIN code length (16).
+-	   For pre 2.1 units. */
+-	if (conn->key_type == HCI_LK_COMBINATION &&
+-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
+-	     conn->pin_length == 16))
+-		goto encrypt;
++	switch (conn->key_type) {
++	case HCI_LK_AUTH_COMBINATION_P256:
++		/* An authenticated FIPS approved combination key has
++		 * sufficient security for security level 4 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_FIPS)
++			goto encrypt;
++		break;
++	case HCI_LK_AUTH_COMBINATION_P192:
++		/* An authenticated combination key has sufficient security for
++		 * security level 3 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_HIGH)
++			goto encrypt;
++		break;
++	case HCI_LK_UNAUTH_COMBINATION_P192:
++	case HCI_LK_UNAUTH_COMBINATION_P256:
++		/* An unauthenticated combination key has sufficient security
++		 * for security level 2 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_MEDIUM)
++			goto encrypt;
++		break;
++	case HCI_LK_COMBINATION:
++		/* A combination key always has sufficient security for
++		 * security level 2 or lower. A high security level requires
++		 * that the combination key be generated using the maximum
++		 * PIN code length (16). For pre-2.1 units.
++		 */
++		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
++			goto encrypt;
++		break;
++	default:
++		break;
++	}
+ 
+ auth:
+ 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+@@ -2836,7 +2892,7 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ static int abort_conn_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct hci_conn *conn;
+-	u16 handle = PTR_ERR(data);
++	u16 handle = PTR_UINT(data);
+ 
+ 	conn = hci_conn_hash_lookup_handle(hdev, handle);
+ 	if (!conn)
+@@ -2862,6 +2918,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ 	/* If the connection is pending check the command opcode since that
+ 	 * might be blocking on hci_cmd_sync_work while waiting its respective
+ 	 * event so we need to hci_cmd_sync_cancel to cancel it.
++	 *
++	 * hci_connect_le serializes the connection attempts so only one
++	 * connection can be in BT_CONNECT at a time.
+ 	 */
+ 	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+ 		switch (hci_skb_event(hdev->sent_cmd)) {
+@@ -2873,6 +2932,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ 		}
+ 	}
+ 
+-	return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
++	return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
+ 				  NULL);
+ }
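
Several hunks above replace ERR_PTR()/PTR_ERR() with UINT_PTR()/PTR_UINT() when smuggling a plain handle through a void *data argument; the ERR_PTR() encoding is only meant for small negative error values and does not round-trip arbitrary unsigned handles. Plausible definitions for such helpers, as a sketch rather than the kernel's exact macros:

#include <stdint.h>

#define UINT_PTR(u)	((void *)(uintptr_t)(u))
#define PTR_UINT(p)	((uintptr_t)(p))

/* Example callback in the style of remove_cig_sync(): recover the
 * integer handle that was packed into the opaque argument. */
static int remove_by_handle(void *data)
{
	unsigned int handle = PTR_UINT(data);	/* round-trips exactly */

	return handle ? 0 : -1;
}
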
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 26a265d9c59cd..63d4d38863acb 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2617,7 +2617,11 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	if (id < 0)
+ 		return id;
+ 
+-	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
++	error = dev_set_name(&hdev->dev, "hci%u", id);
++	if (error)
++		return error;
++
++	hdev->name = dev_name(&hdev->dev);
+ 	hdev->id = id;
+ 
+ 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+@@ -2639,8 +2643,6 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	if (!IS_ERR_OR_NULL(bt_debugfs))
+ 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+ 
+-	dev_set_name(&hdev->dev, "%s", hdev->name);
+-
+ 	error = device_add(&hdev->dev);
+ 	if (error < 0)
+ 		goto err_wqueue;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index a77234478b2c4..dd70fd5313840 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -26,6 +26,8 @@
+ /* Bluetooth HCI event handling. */
+ 
+ #include <asm/unaligned.h>
++#include <linux/crypto.h>
++#include <crypto/algapi.h>
+ 
+ #include <net/bluetooth/bluetooth.h>
+ #include <net/bluetooth/hci_core.h>
+@@ -3180,13 +3182,9 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 	}
+ 
+ 	if (!status) {
+-		conn->handle = __le16_to_cpu(ev->handle);
+-		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+-			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+-				   conn->handle, HCI_CONN_HANDLE_MAX);
+-			status = HCI_ERROR_INVALID_PARAMETERS;
++		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
++		if (status)
+ 			goto done;
+-		}
+ 
+ 		if (conn->type == ACL_LINK) {
+ 			conn->state = BT_CONFIG;
+@@ -3272,6 +3270,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ 
+ 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ 
++	/* Reject incoming connection from device with same BD ADDR against
++	 * CVE-2020-26555
++	 */
++	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
++		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
++			   &ev->bdaddr);
++		hci_reject_conn(hdev, &ev->bdaddr);
++		return;
++	}
++
+ 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
+ 				      &flags);
+ 
+@@ -3869,11 +3877,9 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
+ 			continue;
+ 
+-		conn->handle = __le16_to_cpu(rp->handle[i]);
++		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
++			continue;
+ 
+-		bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn,
+-			   conn->handle, conn->parent);
+-		
+ 		if (conn->state == BT_CONNECT)
+ 			pending = true;
+ 	}
+@@ -4725,6 +4731,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
+ 	if (!conn)
+ 		goto unlock;
+ 
++	/* Ignore NULL link key against CVE-2020-26555 */
++	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
++		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
++			   &ev->bdaddr);
++		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
++		hci_conn_drop(conn);
++		goto unlock;
++	}
++
+ 	hci_conn_hold(conn);
+ 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ 	hci_conn_drop(conn);
+@@ -5036,11 +5051,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 
+ 	switch (status) {
+ 	case 0x00:
+-		conn->handle = __le16_to_cpu(ev->handle);
+-		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+-			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+-				   conn->handle, HCI_CONN_HANDLE_MAX);
+-			status = HCI_ERROR_INVALID_PARAMETERS;
++		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
++		if (status) {
+ 			conn->state = BT_CLOSED;
+ 			break;
+ 		}
+@@ -5260,8 +5272,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ 		 * available, then do not declare that OOB data is
+ 		 * present.
+ 		 */
+-		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+-		    !memcmp(data->hash256, ZERO_KEY, 16))
++		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
++		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
+ 			return 0x00;
+ 
+ 		return 0x02;
+@@ -5271,8 +5283,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ 	 * not supported by the hardware, then check that if
+ 	 * P-192 data values are present.
+ 	 */
+-	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+-	    !memcmp(data->hash192, ZERO_KEY, 16))
++	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
++	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
+ 		return 0x00;
+ 
+ 	return 0x01;
+@@ -5289,7 +5301,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-	if (!conn)
++	if (!conn || !hci_conn_ssp_enabled(conn))
+ 		goto unlock;
+ 
+ 	hci_conn_hold(conn);
+@@ -5536,7 +5548,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-	if (!conn)
++	if (!conn || !hci_conn_ssp_enabled(conn))
+ 		goto unlock;
+ 
+ 	/* Reset the authentication requirement to unknown */
+@@ -6968,12 +6980,20 @@ unlock:
+ 	hci_dev_unlock(hdev);
+ }
+ 
++static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
++{
++	u8 handle = PTR_UINT(data);
++
++	return hci_le_terminate_big_sync(hdev, handle,
++					 HCI_ERROR_LOCAL_HOST_TERM);
++}
++
+ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ 					   struct sk_buff *skb)
+ {
+ 	struct hci_evt_le_create_big_complete *ev = data;
+ 	struct hci_conn *conn;
+-	__u8 bis_idx = 0;
++	__u8 i = 0;
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ 
+@@ -6991,7 +7011,9 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ 		    conn->iso_qos.bcast.big != ev->handle)
+ 			continue;
+ 
+-		conn->handle = __le16_to_cpu(ev->bis_handle[bis_idx++]);
++		if (hci_conn_set_handle(conn,
++					__le16_to_cpu(ev->bis_handle[i++])))
++			continue;
+ 
+ 		if (!ev->status) {
+ 			conn->state = BT_CONNECTED;
+@@ -7010,16 +7032,17 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ 		rcu_read_lock();
+ 	}
+ 
+-	if (!ev->status && !bis_idx)
++	rcu_read_unlock();
++
++	if (!ev->status && !i)
+ 		/* If no BISes have been connected for the BIG,
+ 		 * terminate. This is in case all bound connections
+ 		 * have been closed before the BIG creation
+ 		 * has completed.
+ 		 */
+-		hci_le_terminate_big_sync(hdev, ev->handle,
+-					  HCI_ERROR_LOCAL_HOST_TERM);
++		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
++				   UINT_PTR(ev->handle), NULL);
+ 
+-	rcu_read_unlock();
+ 	hci_dev_unlock(hdev);
+ }
+ 
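
The link-key and OOB checks move from memcmp() to crypto_memneq() because memcmp() may return at the first differing byte, leaking through timing where secret buffers diverge. A constant-time inequality test accumulates differences across every byte; a simplified sketch (the real crypto_memneq() is additionally hardened against compiler optimizations):

#include <stddef.h>

static int ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	for (size_t i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];	/* no data-dependent early exit */

	return diff != 0;	/* non-zero iff the buffers differ */
}
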
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 1d249d839819d..484fc2a8e4baa 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -439,7 +439,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+ 		ni->type = hdev->dev_type;
+ 		ni->bus = hdev->bus;
+ 		bacpy(&ni->bdaddr, &hdev->bdaddr);
+-		memcpy(ni->name, hdev->name, 8);
++		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
++			       strnlen(hdev->name, sizeof(ni->name)), '\0');
+ 
+ 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
+ 		break;
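
The hci_sock change swaps a fixed 8-byte memcpy() of hdev->name for memcpy_and_pad(), which bounds the copy by the actual string length and zero-fills the rest of the wire field so no stale bytes leak to monitors. The helper behaves roughly like this sketch:

#include <string.h>

static void copy_and_pad(void *dst, size_t dst_len,
			 const void *src, size_t src_len, int pad)
{
	size_t n = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, n);
	memset((char *)dst + n, pad, dst_len - n);	/* no stale bytes */
}
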
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 6aaecd6e656bc..360813ab0c4db 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -5290,6 +5290,28 @@ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 	if (conn->type == LE_LINK)
+ 		return hci_le_connect_cancel_sync(hdev, conn, reason);
+ 
++	if (conn->type == ISO_LINK) {
++		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++		 * page 1857:
++		 *
++		 * If this command is issued for a CIS on the Central and the
++		 * CIS is successfully terminated before being established,
++		 * then an HCI_LE_CIS_Established event shall also be sent for
++		 * this CIS with the Status Operation Cancelled by Host (0x44).
++		 */
++		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
++			return hci_disconnect_sync(hdev, conn, reason);
++
++		/* A CIS with no Create CIS sent has nothing to cancel */
++		if (bacmp(&conn->dst, BDADDR_ANY))
++			return HCI_ERROR_LOCAL_HOST_TERM;
++
++		/* There is no way to cancel a BIS without terminating the BIG
++		 * which is done later on connection cleanup.
++		 */
++		return 0;
++	}
++
+ 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ 		return 0;
+ 
+@@ -5316,11 +5338,27 @@ static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ }
+ 
++static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
++				  u8 reason)
++{
++	struct hci_cp_le_reject_cis cp;
++
++	memset(&cp, 0, sizeof(cp));
++	cp.handle = cpu_to_le16(conn->handle);
++	cp.reason = reason;
++
++	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
++				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
++}
++
+ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 				u8 reason)
+ {
+ 	struct hci_cp_reject_conn_req cp;
+ 
++	if (conn->type == ISO_LINK)
++		return hci_le_reject_cis_sync(hdev, conn, reason);
++
+ 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
+ 		return hci_reject_sco_sync(hdev, conn, reason);
+ 
+@@ -5336,6 +5374,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ {
+ 	int err = 0;
+ 	u16 handle = conn->handle;
++	bool disconnect = false;
+ 	struct hci_conn *c;
+ 
+ 	switch (conn->state) {
+@@ -5350,27 +5389,16 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ 		err = hci_reject_conn_sync(hdev, conn, reason);
+ 		break;
+ 	case BT_OPEN:
+-		/* Cleanup bises that failed to be established */
+-		if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags)) {
+-			hci_dev_lock(hdev);
+-			hci_conn_failed(conn, reason);
+-			hci_dev_unlock(hdev);
+-		}
++	case BT_BOUND:
+ 		break;
+ 	default:
+-		hci_dev_lock(hdev);
+-		conn->state = BT_CLOSED;
+-		hci_disconn_cfm(conn, reason);
+-		hci_conn_del(conn);
+-		hci_dev_unlock(hdev);
+-		return 0;
++		disconnect = true;
++		break;
+ 	}
+ 
+ 	hci_dev_lock(hdev);
+ 
+-	/* Check if the connection hasn't been cleanup while waiting
+-	 * commands to complete.
+-	 */
++	/* Check if the connection has been cleaned up concurrently */
+ 	c = hci_conn_hash_lookup_handle(hdev, handle);
+ 	if (!c || c != conn) {
+ 		err = 0;
+@@ -5382,7 +5410,13 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ 	 * or in case of LE it was still scanning so it can be cleanup
+ 	 * safely.
+ 	 */
+-	hci_conn_failed(conn, reason);
++	if (disconnect) {
++		conn->state = BT_CLOSED;
++		hci_disconn_cfm(conn, reason);
++		hci_conn_del(conn);
++	} else {
++		hci_conn_failed(conn, reason);
++	}
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+@@ -6511,7 +6545,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ 
+ static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+ {
+-	u8 instance = PTR_ERR(data);
++	u8 instance = PTR_UINT(data);
+ 
+ 	return hci_update_adv_data_sync(hdev, instance);
+ }
+@@ -6519,5 +6553,5 @@ static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+ {
+ 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
+-				  ERR_PTR(instance), NULL);
++				  UINT_PTR(instance), NULL);
+ }
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 42f7b257bdfbc..c8460eb7f5c0b 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -622,18 +622,6 @@ static void iso_sock_kill(struct sock *sk)
+ 	sock_put(sk);
+ }
+ 
+-static void iso_conn_defer_reject(struct hci_conn *conn)
+-{
+-	struct hci_cp_le_reject_cis cp;
+-
+-	BT_DBG("conn %p", conn);
+-
+-	memset(&cp, 0, sizeof(cp));
+-	cp.handle = cpu_to_le16(conn->handle);
+-	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
+-	hci_send_cmd(conn->hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
+-}
+-
+ static void __iso_sock_close(struct sock *sk)
+ {
+ 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+@@ -658,8 +646,6 @@ static void __iso_sock_close(struct sock *sk)
+ 		break;
+ 
+ 	case BT_CONNECT2:
+-		if (iso_pi(sk)->conn->hcon)
+-			iso_conn_defer_reject(iso_pi(sk)->conn->hcon);
+ 		iso_chan_del(sk, ECONNRESET);
+ 		break;
+ 	case BT_CONNECT:
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 968be1c20ca1f..fe8c46c46505b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -343,7 +343,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
+ static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+ {
+ 	list_del(&name_node->list);
+-	netdev_name_node_del(name_node);
+ 	kfree(name_node->name);
+ 	netdev_name_node_free(name_node);
+ }
+@@ -362,6 +361,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
+ 	if (name_node == dev->name_node || name_node->dev != dev)
+ 		return -EINVAL;
+ 
++	netdev_name_node_del(name_node);
++	synchronize_rcu();
+ 	__netdev_name_node_alt_destroy(name_node);
+ 
+ 	return 0;
+@@ -378,6 +379,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
+ /* Device list insertion */
+ static void list_netdevice(struct net_device *dev)
+ {
++	struct netdev_name_node *name_node;
+ 	struct net *net = dev_net(dev);
+ 
+ 	ASSERT_RTNL();
+@@ -389,6 +391,9 @@ static void list_netdevice(struct net_device *dev)
+ 			   dev_index_hash(net, dev->ifindex));
+ 	write_unlock(&dev_base_lock);
+ 
++	netdev_for_each_altname(dev, name_node)
++		netdev_name_node_add(net, name_node);
++
+ 	dev_base_seq_inc(net);
+ }
+ 
+@@ -397,8 +402,13 @@ static void list_netdevice(struct net_device *dev)
+  */
+ static void unlist_netdevice(struct net_device *dev, bool lock)
+ {
++	struct netdev_name_node *name_node;
++
+ 	ASSERT_RTNL();
+ 
++	netdev_for_each_altname(dev, name_node)
++		netdev_name_node_del(name_node);
++
+ 	/* Unlink dev from the device chain */
+ 	if (lock)
+ 		write_lock(&dev_base_lock);
+@@ -1078,7 +1088,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+ 
+ 		for_each_netdev(net, d) {
+ 			struct netdev_name_node *name_node;
+-			list_for_each_entry(name_node, &d->name_node->list, list) {
++
++			netdev_for_each_altname(d, name_node) {
+ 				if (!sscanf(name_node->name, name, &i))
+ 					continue;
+ 				if (i < 0 || i >= max_netdevices)
+@@ -1115,6 +1126,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+ 	return -ENFILE;
+ }
+ 
++static int dev_prep_valid_name(struct net *net, struct net_device *dev,
++			       const char *want_name, char *out_name)
++{
++	int ret;
++
++	if (!dev_valid_name(want_name))
++		return -EINVAL;
++
++	if (strchr(want_name, '%')) {
++		ret = __dev_alloc_name(net, want_name, out_name);
++		return ret < 0 ? ret : 0;
++	} else if (netdev_name_in_use(net, want_name)) {
++		return -EEXIST;
++	} else if (out_name != want_name) {
++		strscpy(out_name, want_name, IFNAMSIZ);
++	}
++
++	return 0;
++}
++
+ static int dev_alloc_name_ns(struct net *net,
+ 			     struct net_device *dev,
+ 			     const char *name)
+@@ -1152,19 +1183,13 @@ EXPORT_SYMBOL(dev_alloc_name);
+ static int dev_get_valid_name(struct net *net, struct net_device *dev,
+ 			      const char *name)
+ {
+-	BUG_ON(!net);
+-
+-	if (!dev_valid_name(name))
+-		return -EINVAL;
+-
+-	if (strchr(name, '%'))
+-		return dev_alloc_name_ns(net, dev, name);
+-	else if (netdev_name_in_use(net, name))
+-		return -EEXIST;
+-	else if (dev->name != name)
+-		strscpy(dev->name, name, IFNAMSIZ);
++	char buf[IFNAMSIZ];
++	int ret;
+ 
+-	return 0;
++	ret = dev_prep_valid_name(net, dev, name, buf);
++	if (ret >= 0)
++		strscpy(dev->name, buf, IFNAMSIZ);
++	return ret;
+ }
+ 
+ /**
+@@ -10949,7 +10974,9 @@ EXPORT_SYMBOL(unregister_netdev);
+ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 			       const char *pat, int new_ifindex)
+ {
++	struct netdev_name_node *name_node;
+ 	struct net *net_old = dev_net(dev);
++	char new_name[IFNAMSIZ] = {};
+ 	int err, new_nsid;
+ 
+ 	ASSERT_RTNL();
+@@ -10976,10 +11003,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 		/* We get here if we can't use the current device name */
+ 		if (!pat)
+ 			goto out;
+-		err = dev_get_valid_name(net, dev, pat);
++		err = dev_prep_valid_name(net, dev, pat, new_name);
+ 		if (err < 0)
+ 			goto out;
+ 	}
++	/* Check that none of the altnames conflicts. */
++	err = -EEXIST;
++	netdev_for_each_altname(dev, name_node)
++		if (netdev_name_in_use(net, name_node->name))
++			goto out;
+ 
+ 	/* Check that new_ifindex isn't used yet. */
+ 	err = -EBUSY;
+@@ -11044,6 +11076,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+ 	netdev_adjacent_add_links(dev);
+ 
++	if (new_name[0]) /* Rename the netdev to prepared name */
++		strscpy(dev->name, new_name, IFNAMSIZ);
++
+ 	/* Fixup kobjects */
+ 	err = device_rename(&dev->dev, dev->name);
+ 	WARN_ON(err);
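
The namespace-move path now resolves the wanted name into a scratch buffer via dev_prep_valid_name() and only copies it over dev->name once every later failure point has passed, so an aborted move can no longer leave the device half-renamed. Schematically (helpers and error codes hypothetical):

#include <string.h>

#define IFNAMSIZ 16

/* Hypothetical resolver: validate @want and expand it into @out. */
static int prep_valid_name(const char *want, char out[IFNAMSIZ])
{
	if (!want || !want[0])
		return -1;	/* stand-in for -EINVAL */
	/* ... expand "%d" patterns, check for collisions ... */
	strncpy(out, want, IFNAMSIZ - 1);
	out[IFNAMSIZ - 1] = '\0';
	return 0;
}

static int change_namespace(char live_name[IFNAMSIZ], const char *pat)
{
	char new_name[IFNAMSIZ] = "";
	int err = prep_valid_name(pat, new_name);

	if (err)
		return err;	/* live_name untouched on failure */
	/* ... every other failure point of the move goes here ... */
	memcpy(live_name, new_name, IFNAMSIZ);	/* commit last */
	return 0;
}
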
+diff --git a/net/core/dev.h b/net/core/dev.h
+index e075e198092cc..fa2e9c5c41224 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -62,6 +62,9 @@ struct netdev_name_node {
+ int netdev_get_name(struct net *net, char *name, int ifindex);
+ int dev_change_name(struct net_device *dev, const char *newname);
+ 
++#define netdev_for_each_altname(dev, namenode)				\
++	list_for_each_entry((namenode), &(dev)->name_node->list, list)
++
+ int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+ 
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index f56b8d6970147..4d1696677c48c 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
+ 	seq_puts(seq, "     Flags: ");
+ 
+ 	for (i = 0; i < NR_PKT_FLAGS; i++) {
+-		if (i == F_FLOW_SEQ)
++		if (i == FLOW_SEQ_SHIFT)
+ 			if (!pkt_dev->cflows)
+ 				continue;
+ 
+-		if (pkt_dev->flags & (1 << i))
++		if (pkt_dev->flags & (1 << i)) {
+ 			seq_printf(seq, "%s  ", pkt_flag_names[i]);
+-		else if (i == F_FLOW_SEQ)
+-			seq_puts(seq, "FLOW_RND  ");
+-
+ #ifdef CONFIG_XFRM
+-		if (i == F_IPSEC && pkt_dev->spi)
+-			seq_printf(seq, "spi:%u", pkt_dev->spi);
++			if (i == IPSEC_SHIFT && pkt_dev->spi)
++				seq_printf(seq, "spi:%u  ", pkt_dev->spi);
+ #endif
++		} else if (i == FLOW_SEQ_SHIFT) {
++			seq_puts(seq, "FLOW_RND  ");
++		}
+ 	}
+ 
+ 	seq_puts(seq, "\n");
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 00c94d9622b4a..c1a6f58cb2f68 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -5504,13 +5504,11 @@ static unsigned int
+ rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
+ 					   enum netdev_offload_xstats_type type)
+ {
+-	bool enabled = netdev_offload_xstats_enabled(dev, type);
+-
+ 	return nla_total_size(0) +
+ 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
+ 		nla_total_size(sizeof(u8)) +
+ 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
+-		(enabled ? nla_total_size(sizeof(u8)) : 0) +
++		nla_total_size(sizeof(u8)) +
+ 		0;
+ }
+ 
+diff --git a/net/core/stream.c b/net/core/stream.c
+index f5c4e47df1650..96fbcb9bbb30a 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
+  */
+ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ {
+-	int err = 0;
++	int ret, err = 0;
+ 	long vm_wait = 0;
+ 	long current_timeo = *timeo_p;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ 
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+-		sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+-						  (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+-						  (sk_stream_memory_free(sk) &&
+-						  !vm_wait), &wait);
++		ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
++				    (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
++				    (sk_stream_memory_free(sk) && !vm_wait),
++				    &wait);
+ 		sk->sk_write_pending--;
++		if (ret < 0)
++			goto do_error;
+ 
+ 		if (vm_wait) {
+ 			vm_wait -= current_timeo;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 02736b83c3032..0c0ae021b7ff5 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -587,7 +587,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending += writebias;
+-	sk->sk_wait_pending++;
+ 
+ 	/* Basic assumption: if someone sets sk->sk_err, he _must_
+ 	 * change state of the socket from TCP_SYN_*.
+@@ -603,7 +602,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending -= writebias;
+-	sk->sk_wait_pending--;
+ 	return timeo;
+ }
+ 
+@@ -632,6 +630,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 			return -EINVAL;
+ 
+ 		if (uaddr->sa_family == AF_UNSPEC) {
++			sk->sk_disconnects++;
+ 			err = sk->sk_prot->disconnect(sk, flags);
+ 			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
+ 			goto out;
+@@ -686,6 +685,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
+ 				tcp_sk(sk)->fastopen_req &&
+ 				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
++		int dis = sk->sk_disconnects;
+ 
+ 		/* Error code is set above */
+ 		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
+@@ -694,6 +694,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		err = sock_intr_errno(timeo);
+ 		if (signal_pending(current))
+ 			goto out;
++
++		if (dis != sk->sk_disconnects) {
++			err = -EPIPE;
++			goto out;
++		}
+ 	}
+ 
+ 	/* Connection was closed by RST, timeout, ICMP error
+@@ -715,6 +720,7 @@ out:
+ sock_error:
+ 	err = sock_error(sk) ? : -ECONNABORTED;
+ 	sock->state = SS_UNCONNECTED;
++	sk->sk_disconnects++;
+ 	if (sk->sk_prot->disconnect(sk, flags))
+ 		sock->state = SS_DISCONNECTING;
+ 	goto out;
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 2be2d49225573..d18f0f092fe73 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
+ 		skb->csum = csum_block_sub(skb->csum, csumdiff,
+ 					   skb->len - trimlen);
+ 	}
+-	pskb_trim(skb, skb->len - trimlen);
++	ret = pskb_trim(skb, skb->len - trimlen);
++	if (unlikely(ret))
++		return ret;
+ 
+ 	ret = nexthdr[1];
+ 
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index eafa4a0335157..5eb1b8d302bbd 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
+ 				 unsigned char scope)
+ {
+ 	struct fib_nh *nh;
++	__be32 saddr;
+ 
+ 	if (nhc->nhc_family != AF_INET)
+ 		return inet_select_addr(nhc->nhc_dev, 0, scope);
+ 
+ 	nh = container_of(nhc, struct fib_nh, nh_common);
+-	nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
+-	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++	saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
+ 
+-	return nh->nh_saddr;
++	WRITE_ONCE(nh->nh_saddr, saddr);
++	WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
++
++	return saddr;
+ }
+ 
+ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+ 		struct fib_nh *nh;
+ 
+ 		nh = container_of(nhc, struct fib_nh, nh_common);
+-		if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
+-			return nh->nh_saddr;
++		if (READ_ONCE(nh->nh_saddr_genid) ==
++		    atomic_read(&net->ipv4.dev_addr_genid))
++			return READ_ONCE(nh->nh_saddr);
+ 	}
+ 
+ 	return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
+@@ -1887,6 +1891,7 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
+ 			continue;
+ 		if (fi->fib_prefsrc == local) {
+ 			fi->fib_flags |= RTNH_F_DEAD;
++			fi->pfsrc_removed = true;
+ 			ret++;
+ 		}
+ 	}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index d13fb9e76b971..9bdfdab906fe0 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2027,6 +2027,7 @@ void fib_table_flush_external(struct fib_table *tb)
+ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
+ {
+ 	struct trie *t = (struct trie *)tb->tb_data;
++	struct nl_info info = { .nl_net = net };
+ 	struct key_vector *pn = t->kv;
+ 	unsigned long cindex = 1;
+ 	struct hlist_node *tmp;
+@@ -2089,6 +2090,9 @@ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
+ 
+ 			fib_notify_alias_delete(net, n->key, &n->leaf, fa,
+ 						NULL);
++			if (fi->pfsrc_removed)
++				rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa,
++					  KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0);
+ 			hlist_del_rcu(&fa->fa_list);
+ 			fib_release_info(fa->fa_info);
+ 			alias_free_mem_rcu(fa);
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index aeebe88166899..394a498c28232 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1145,7 +1145,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
+ 	if (newsk) {
+ 		struct inet_connection_sock *newicsk = inet_csk(newsk);
+ 
+-		newsk->sk_wait_pending = 0;
+ 		inet_sk_set_state(newsk, TCP_SYN_RECV);
+ 		newicsk->icsk_bind_hash = NULL;
+ 		newicsk->icsk_bind2_hash = NULL;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index ae5e786a0598d..60cffabfd4788 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -148,8 +148,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
+ 					 const struct sock *sk)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb2->family)
+-		return false;
++	if (sk->sk_family != tb2->family) {
++		if (sk->sk_family == AF_INET)
++			return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
++				tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
++
++		return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
++			sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+ 		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
+@@ -799,19 +805,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
+ 	    tb->l3mdev != l3mdev)
+ 		return false;
+ 
+-#if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb->family) {
+-		if (sk->sk_family == AF_INET)
+-			return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
+-				tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+-
+-		return false;
+-	}
+-
+-	if (sk->sk_family == AF_INET6)
+-		return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+-#endif
+-	return tb->rcv_saddr == sk->sk_rcv_saddr;
++	return inet_bind2_bucket_addr_match(tb, sk);
+ }
+ 
+ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
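
inet_bind2_bucket_addr_match() now lets an AF_INET socket match an AF_INET6 bucket (and vice versa) when the IPv6 side holds an IPv4-mapped address, ::ffff:a.b.c.d, whose low 32 bits equal the IPv4 address. The mapped-address test reduces to a three-word comparison, roughly:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

struct v6addr { uint32_t s6_addr32[4]; };	/* simplified in6_addr */

static bool ipv4_matches_v6(uint32_t v4_be, const struct v6addr *v6)
{
	bool mapped = v6->s6_addr32[0] == 0 &&
		      v6->s6_addr32[1] == 0 &&
		      v6->s6_addr32[2] == htonl(0x0000ffff);

	return mapped && v6->s6_addr32[3] == v4_be;
}
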
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 9cfc07d1e4252..9bdc1b2eaf734 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -829,7 +829,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ 			 */
+ 			if (!skb_queue_empty(&sk->sk_receive_queue))
+ 				break;
+-			sk_wait_data(sk, &timeo, NULL);
++			ret = sk_wait_data(sk, &timeo, NULL);
++			if (ret < 0)
++				break;
+ 			if (signal_pending(current)) {
+ 				ret = sock_intr_errno(timeo);
+ 				break;
+@@ -2442,7 +2444,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
+ 			__sk_flush_backlog(sk);
+ 		} else {
+ 			tcp_cleanup_rbuf(sk, copied);
+-			sk_wait_data(sk, &timeo, last);
++			err = sk_wait_data(sk, &timeo, last);
++			if (err < 0) {
++				err = copied ? : err;
++				goto out;
++			}
+ 		}
+ 
+ 		if ((flags & MSG_PEEK) &&
+@@ -2966,12 +2972,6 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	int old_state = sk->sk_state;
+ 	u32 seq;
+ 
+-	/* Deny disconnect if other threads are blocked in sk_wait_event()
+-	 * or inet_wait_for_connect().
+-	 */
+-	if (sk->sk_wait_pending)
+-		return -EBUSY;
+-
+ 	if (old_state != TCP_CLOSE)
+ 		tcp_set_state(sk, TCP_CLOSE);
+ 
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 3272682030015..53b0d62fd2c2d 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -307,6 +307,10 @@ msg_bytes_ready:
+ 		}
+ 
+ 		data = tcp_msg_wait_data(sk, psock, timeo);
++		if (data < 0) {
++			copied = data;
++			goto unlock;
++		}
+ 		if (data && !sk_psock_queue_empty(psock))
+ 			goto msg_bytes_ready;
+ 		copied = -EAGAIN;
+@@ -317,6 +321,8 @@ out:
+ 	tcp_rcv_space_adjust(sk);
+ 	if (copied > 0)
+ 		__tcp_cleanup_rbuf(sk, copied);
++
++unlock:
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 	return copied;
+@@ -351,6 +357,10 @@ msg_bytes_ready:
+ 
+ 		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ 		data = tcp_msg_wait_data(sk, psock, timeo);
++		if (data < 0) {
++			ret = data;
++			goto unlock;
++		}
+ 		if (data) {
+ 			if (!sk_psock_queue_empty(psock))
+ 				goto msg_bytes_ready;
+@@ -361,6 +371,8 @@ msg_bytes_ready:
+ 		copied = -EAGAIN;
+ 	}
+ 	ret = copied;
++
++unlock:
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 	return ret;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 2dbdc26da86e4..38528d110288b 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1869,6 +1869,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ #ifdef CONFIG_TLS_DEVICE
+ 	    tail->decrypted != skb->decrypted ||
+ #endif
++	    !mptcp_skb_can_collapse(tail, skb) ||
+ 	    thtail->doff != th->doff ||
+ 	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+ 		goto no_coalesce;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a8f58f5e99a77..afa819eede6a3 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2527,6 +2527,18 @@ static bool tcp_pacing_check(struct sock *sk)
+ 	return true;
+ }
+ 
++static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
++{
++	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
++
++	/* No skb in the rtx queue. */
++	if (!node)
++		return true;
++
++	/* Only one skb in rtx queue. */
++	return !node->rb_left && !node->rb_right;
++}
++
+ /* TCP Small Queues :
+  * Control number of packets in qdisc/devices to two packets / or ~1 ms.
+  * (These limits are doubled for retransmits)
+@@ -2564,12 +2576,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ 		limit += extra_bytes;
+ 	}
+ 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
+-		/* Always send skb if rtx queue is empty.
++		/* Always send skb if rtx queue is empty or has one skb.
+ 		 * No need to wait for TX completion to call us back,
+ 		 * after softirq/tasklet schedule.
+ 		 * This helps when TX completions are delayed too much.
+ 		 */
+-		if (tcp_rtx_queue_empty(sk))
++		if (tcp_rtx_queue_empty_or_single_skb(sk))
+ 			return false;
+ 
+ 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+@@ -2773,7 +2785,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 timeout, rto_delta_us;
++	u32 timeout, timeout_us, rto_delta_us;
+ 	int early_retrans;
+ 
+ 	/* Don't do any loss probe on a Fast Open connection before 3WHS
+@@ -2797,11 +2809,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
+ 	 * sample is available then probe after TCP_TIMEOUT_INIT.
+ 	 */
+ 	if (tp->srtt_us) {
+-		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
++		timeout_us = tp->srtt_us >> 2;
+ 		if (tp->packets_out == 1)
+-			timeout += TCP_RTO_MIN;
++			timeout_us += tcp_rto_min_us(sk);
+ 		else
+-			timeout += TCP_TIMEOUT_MIN;
++			timeout_us += TCP_TIMEOUT_MIN_US;
++		timeout = usecs_to_jiffies(timeout_us);
+ 	} else {
+ 		timeout = TCP_TIMEOUT_INIT;
+ 	}
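
tcp_rtx_queue_empty_or_single_skb() avoids walking the retransmit queue by relying on a property of any binary tree: it holds at most one node exactly when the root is absent or has no children. Generically:

#include <stdbool.h>
#include <stddef.h>

struct rb_node { struct rb_node *rb_left, *rb_right; };	/* simplified */

static bool tree_empty_or_single(const struct rb_node *root)
{
	if (!root)
		return true;	/* no node at all */

	/* A root without children is the only node in the tree. */
	return !root->rb_left && !root->rb_right;
}
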
+diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
+index acf4869c5d3b5..bba10110fbbc1 100644
+--- a/net/ipv4/tcp_recovery.c
++++ b/net/ipv4/tcp_recovery.c
+@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
+ 	tp->rack.advanced = 0;
+ 	tcp_rack_detect_loss(sk, &timeout);
+ 	if (timeout) {
+-		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
++		timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
+ 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
+ 					  timeout, inet_csk(sk)->icsk_rto);
+ 	}
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index fddd0cbdede15..e023d29e919c1 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
+ 		skb->csum = csum_block_sub(skb->csum, csumdiff,
+ 					   skb->len - trimlen);
+ 	}
+-	pskb_trim(skb, skb->len - trimlen);
++	ret = pskb_trim(skb, skb->len - trimlen);
++	if (unlikely(ret))
++		return ret;
+ 
+ 	ret = nexthdr[1];
+ 
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index eecc5e59da17c..50c278f1c1063 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -117,10 +117,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
+ {
+ 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ 
+-	if (likely(xdst->u.rt6.rt6i_idev))
+-		in6_dev_put(xdst->u.rt6.rt6i_idev);
+ 	dst_destroy_metrics_generic(dst);
+ 	rt6_uncached_list_del(&xdst->u.rt6);
++	if (likely(xdst->u.rt6.rt6i_idev))
++		in6_dev_put(xdst->u.rt6.rt6i_idev);
+ 	xfrm_dst_destroy(xdst);
+ }
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index e883c41a2163b..0e3a1753a51c6 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1860,7 +1860,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 	/* VHT can override some HT caps such as the A-MSDU max length */
+ 	if (params->vht_capa)
+ 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-						    params->vht_capa, link_sta);
++						    params->vht_capa, NULL,
++						    link_sta);
+ 
+ 	if (params->he_capa)
+ 		ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index e1900077bc4b9..5542c93edfba0 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1072,7 +1072,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
+ 						   &chandef);
+ 			memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
+ 			ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-							    &cap_ie,
++							    &cap_ie, NULL,
+ 							    &sta->deflink);
+ 			if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
+ 				rates_updated |= true;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index f8cd94ba55ccc..2cce9eba6a120 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2142,6 +2142,7 @@ void
+ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 				    struct ieee80211_supported_band *sband,
+ 				    const struct ieee80211_vht_cap *vht_cap_ie,
++				    const struct ieee80211_vht_cap *vht_cap_ie2,
+ 				    struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index f3d5bb0a59f10..a1e526419e9d2 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -451,7 +451,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
+ 		changed |= IEEE80211_RC_BW_CHANGED;
+ 
+ 	ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-					    elems->vht_cap_elem,
++					    elems->vht_cap_elem, NULL,
+ 					    &sta->deflink);
+ 
+ 	ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 24b2833e0e475..0c9198997482b 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4202,10 +4202,33 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
+ 						  elems->ht_cap_elem,
+ 						  link_sta);
+ 
+-	if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
++	if (elems->vht_cap_elem &&
++	    !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
++		const struct ieee80211_vht_cap *bss_vht_cap = NULL;
++		const struct cfg80211_bss_ies *ies;
++
++		/*
++		 * The Cisco AP module 9115 with FW 17.3 has a bug: it sends a
++		 * maximum MPDU length in the association response that is too
++		 * large (indicating 12k) for it to actually process.
++		 * Work around that.
++		 */
++		rcu_read_lock();
++		ies = rcu_dereference(cbss->ies);
++		if (ies) {
++			const struct element *elem;
++
++			elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY,
++						  ies->data, ies->len);
++			if (elem && elem->datalen >= sizeof(*bss_vht_cap))
++				bss_vht_cap = (const void *)elem->data;
++		}
++
+ 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+ 						    elems->vht_cap_elem,
+-						    link_sta);
++						    bss_vht_cap, link_sta);
++		rcu_read_unlock();
++	}
+ 
+ 	if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ 	    elems->he_cap) {
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 7fe7280e84374..d45d4be63dd87 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -665,7 +665,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
+ 		}
+ 
+ 		if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
+-			     !ieee80211_is_deauth(hdr->frame_control)))
++			     !ieee80211_is_deauth(hdr->frame_control)) &&
++			     tx->skb->protocol != tx->sdata->control_port_protocol)
+ 			return TX_DROP;
+ 
+ 		if (!skip_hw && tx->key &&
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
+index c1250aa478083..b3a5c3e96a720 100644
+--- a/net/mac80211/vht.c
++++ b/net/mac80211/vht.c
+@@ -4,7 +4,7 @@
+  *
+  * Portions of this file
+  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2022 Intel Corporation
++ * Copyright (C) 2018 - 2023 Intel Corporation
+  */
+ 
+ #include <linux/ieee80211.h>
+@@ -116,12 +116,14 @@ void
+ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 				    struct ieee80211_supported_band *sband,
+ 				    const struct ieee80211_vht_cap *vht_cap_ie,
++				    const struct ieee80211_vht_cap *vht_cap_ie2,
+ 				    struct link_sta_info *link_sta)
+ {
+ 	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
+ 	struct ieee80211_sta_vht_cap own_cap;
+ 	u32 cap_info, i;
+ 	bool have_80mhz;
++	u32 mpdu_len;
+ 
+ 	memset(vht_cap, 0, sizeof(*vht_cap));
+ 
+@@ -317,11 +319,21 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 
+ 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+ 
++	/*
++	 * Work around the Cisco 9115 FW 17.3 bug by taking the min of
++	 * both reported MPDU lengths.
++	 */
++	mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
++	if (vht_cap_ie2)
++		mpdu_len = min_t(u32, mpdu_len,
++				 le32_get_bits(vht_cap_ie2->vht_cap_info,
++					       IEEE80211_VHT_CAP_MAX_MPDU_MASK));
++
+ 	/*
+ 	 * FIXME - should the amsdu len be per link? store per link
+ 	 * and maintain a minimum?
+ 	 */
+-	switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
++	switch (mpdu_len) {
+ 	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ 		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ 		break;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 679c2732b5d01..636580c4736c9 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1300,7 +1300,7 @@ alloc_skb:
+ 	if (copy == 0) {
+ 		u64 snd_una = READ_ONCE(msk->snd_una);
+ 
+-		if (snd_una != msk->snd_nxt) {
++		if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+ 			tcp_remove_empty_skb(ssk);
+ 			return 0;
+ 		}
+@@ -1308,11 +1308,6 @@ alloc_skb:
+ 		zero_window_probe = true;
+ 		data_seq = snd_una - 1;
+ 		copy = 1;
+-
+-		/* all mptcp-level data is acked, no skbs should be present into the
+-		 * ssk write queue
+-		 */
+-		WARN_ON_ONCE(reuse_skb);
+ 	}
+ 
+ 	copy = min_t(size_t, copy, info->limit - info->sent);
+@@ -1341,7 +1336,6 @@ alloc_skb:
+ 	if (reuse_skb) {
+ 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+ 		mpext->data_len += copy;
+-		WARN_ON_ONCE(zero_window_probe);
+ 		goto out;
+ 	}
+ 
+@@ -2342,6 +2336,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
+ #define MPTCP_CF_PUSH		BIT(1)
+ #define MPTCP_CF_FASTCLOSE	BIT(2)
+ 
++/* Be sure to send a reset only if the caller asked for it; also
++ * completely clean the subflow status when the subflow reaches the
++ * TCP_CLOSE state.
++ */
++static void __mptcp_subflow_disconnect(struct sock *ssk,
++				       struct mptcp_subflow_context *subflow,
++				       unsigned int flags)
++{
++	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    (flags & MPTCP_CF_FASTCLOSE)) {
++		/* The MPTCP code never waits on the subflow sockets; a TCP-level
++		 * disconnect should never fail
++		 */
++		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
++		mptcp_subflow_ctx_reset(subflow);
++	} else {
++		tcp_shutdown(ssk, SEND_SHUTDOWN);
++	}
++}
++
+ /* subflow sockets can be either outgoing (connect) or incoming
+  * (accept).
+  *
+@@ -2379,7 +2393,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ 
+ 	if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
+-		/* be sure to force the tcp_disconnect() path,
++		/* be sure to force the tcp_close path
+ 		 * to generate the egress reset
+ 		 */
+ 		ssk->sk_lingertime = 0;
+@@ -2389,12 +2403,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 
+ 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ 	if (!dispose_it) {
+-		/* The MPTCP code never wait on the subflow sockets, TCP-level
+-		 * disconnect should never fail
+-		 */
+-		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
++		__mptcp_subflow_disconnect(ssk, subflow, flags);
+ 		msk->subflow->state = SS_UNCONNECTED;
+-		mptcp_subflow_ctx_reset(subflow);
+ 		release_sock(ssk);
+ 
+ 		goto out;
+@@ -3069,12 +3079,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	/* Deny disconnect if other threads are blocked in sk_wait_event()
+-	 * or inet_wait_for_connect().
+-	 */
+-	if (sk->sk_wait_pending)
+-		return -EBUSY;
+-
+ 	/* We are on the fastopen error path. We can't call straight into the
+ 	 * subflows cleanup code due to lock nesting (we are already under
+ 	 * msk->firstsocket lock).
+@@ -3145,7 +3149,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+ #endif
+ 
+-	nsk->sk_wait_pending = 0;
+ 	__mptcp_init_sock(nsk);
+ 
+ 	msk = mptcp_sk(nsk);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index be5869366c7d3..e43d9508e7a9c 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3166,7 +3166,7 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (!tb[NFTA_EXPR_DATA])
++	if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
+ 		return -EINVAL;
+ 
+ 	type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
+@@ -5553,7 +5553,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+ 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ 	unsigned char *b = skb_tail_pointer(skb);
+ 	struct nlattr *nest;
+-	u64 timeout = 0;
+ 
+ 	nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
+ 	if (nest == NULL)
+@@ -5589,15 +5588,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+ 		         htonl(*nft_set_ext_flags(ext))))
+ 		goto nla_put_failure;
+ 
+-	if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
+-		timeout = *nft_set_ext_timeout(ext);
+-		if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+-				 nf_jiffies64_to_msecs(timeout),
+-				 NFTA_SET_ELEM_PAD))
+-			goto nla_put_failure;
+-	} else if (set->flags & NFT_SET_TIMEOUT) {
+-		timeout = READ_ONCE(set->timeout);
+-	}
++	if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
++	    nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
++			 nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
++			 NFTA_SET_ELEM_PAD))
++		goto nla_put_failure;
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
+ 		u64 expires, now = get_jiffies_64();
+@@ -5612,9 +5607,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+ 				 nf_jiffies64_to_msecs(expires),
+ 				 NFTA_SET_ELEM_PAD))
+ 			goto nla_put_failure;
+-
+-		if (reset)
+-			*nft_set_ext_expiration(ext) = now + timeout;
+ 	}
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
+diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
+index 28e2873ba24e4..928312d01eb1d 100644
+--- a/net/netfilter/nft_inner.c
++++ b/net/netfilter/nft_inner.c
+@@ -298,6 +298,7 @@ static int nft_inner_init(const struct nft_ctx *ctx,
+ 	int err;
+ 
+ 	if (!tb[NFTA_INNER_FLAGS] ||
++	    !tb[NFTA_INNER_NUM] ||
+ 	    !tb[NFTA_INNER_HDRSIZE] ||
+ 	    !tb[NFTA_INNER_TYPE] ||
+ 	    !tb[NFTA_INNER_EXPR])
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 120f6d395b98b..0a689c8e0295d 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -179,7 +179,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ 
+ 	switch (priv->base) {
+ 	case NFT_PAYLOAD_LL_HEADER:
+-		if (!skb_mac_header_was_set(skb))
++		if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+ 			goto err;
+ 
+ 		if (skb_vlan_tag_present(skb) &&
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2660ceab3759d..e34662f4a71e0 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
+ 				   nft_rbtree_interval_end(this)) {
+ 				parent = parent->rb_right;
+ 				continue;
++			} else if (nft_set_elem_expired(&rbe->ext)) {
++				break;
+ 			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ 				parent = parent->rb_left;
+ 				continue;
+diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
+index 0935527d1d12b..b68150c971d0b 100644
+--- a/net/nfc/nci/spi.c
++++ b/net/nfc/nci/spi.c
+@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
+ 	int ret;
+ 
+ 	skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
+ 
+ 	/* add the NCI SPI header to the start of the buffer */
+ 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
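The nfc fix above adds the missing NULL check after nci_skb_alloc(). The same check-before-use shape in plain C, with malloc() standing in for the skb allocator and a memset() for the skb_push() header fill:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SPI_HDR_LEN 3 /* stand-in for NCI_SPI_HDR_LEN */

/* Models the fixed send_acknowledge(): return -ENOMEM on allocation
 * failure instead of handing a NULL buffer to skb_push(). */
static int send_ack(unsigned char **out)
{
	unsigned char *skb = malloc(SPI_HDR_LEN); /* nci_skb_alloc() stand-in */

	if (!skb)
		return -ENOMEM;

	memset(skb, 0, SPI_HDR_LEN); /* header fill, skb_push() stand-in */
	*out = skb;
	return 0;
}

int main(void)
{
	unsigned char *buf = NULL;
	int ret = send_ack(&buf);

	printf("send_ack() = %d\n", ret);
	free(buf);
	return 0;
}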
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index 01fca7a10b4bb..14cc8fe8584bd 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -48,6 +48,7 @@ struct rfkill {
+ 	bool			persistent;
+ 	bool			polling_paused;
+ 	bool			suspended;
++	bool			need_sync;
+ 
+ 	const struct rfkill_ops	*ops;
+ 	void			*data;
+@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
+ 		rfkill_event(rfkill);
+ }
+ 
++static void rfkill_sync(struct rfkill *rfkill)
++{
++	lockdep_assert_held(&rfkill_global_mutex);
++
++	if (!rfkill->need_sync)
++		return;
++
++	rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
++	rfkill->need_sync = false;
++}
++
+ static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
+ {
+ 	int i;
+@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct rfkill *rfkill = to_rfkill(dev);
+ 
++	mutex_lock(&rfkill_global_mutex);
++	rfkill_sync(rfkill);
++	mutex_unlock(&rfkill_global_mutex);
++
+ 	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
+ }
+ 
+@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&rfkill_global_mutex);
++	rfkill_sync(rfkill);
+ 	rfkill_set_block(rfkill, state);
+ 	mutex_unlock(&rfkill_global_mutex);
+ 
+@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct rfkill *rfkill = to_rfkill(dev);
+ 
++	mutex_lock(&rfkill_global_mutex);
++	rfkill_sync(rfkill);
++	mutex_unlock(&rfkill_global_mutex);
++
+ 	return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
+ }
+ 
+@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&rfkill_global_mutex);
++	rfkill_sync(rfkill);
+ 	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
+ 	mutex_unlock(&rfkill_global_mutex);
+ 
+@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
+ 
+ static void rfkill_sync_work(struct work_struct *work)
+ {
+-	struct rfkill *rfkill;
+-	bool cur;
+-
+-	rfkill = container_of(work, struct rfkill, sync_work);
++	struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
+ 
+ 	mutex_lock(&rfkill_global_mutex);
+-	cur = rfkill_global_states[rfkill->type].cur;
+-	rfkill_set_block(rfkill, cur);
++	rfkill_sync(rfkill);
+ 	mutex_unlock(&rfkill_global_mutex);
+ }
+ 
+@@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
+ 			round_jiffies_relative(POLL_INTERVAL));
+ 
+ 	if (!rfkill->persistent || rfkill_epo_lock_active) {
++		rfkill->need_sync = true;
+ 		schedule_work(&rfkill->sync_work);
+ 	} else {
+ #ifdef CONFIG_RFKILL_INPUT
+@@ -1161,7 +1180,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
+ 	init_waitqueue_head(&data->read_wait);
+ 
+ 	mutex_lock(&rfkill_global_mutex);
+-	mutex_lock(&data->mtx);
+ 	/*
+ 	 * start getting events from elsewhere but hold mtx to get
+ 	 * startup events added first
+@@ -1171,11 +1189,13 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
+ 		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ 		if (!ev)
+ 			goto free;
++		rfkill_sync(rfkill);
+ 		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
++		mutex_lock(&data->mtx);
+ 		list_add_tail(&ev->list, &data->events);
++		mutex_unlock(&data->mtx);
+ 	}
+ 	list_add(&data->list, &rfkill_fds);
+-	mutex_unlock(&data->mtx);
+ 	mutex_unlock(&rfkill_global_mutex);
+ 
+ 	file->private_data = data;
+@@ -1183,7 +1203,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
+ 	return stream_open(inode, file);
+ 
+  free:
+-	mutex_unlock(&data->mtx);
+ 	mutex_unlock(&rfkill_global_mutex);
+ 	mutex_destroy(&data->mtx);
+ 	list_for_each_entry_safe(ev, tmp, &data->events, list)
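The rfkill changes above defer the initial state sync behind a need_sync flag, so sysfs reads and the fops open path can pull a freshly registered device up to the global state before reporting it. A compact sketch of that lazy-sync-under-a-mutex pattern, with names modeled on the patch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool global_blocked = true; /* rfkill_global_states[type].cur */

struct dev {
	bool blocked;
	bool need_sync; /* set at register time, cleared on first sync */
};

/* Models rfkill_sync(): caller must hold global_mutex. */
static void dev_sync(struct dev *d)
{
	if (!d->need_sync)
		return;
	d->blocked = global_blocked; /* rfkill_set_block() stand-in */
	d->need_sync = false;
}

/* Models soft_show()/state_show(): sync before reporting. */
static bool dev_read_blocked(struct dev *d)
{
	pthread_mutex_lock(&global_mutex);
	dev_sync(d);
	pthread_mutex_unlock(&global_mutex);
	return d->blocked;
}

int main(void)
{
	struct dev d = { .blocked = false, .need_sync = true };

	printf("blocked=%d\n", dev_read_blocked(&d));
	return 0;
}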
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index e9d1b2f2ff0ad..5a81505fba9ac 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -108,13 +108,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ 
+ 	rfkill->clk = devm_clk_get(&pdev->dev, NULL);
+ 
+-	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
++	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
+ 	if (IS_ERR(gpio))
+ 		return PTR_ERR(gpio);
+ 
+ 	rfkill->reset_gpio = gpio;
+ 
+-	gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
++	gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
+ 	if (IS_ERR(gpio))
+ 		return PTR_ERR(gpio);
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 61d52594ff6d8..54dddc2ff5025 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -903,6 +903,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
+ 	cl->cl_flags |= HFSC_USC;
+ }
+ 
++static void
++hfsc_upgrade_rt(struct hfsc_class *cl)
++{
++	cl->cl_fsc = cl->cl_rsc;
++	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
++	cl->cl_flags |= HFSC_FSC;
++}
++
+ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
+ 	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
+ 	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
+@@ -1012,10 +1020,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		if (parent == NULL)
+ 			return -ENOENT;
+ 	}
+-	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+-		NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
+-		return -EINVAL;
+-	}
+ 
+ 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
+ 		return -EINVAL;
+@@ -1066,6 +1070,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	cl->cf_tree = RB_ROOT;
+ 
+ 	sch_tree_lock(sch);
++	/* Check if the inner class is a misconfigured 'rt' */
++	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
++		NL_SET_ERR_MSG(extack,
++			       "Forced curve change on parent 'rt' to 'sc'");
++		hfsc_upgrade_rt(parent);
++	}
+ 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
+ 	list_add_tail(&cl->siblings, &parent->children);
+ 	if (parent->level == 0)
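The sch_hfsc change above stops rejecting a parent configured with only a real-time ('rt') curve and instead upgrades it in place by duplicating that curve as the fair-service one. A sketch of the upgrade, with an illustrative struct layout and flag values:

#include <stdio.h>

#define HFSC_RSC 0x1 /* illustrative flag values */
#define HFSC_FSC 0x2

struct curve { unsigned long m1, d, m2; };

struct hfsc_class {
	unsigned int flags;
	struct curve rsc; /* real-time service curve */
	struct curve fsc; /* fair/link-sharing service curve */
};

/* Models hfsc_upgrade_rt(): an inner class with only 'rt' gets its
 * curve copied into the 'fsc' slot, turning it into an 'sc' class. */
static void upgrade_rt(struct hfsc_class *cl)
{
	cl->fsc = cl->rsc;
	cl->flags |= HFSC_FSC;
}

int main(void)
{
	struct hfsc_class parent = { .flags = HFSC_RSC,
				     .rsc = { 100, 0, 100 } };

	if (!(parent.flags & HFSC_FSC))
		upgrade_rt(&parent);
	printf("flags=%#x fsc.m2=%lu\n", parent.flags, parent.fsc.m2);
	return 0;
}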
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 7c77565c39d19..c0e4e587b4994 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2335,7 +2335,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
+ 		smc_find_ism_store_rc(rc, ini);
+ 		return (!rc) ? 0 : ini->rc;
+ 	}
+-	return SMC_CLC_DECL_NOSMCDEV;
++	return prfx_rc;
+ }
+ 
+ /* listen worker: finish RDMA setup */
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 2f16f9d179662..814b0169f9723 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -769,9 +769,14 @@ int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
+  * @task: controlling RPC task
+  * @xdr: xdr_stream containing RPC Reply header
+  *
+- * On success, @xdr is updated to point past the verifier and
+- * zero is returned. Otherwise, @xdr is in an undefined state
+- * and a negative errno is returned.
++ * Return values:
++ *   %0: Verifier is valid. @xdr now points past the verifier.
++ *   %-EIO: Verifier is corrupted or message ended early.
++ *   %-EACCES: Verifier is intact but not valid.
++ *   %-EPROTONOSUPPORT: Server does not support the requested auth type.
++ *
++ * When a negative errno is returned, @xdr is left in an undefined
++ * state.
+  */
+ int
+ rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr)
+diff --git a/net/sunrpc/auth_tls.c b/net/sunrpc/auth_tls.c
+index de7678f8a23d2..87f570fd3b00e 100644
+--- a/net/sunrpc/auth_tls.c
++++ b/net/sunrpc/auth_tls.c
+@@ -129,9 +129,9 @@ static int tls_validate(struct rpc_task *task, struct xdr_stream *xdr)
+ 	if (*p != rpc_auth_null)
+ 		return -EIO;
+ 	if (xdr_stream_decode_opaque_inline(xdr, &str, starttls_len) != starttls_len)
+-		return -EIO;
++		return -EPROTONOSUPPORT;
+ 	if (memcmp(str, starttls_token, starttls_len))
+-		return -EIO;
++		return -EPROTONOSUPPORT;
+ 	return 0;
+ }
+ 
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index be6be7d785315..9fb0ccabc1a26 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2721,7 +2721,15 @@ out_unparsable:
+ 
+ out_verifier:
+ 	trace_rpc_bad_verifier(task);
+-	goto out_garbage;
++	switch (error) {
++	case -EPROTONOSUPPORT:
++		goto out_err;
++	case -EACCES:
++		/* Re-encode with a fresh cred */
++		fallthrough;
++	default:
++		goto out_garbage;
++	}
+ 
+ out_msg_denied:
+ 	error = -EACCES;
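The sunrpc hunks above distinguish verifier failures by errno instead of funneling them all into the garbage-args path: tls_validate() now reports -EPROTONOSUPPORT, and the reply-decode path fans out on the error. The dispatch, sketched as a plain C function:

#include <errno.h>
#include <stdio.h>

/* Models the out_verifier switch in the patched decode path:
 * unsupported auth is fatal, while a bad cred (and anything else)
 * goes through the garbage-args retry path. */
static const char *verifier_action(int error)
{
	switch (error) {
	case -EPROTONOSUPPORT:
		return "fail the task (out_err)";
	case -EACCES: /* will re-encode with a fresh cred first */
	default:
		return "retry via the garbage-args path (out_garbage)";
	}
}

int main(void)
{
	printf("%s\n", verifier_action(-EPROTONOSUPPORT));
	printf("%s\n", verifier_action(-EACCES));
	printf("%s\n", verifier_action(-EIO));
	return 0;
}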
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 9f010369100a2..f392718951b1e 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2645,6 +2645,10 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
+ 	rcu_read_lock();
+ 	lower_xprt = rcu_dereference(lower_clnt->cl_xprt);
+ 	rcu_read_unlock();
++
++	if (wait_on_bit_lock(&lower_xprt->state, XPRT_LOCKED, TASK_KILLABLE))
++		goto out_unlock;
++
+ 	status = xs_tls_handshake_sync(lower_xprt, &upper_xprt->xprtsec);
+ 	if (status) {
+ 		trace_rpc_tls_not_started(upper_clnt, upper_xprt);
+@@ -2654,6 +2658,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
+ 	status = xs_tcp_tls_finish_connecting(lower_xprt, upper_transport);
+ 	if (status)
+ 		goto out_close;
++	xprt_release_write(lower_xprt, NULL);
+ 
+ 	trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0);
+ 	if (!xprt_test_and_set_connected(upper_xprt)) {
+@@ -2675,6 +2680,7 @@ out_unlock:
+ 	return;
+ 
+ out_close:
++	xprt_release_write(lower_xprt, NULL);
+ 	rpc_shutdown_client(lower_clnt);
+ 
+ 	/* xprt_force_disconnect() wakes tasks with a fixed tk_status code.
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 4a8ee2f6badb9..f3d3fc1c32676 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -96,8 +96,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+ 
+ int wait_on_pending_writer(struct sock *sk, long *timeo)
+ {
+-	int rc = 0;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
++	int ret, rc = 0;
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+@@ -111,9 +111,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
+ 			break;
+ 		}
+ 
+-		if (sk_wait_event(sk, timeo,
+-				  !READ_ONCE(sk->sk_write_pending), &wait))
++		ret = sk_wait_event(sk, timeo,
++				    !READ_ONCE(sk->sk_write_pending), &wait);
++		if (ret) {
++			if (ret < 0)
++				rc = ret;
+ 			break;
++		}
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	return rc;
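The tls_main change above stops discarding a negative return from sk_wait_event(). The shape of the fixed loop in a self-contained sketch, with the wait primitive replaced by a scripted stub:

#include <stdio.h>

/* Stub standing in for sk_wait_event(): >0 condition met, 0 still
 * waiting, <0 error raised while sleeping. */
static int fake_wait_event(int step)
{
	static const int script[] = { 0, 0, -512 }; /* kernel's -ERESTARTSYS,
						      * illustrative here */
	return script[step];
}

/* Models the fixed wait_on_pending_writer() loop: an error from the
 * wait primitive now breaks out with rc set instead of being lost. */
static int wait_for_writer(void)
{
	int rc = 0;

	for (int step = 0; step < 3; step++) {
		int ret = fake_wait_event(step);

		if (ret) {
			if (ret < 0)
				rc = ret;
			break;
		}
	}
	return rc;
}

int main(void)
{
	printf("rc=%d\n", wait_for_writer());
	return 0;
}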
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index e047abc600893..ce925f3a52492 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1288,6 +1288,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
++	int ret = 0;
+ 	long timeo;
+ 
+ 	timeo = sock_rcvtimeo(sk, nonblock);
+@@ -1299,6 +1300,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 		if (sk->sk_err)
+ 			return sock_error(sk);
+ 
++		if (ret < 0)
++			return ret;
++
+ 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
+ 			tls_strp_check_rcv(&ctx->strp);
+ 			if (tls_strp_msg_ready(ctx))
+@@ -1317,10 +1321,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 		released = true;
+ 		add_wait_queue(sk_sleep(sk), &wait);
+ 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+-		sk_wait_event(sk, &timeo,
+-			      tls_strp_msg_ready(ctx) ||
+-			      !sk_psock_queue_empty(psock),
+-			      &wait);
++		ret = sk_wait_event(sk, &timeo,
++				    tls_strp_msg_ready(ctx) ||
++				    !sk_psock_queue_empty(psock),
++				    &wait);
+ 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 		remove_wait_queue(sk_sleep(sk), &wait);
+ 
+@@ -1845,13 +1849,11 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
+ 	return sk_flush_backlog(sk);
+ }
+ 
+-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+-			      bool nonblock)
++static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
++				 bool nonblock)
+ {
+ 	long timeo;
+-	int err;
+-
+-	lock_sock(sk);
++	int ret;
+ 
+ 	timeo = sock_rcvtimeo(sk, nonblock);
+ 
+@@ -1861,30 +1863,36 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ 		ctx->reader_contended = 1;
+ 
+ 		add_wait_queue(&ctx->wq, &wait);
+-		sk_wait_event(sk, &timeo,
+-			      !READ_ONCE(ctx->reader_present), &wait);
++		ret = sk_wait_event(sk, &timeo,
++				    !READ_ONCE(ctx->reader_present), &wait);
+ 		remove_wait_queue(&ctx->wq, &wait);
+ 
+-		if (timeo <= 0) {
+-			err = -EAGAIN;
+-			goto err_unlock;
+-		}
+-		if (signal_pending(current)) {
+-			err = sock_intr_errno(timeo);
+-			goto err_unlock;
+-		}
++		if (timeo <= 0)
++			return -EAGAIN;
++		if (signal_pending(current))
++			return sock_intr_errno(timeo);
++		if (ret < 0)
++			return ret;
+ 	}
+ 
+ 	WRITE_ONCE(ctx->reader_present, 1);
+ 
+ 	return 0;
++}
+ 
+-err_unlock:
+-	release_sock(sk);
++static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
++			      bool nonblock)
++{
++	int err;
++
++	lock_sock(sk);
++	err = tls_rx_reader_acquire(sk, ctx, nonblock);
++	if (err)
++		release_sock(sk);
+ 	return err;
+ }
+ 
+-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
++static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
+ {
+ 	if (unlikely(ctx->reader_contended)) {
+ 		if (wq_has_sleeper(&ctx->wq))
+@@ -1896,6 +1904,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+ 	}
+ 
+ 	WRITE_ONCE(ctx->reader_present, 0);
++}
++
++static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
++{
++	tls_rx_reader_release(sk, ctx);
+ 	release_sock(sk);
+ }
+ 
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 64e8616171104..acec41c1809a8 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1622,7 +1622,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
+ 		list_add_tail(&work->entry, &rdev->wiphy_work_list);
+ 	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+ 
+-	schedule_work(&rdev->wiphy_work);
++	queue_work(system_unbound_wq, &rdev->wiphy_work);
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_queue);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 705d1cf048309..bf968cdbfbb51 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -5910,6 +5910,21 @@ out:
+ 	nlmsg_free(msg);
+ }
+ 
++static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params)
++{
++	struct ieee80211_channel *channel = params->chandef.chan;
++
++	if ((params->he_cap ||  params->he_oper) &&
++	    (channel->flags & IEEE80211_CHAN_NO_HE))
++		return -EOPNOTSUPP;
++
++	if ((params->eht_cap || params->eht_oper) &&
++	    (channel->flags & IEEE80211_CHAN_NO_EHT))
++		return -EOPNOTSUPP;
++
++	return 0;
++}
++
+ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+@@ -6179,6 +6194,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
+ 	if (err)
+ 		goto out_unlock;
+ 
++	err = nl80211_validate_ap_phy_operation(params);
++	if (err)
++		goto out_unlock;
++
+ 	if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
+ 		params->flags = nla_get_u32(
+ 			info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]);
+@@ -8483,7 +8502,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+-	struct mesh_config cfg;
++	struct mesh_config cfg = {};
+ 	u32 mask;
+ 	int err;
+ 
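Earlier in the nl80211 diff above, nl80211_validate_ap_phy_operation() adds a preflight check that refuses HE or EHT AP parameters on a channel whose flags forbid those modes. The flag test, sketched with stand-in flag values:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define CHAN_NO_HE  (1u << 0) /* stand-in for IEEE80211_CHAN_NO_HE */
#define CHAN_NO_EHT (1u << 1) /* stand-in for IEEE80211_CHAN_NO_EHT */

/* Models nl80211_validate_ap_phy_operation(): reject a configuration
 * that asks for a PHY mode the channel cannot carry. */
static int validate_ap(unsigned int chan_flags, bool he, bool eht)
{
	if (he && (chan_flags & CHAN_NO_HE))
		return -EOPNOTSUPP;
	if (eht && (chan_flags & CHAN_NO_EHT))
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_ap(CHAN_NO_HE, true, false)); /* rejected */
	printf("%d\n", validate_ap(0, true, true));           /* accepted */
	return 0;
}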
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 0cf1ce7b69342..939deecf0bbef 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -908,6 +908,10 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 		    !cfg80211_find_ssid_match(ap, request))
+ 			continue;
+ 
++		if (!is_broadcast_ether_addr(request->bssid) &&
++		    !ether_addr_equal(request->bssid, ap->bssid))
++			continue;
++
+ 		if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
+ 			continue;
+ 
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index b864740846902..e21cc71095bb2 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -380,8 +380,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+ 	skb->dev = dev;
+ 
+ 	if (err) {
+-		dev->stats.rx_errors++;
+-		dev->stats.rx_dropped++;
++		DEV_STATS_INC(dev, rx_errors);
++		DEV_STATS_INC(dev, rx_dropped);
+ 
+ 		return 0;
+ 	}
+@@ -426,7 +426,6 @@ static int
+ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ {
+ 	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	unsigned int length = skb->len;
+ 	struct net_device *tdev;
+@@ -473,7 +472,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	tdev = dst->dev;
+ 
+ 	if (tdev == dev) {
+-		stats->collisions++;
++		DEV_STATS_INC(dev, collisions);
+ 		net_warn_ratelimited("%s: Local routing loop detected!\n",
+ 				     dev->name);
+ 		goto tx_err_dst_release;
+@@ -512,13 +511,13 @@ xmit:
+ 	if (net_xmit_eval(err) == 0) {
+ 		dev_sw_netstats_tx_add(dev, 1, length);
+ 	} else {
+-		stats->tx_errors++;
+-		stats->tx_aborted_errors++;
++		DEV_STATS_INC(dev, tx_errors);
++		DEV_STATS_INC(dev, tx_aborted_errors);
+ 	}
+ 
+ 	return 0;
+ tx_err_link_failure:
+-	stats->tx_carrier_errors++;
++	DEV_STATS_INC(dev, tx_carrier_errors);
+ 	dst_link_failure(skb);
+ tx_err_dst_release:
+ 	dst_release(dst);
+@@ -528,7 +527,6 @@ tx_err_dst_release:
+ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct flowi fl;
+ 	int ret;
+@@ -545,7 +543,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+ 			if (dst->error) {
+ 				dst_release(dst);
+-				stats->tx_carrier_errors++;
++				DEV_STATS_INC(dev, tx_carrier_errors);
+ 				goto tx_err;
+ 			}
+ 			skb_dst_set(skb, dst);
+@@ -561,7 +559,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ 			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+ 			if (IS_ERR(rt)) {
+-				stats->tx_carrier_errors++;
++				DEV_STATS_INC(dev, tx_carrier_errors);
+ 				goto tx_err;
+ 			}
+ 			skb_dst_set(skb, &rt->dst);
+@@ -580,8 +578,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ tx_err:
+-	stats->tx_errors++;
+-	stats->tx_dropped++;
++	DEV_STATS_INC(dev, tx_errors);
++	DEV_STATS_INC(dev, tx_dropped);
+ 	kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
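The xfrm_interface hunks above switch plain dev->stats bumps to DEV_STATS_INC(), which increments an atomic counter so concurrent datapaths cannot lose updates. The difference, sketched with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
	unsigned long tx_errors_racy;   /* old style: read-modify-write */
	atomic_ulong  tx_errors_atomic; /* DEV_STATS_INC() style */
};

/* Models DEV_STATS_INC(dev, tx_errors): one indivisible increment,
 * safe when several queues hit the error path at once. */
static void dev_stats_inc(struct dev_stats *s)
{
	atomic_fetch_add_explicit(&s->tx_errors_atomic, 1,
				  memory_order_relaxed);
}

int main(void)
{
	struct dev_stats s = { 0 };

	s.tx_errors_racy++; /* can lose updates under concurrency */
	dev_stats_inc(&s);
	printf("racy=%lu atomic=%lu\n", s.tx_errors_racy,
	       (unsigned long)atomic_load(&s.tx_errors_atomic));
	return 0;
}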
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index d6b405782b636..d24b4d4f620ea 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -851,7 +851,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
+ 		struct hlist_node *newpos = NULL;
+ 		bool matches_s, matches_d;
+ 
+-		if (!policy->bydst_reinsert)
++		if (policy->walk.dead || !policy->bydst_reinsert)
+ 			continue;
+ 
+ 		WARN_ON_ONCE(policy->family != family);
+@@ -1256,8 +1256,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
+ 		struct xfrm_pol_inexact_bin *bin;
+ 		u8 dbits, sbits;
+ 
++		if (policy->walk.dead)
++			continue;
++
+ 		dir = xfrm_policy_id2dir(policy->index);
+-		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
++		if (dir >= XFRM_POLICY_MAX)
+ 			continue;
+ 
+ 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
+@@ -1372,8 +1375,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
+  * of an absolute inpredictability of ordering of rules. This will not pass. */
+ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
+ {
+-	static u32 idx_generator;
+-
+ 	for (;;) {
+ 		struct hlist_head *list;
+ 		struct xfrm_policy *p;
+@@ -1381,8 +1382,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
+ 		int found;
+ 
+ 		if (!index) {
+-			idx = (idx_generator | dir);
+-			idx_generator += 8;
++			idx = (net->xfrm.idx_generator | dir);
++			net->xfrm.idx_generator += 8;
+ 		} else {
+ 			idx = index;
+ 			index = 0;
+@@ -1823,9 +1824,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
+ 
+ again:
+ 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
++		if (pol->walk.dead)
++			continue;
++
+ 		dir = xfrm_policy_id2dir(pol->index);
+-		if (pol->walk.dead ||
+-		    dir >= XFRM_POLICY_MAX ||
++		if (dir >= XFRM_POLICY_MAX ||
+ 		    pol->type != type)
+ 			continue;
+ 
+@@ -1862,9 +1865,11 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
+ 
+ again:
+ 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
++		if (pol->walk.dead)
++			continue;
++
+ 		dir = xfrm_policy_id2dir(pol->index);
+-		if (pol->walk.dead ||
+-		    dir >= XFRM_POLICY_MAX ||
++		if (dir >= XFRM_POLICY_MAX ||
+ 		    pol->xdo.dev != dev)
+ 			continue;
+ 
+@@ -3215,7 +3220,7 @@ no_transform:
+ 	}
+ 
+ 	for (i = 0; i < num_pols; i++)
+-		pols[i]->curlft.use_time = ktime_get_real_seconds();
++		WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
+ 
+ 	if (num_xfrms < 0) {
+ 		/* Prohibit the flow */
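Among the xfrm_policy hunks above, the xfrm_gen_index() change moves a function-local static counter into struct netns_xfrm, so each network namespace allocates policy indices from its own counter. A sketch of the fixed shape, with the namespace struct reduced to that one field:

#include <stdio.h>

struct netns_xfrm {
	unsigned int idx_generator;
};

/* After the fix: state lives in the namespace, so two namespaces no
 * longer share (and race on) a single global counter. */
static unsigned int gen_index(struct netns_xfrm *net, unsigned int dir)
{
	unsigned int idx = net->idx_generator | dir;

	net->idx_generator += 8;
	return idx;
}

int main(void)
{
	struct netns_xfrm a = { 0 }, b = { 0 };

	printf("%u %u\n", gen_index(&a, 1), gen_index(&a, 1)); /* 1 9 */
	printf("%u\n", gen_index(&b, 1)); /* 1 again, independent */
	return 0;
}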
+diff --git a/rust/Makefile b/rust/Makefile
+index 4124bfa01798d..467f50a752dbd 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -1,5 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
++# Where to place rustdoc generated documentation
++rustdoc_output := $(objtree)/Documentation/output/rust/rustdoc
++
+ obj-$(CONFIG_RUST) += core.o compiler_builtins.o
+ always-$(CONFIG_RUST) += exports_core_generated.h
+ 
+@@ -65,7 +68,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ 	OBJTREE=$(abspath $(objtree)) \
+ 	$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
+ 		$(rustc_target_flags) -L$(objtree)/$(obj) \
+-		--output $(objtree)/$(obj)/doc \
++		--output $(rustdoc_output) \
+ 		--crate-name $(subst rustdoc-,,$@) \
+ 		@$(objtree)/include/generated/rustc_cfg $<
+ 
+@@ -82,15 +85,14 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ # and then retouch the generated files.
+ rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
+     rustdoc-alloc rustdoc-kernel
+-	$(Q)cp $(srctree)/Documentation/images/logo.svg $(objtree)/$(obj)/doc
+-	$(Q)cp $(srctree)/Documentation/images/COPYING-logo $(objtree)/$(obj)/doc
+-	$(Q)find $(objtree)/$(obj)/doc -name '*.html' -type f -print0 | xargs -0 sed -Ei \
+-		-e 's:rust-logo\.svg:logo.svg:g' \
+-		-e 's:rust-logo\.png:logo.svg:g' \
+-		-e 's:favicon\.svg:logo.svg:g' \
+-		-e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
+-	$(Q)echo '.logo-container > img { object-fit: contain; }' \
+-		>> $(objtree)/$(obj)/doc/rustdoc.css
++	$(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
++	$(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
++	$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
++		-e 's:rust-logo-[0-9a-f]+\.svg:logo.svg:g' \
++		-e 's:favicon-[0-9a-f]+\.svg:logo.svg:g' \
++		-e 's:<link rel="alternate icon" type="image/png" href="[/.]+/static\.files/favicon-(16x16|32x32)-[0-9a-f]+\.png">::g'
++	$(Q)for f in $(rustdoc_output)/static.files/rustdoc-*.css; do \
++		echo ".logo-container > img { object-fit: contain; }" >> $$f; done
+ 
+ rustdoc-macros: private rustdoc_host = yes
+ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+@@ -154,7 +156,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
+ 		@$(objtree)/include/generated/rustc_cfg \
+ 		$(rustc_target_flags) $(rustdoc_test_target_flags) \
+ 		--sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
+-		-L$(objtree)/$(obj)/test --output $(objtree)/$(obj)/doc \
++		-L$(objtree)/$(obj)/test --output $(rustdoc_output) \
+ 		--crate-name $(subst rusttest-,,$@) $<
+ 
+ # We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
+diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
+index 05fcab6abfe63..0f29e7b2194c0 100644
+--- a/rust/kernel/error.rs
++++ b/rust/kernel/error.rs
+@@ -37,7 +37,7 @@ pub mod code {
+     declare_err!(E2BIG, "Argument list too long.");
+     declare_err!(ENOEXEC, "Exec format error.");
+     declare_err!(EBADF, "Bad file number.");
+-    declare_err!(ECHILD, "Exec format error.");
++    declare_err!(ECHILD, "No child processes.");
+     declare_err!(EAGAIN, "Try again.");
+     declare_err!(ENOMEM, "Out of memory.");
+     declare_err!(EACCES, "Permission denied.");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 10703a3df7ea4..c2fbf484b1104 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7009,6 +7009,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
+ 					0x0); /* Make sure 0x14 was disable */
+ 	}
+ }
++/* Fix a Headset Mic pin that has no entry in the verb table */
++static void alc_fixup_headset_mic(struct hda_codec *codec,
++				   const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x19, 0x03a1103c },
++		{ }
++	};
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_apply_pincfgs(codec, pincfgs);
++		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
++		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
++		break;
++	}
++}
+ 
+ 
+ enum {
+@@ -7274,6 +7292,7 @@ enum {
+ 	ALC245_FIXUP_HP_X360_MUTE_LEDS,
+ 	ALC287_FIXUP_THINKPAD_I2S_SPK,
+ 	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
++	ALC2XX_FIXUP_HEADSET_MIC,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -9372,6 +9391,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
+ 	},
++	[ALC2XX_FIXUP_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_headset_mic,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9646,6 +9669,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9715,6 +9739,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+@@ -10656,6 +10681,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
++		{0x19, 0x40000000}),
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index 7e241908b5f16..4d7ccf682647e 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -879,7 +879,7 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
+ 
+ 	mutex_lock(&cs35l56->irq_lock);
+ 
+-	init_completion(&cs35l56->init_completion);
++	reinit_completion(&cs35l56->init_completion);
+ 
+ 	cs35l56_system_reset(cs35l56);
+ 
+diff --git a/sound/soc/codecs/cs42l42-sdw.c b/sound/soc/codecs/cs42l42-sdw.c
+index 974bae4abfad1..94a66a325303b 100644
+--- a/sound/soc/codecs/cs42l42-sdw.c
++++ b/sound/soc/codecs/cs42l42-sdw.c
+@@ -6,6 +6,7 @@
+ 
+ #include <linux/acpi.h>
+ #include <linux/device.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index bd0e9fbc12ebf..6405fed9f095f 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+-	return component_add(dev, &wcd938x_sdw_component_ops);
++	ret = component_add(dev, &wcd938x_sdw_component_ops);
++	if (ret)
++		goto err_disable_rpm;
++
++	return 0;
++
++err_disable_rpm:
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	return ret;
++}
++
++static int wcd9380_remove(struct sdw_slave *pdev)
++{
++	struct device *dev = &pdev->dev;
++
++	component_del(dev, &wcd938x_sdw_component_ops);
++
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	return 0;
+ }
+ 
+ static const struct sdw_device_id wcd9380_slave_id[] = {
+@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
+ 
+ static struct sdw_driver wcd9380_codec_driver = {
+ 	.probe	= wcd9380_probe,
++	.remove	= wcd9380_remove,
+ 	.ops = &wcd9380_slave_ops,
+ 	.id_table = wcd9380_slave_id,
+ 	.driver = {
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index a3c6806613777..d27b919c63b41 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -3325,8 +3325,10 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
+ 		return dev_err_probe(dev, ret, "Failed to get supplies\n");
+ 
+ 	ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+-	if (ret)
++	if (ret) {
++		regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ 		return dev_err_probe(dev, ret, "Failed to enable supplies\n");
++	}
+ 
+ 	wcd938x_dt_parse_micbias_info(dev, wcd938x);
+ 
+@@ -3435,7 +3437,8 @@ static int wcd938x_bind(struct device *dev)
+ 	wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
+ 	if (!wcd938x->rxdev) {
+ 		dev_err(dev, "could not find slave with matching of node\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_unbind;
+ 	}
+ 	wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
+ 	wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
+@@ -3443,46 +3446,47 @@ static int wcd938x_bind(struct device *dev)
+ 	wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
+ 	if (!wcd938x->txdev) {
+ 		dev_err(dev, "could not find txslave with matching of node\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_put_rxdev;
+ 	}
+ 	wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
+ 	wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
+ 	wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
+-	if (!wcd938x->tx_sdw_dev) {
+-		dev_err(dev, "could not get txslave with matching of dev\n");
+-		return -EINVAL;
+-	}
+ 
+ 	/* As TX is main CSR reg interface, which should not be suspended first.
+ 	 * expicilty add the dependency link */
+ 	if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
+ 			    DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink tx and rx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_put_txdev;
+ 	}
+ 
+ 	if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
+ 					DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink wcd and tx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_rxtx_link;
+ 	}
+ 
+ 	if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
+ 					DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink wcd and rx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_tx_link;
+ 	}
+ 
+ 	wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
+ 	if (!wcd938x->regmap) {
+ 		dev_err(dev, "could not get TX device regmap\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	ret = wcd938x_irq_init(wcd938x, dev);
+ 	if (ret) {
+ 		dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+-		return ret;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+@@ -3491,27 +3495,45 @@ static int wcd938x_bind(struct device *dev)
+ 	ret = wcd938x_set_micbias_data(wcd938x);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: bad micbias pdata\n", __func__);
+-		return ret;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
+ 					 wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "%s: Codec registration failed\n",
+ 				__func__);
++		goto err_remove_rx_link;
++	}
+ 
+-	return ret;
++	return 0;
+ 
++err_remove_rx_link:
++	device_link_remove(dev, wcd938x->rxdev);
++err_remove_tx_link:
++	device_link_remove(dev, wcd938x->txdev);
++err_remove_rxtx_link:
++	device_link_remove(wcd938x->rxdev, wcd938x->txdev);
++err_put_txdev:
++	put_device(wcd938x->txdev);
++err_put_rxdev:
++	put_device(wcd938x->rxdev);
++err_unbind:
++	component_unbind_all(dev, wcd938x);
++
++	return ret;
+ }
+ 
+ static void wcd938x_unbind(struct device *dev)
+ {
+ 	struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ 
++	snd_soc_unregister_component(dev);
+ 	device_link_remove(dev, wcd938x->txdev);
+ 	device_link_remove(dev, wcd938x->rxdev);
+ 	device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+-	snd_soc_unregister_component(dev);
++	put_device(wcd938x->txdev);
++	put_device(wcd938x->rxdev);
+ 	component_unbind_all(dev, wcd938x);
+ }
+ 
+@@ -3572,13 +3594,13 @@ static int wcd938x_probe(struct platform_device *pdev)
+ 
+ 	ret = wcd938x_add_slave_components(wcd938x, dev, &match);
+ 	if (ret)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	wcd938x_reset(wcd938x);
+ 
+ 	ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
+ 	if (ret)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	pm_runtime_set_autosuspend_delay(dev, 1000);
+ 	pm_runtime_use_autosuspend(dev);
+@@ -3588,11 +3610,27 @@ static int wcd938x_probe(struct platform_device *pdev)
+ 	pm_runtime_idle(dev);
+ 
+ 	return 0;
++
++err_disable_regulators:
++	regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++	regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++
++	return ret;
+ }
+ 
+ static void wcd938x_remove(struct platform_device *pdev)
+ {
+-	component_master_del(&pdev->dev, &wcd938x_comp_ops);
++	struct device *dev = &pdev->dev;
++	struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
++
++	component_master_del(dev, &wcd938x_comp_ops);
++
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++	regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ }
+ 
+ #if defined(CONFIG_OF)
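The wcd938x_bind() rework above replaces bare early returns with a goto ladder, so device references, device links, and the IRQ setup acquired before a failure are released in reverse order. The canonical shape of such a ladder, in a stand-alone sketch:

#include <errno.h>
#include <stdio.h>

static int get_first(void)  { puts("get_first");  return 0; }
static int get_second(void) { puts("get_second"); return -EINVAL; } /* simulated failure */
static void put_first(void) { puts("put_first"); }

/* Models the err_put_rxdev/err_put_txdev/... labels added to
 * wcd938x_bind(): each label undoes exactly what succeeded before
 * the jump, in reverse order of acquisition. */
static int bind_all(void)
{
	int ret;

	ret = get_first();
	if (ret)
		goto err;

	ret = get_second();
	if (ret)
		goto err_put_first;

	return 0;

err_put_first:
	put_first();
err:
	return ret;
}

int main(void)
{
	printf("bind_all() = %d\n", bind_all());
	return 0;
}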
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index 430dd446321e5..452f0caf415b9 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -779,7 +779,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
+ 		if (IS_ERR(priv->extclk)) {
+ 			ret = PTR_ERR(priv->extclk);
+ 			if (ret == -EPROBE_DEFER)
+-				return ret;
++				goto err_priv;
+ 
+ 			priv->extclk = NULL;
+ 		}
+diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
+index e0f822ebb9b97..38a33c7b39084 100644
+--- a/tools/perf/util/dlfilter.c
++++ b/tools/perf/util/dlfilter.c
+@@ -280,13 +280,21 @@ static struct perf_event_attr *dlfilter__attr(void *ctx)
+ 	return &d->evsel->core.attr;
+ }
+ 
++static __s32 code_read(__u64 ip, struct map *map, struct machine *machine, void *buf, __u32 len)
++{
++	u64 offset = map__map_ip(map, ip);
++
++	if (ip + len >= map__end(map))
++		len = map__end(map) - ip;
++
++	return dso__data_read_offset(map__dso(map), machine, offset, buf, len);
++}
++
+ static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
+ {
+ 	struct dlfilter *d = (struct dlfilter *)ctx;
+ 	struct addr_location *al;
+ 	struct addr_location a;
+-	struct map *map;
+-	u64 offset;
+ 	__s32 ret;
+ 
+ 	if (!d->ctx_valid)
+@@ -296,27 +304,17 @@ static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
+ 	if (!al)
+ 		return -1;
+ 
+-	map = al->map;
+-
+-	if (map && ip >= map__start(map) && ip < map__end(map) &&
++	if (al->map && ip >= map__start(al->map) && ip < map__end(al->map) &&
+ 	    machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
+-		goto have_map;
++		return code_read(ip, al->map, d->machine, buf, len);
+ 
+ 	addr_location__init(&a);
++
+ 	thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
+-	if (!a.map) {
+-		ret = -1;
+-		goto out;
+-	}
++	ret = a.map ? code_read(ip, a.map, d->machine, buf, len) : -1;
+ 
+-	map = a.map;
+-have_map:
+-	offset = map__map_ip(map, ip);
+-	if (ip + len >= map__end(map))
+-		len = map__end(map) - ip;
+-	ret = dso__data_read_offset(map__dso(map), d->machine, offset, buf, len);
+-out:
+ 	addr_location__exit(&a);
++
+ 	return ret;
+ }
+ 
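The perf dlfilter refactor above hoists the shared read logic into a code_read() helper that clamps the requested length to the end of the mapping. The clamp itself, sketched with a map reduced to a start/end range:

#include <stdio.h>

struct map {
	unsigned long start, end;
};

/* Models the clamp in code_read(): never read past the end of the
 * mapping, even if the caller asked for more bytes than remain. */
static unsigned long clamp_len(const struct map *m, unsigned long ip,
			       unsigned long len)
{
	if (ip + len >= m->end)
		len = m->end - ip;
	return len;
}

int main(void)
{
	struct map m = { 0x1000, 0x2000 };

	printf("%lu\n", clamp_len(&m, 0x1ff0, 64)); /* clamped to 16 */
	printf("%lu\n", clamp_len(&m, 0x1000, 64)); /* unchanged: 64 */
	return 0;
}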
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
+new file mode 100644
+index 0000000000000..bc9514428dbaf
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
+@@ -0,0 +1,13 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: Test failure of registering kprobe on non unique symbol
++# requires: kprobe_events
++
++SYMBOL='name_show'
++
++# We skip this test on kernels where SYMBOL is unique or does not exist.
++if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
++	exit_unsupported
++fi
++
++! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index d01b73a8ed0f0..621b1964ea6f3 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1383,7 +1383,9 @@ chk_rst_nr()
+ 	count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+-	elif [ $count -lt $rst_tx ]; then
++	# accept more RSTs than expected, unless we expect none
++	elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } ||
++	     { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then
+ 		echo "[fail] got $count MP_RST[s] TX expected $rst_tx"
+ 		fail_test
+ 	else
+@@ -1394,7 +1396,9 @@ chk_rst_nr()
+ 	count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+-	elif [ "$count" -lt "$rst_rx" ]; then
++	# accept more RSTs than expected, unless we expect none
++	elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } ||
++	     { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then
+ 		echo "[fail] got $count MP_RST[s] RX expected $rst_rx"
+ 		fail_test
+ 	else
+@@ -2282,6 +2286,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_rm_tx_nr 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# multiple subflows, remove
+@@ -2294,6 +2299,7 @@ remove_tests()
+ 			run_tests $ns1 $ns2 10.0.1.1 slow
+ 		chk_join_nr 2 2 2
+ 		chk_rm_nr 2 2
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# single address, remove
+@@ -2306,6 +2312,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflow and signal, remove
+@@ -2319,6 +2326,7 @@ remove_tests()
+ 		chk_join_nr 2 2 2
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows and signal, remove
+@@ -2333,6 +2341,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 2 2
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# addresses remove
+@@ -2347,6 +2356,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 3 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# invalid addresses remove
+@@ -2361,6 +2371,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows and signal, flush
+@@ -2375,6 +2386,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 3 invert simult
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows flush
+@@ -2394,6 +2406,7 @@ remove_tests()
+ 		else
+ 			chk_rm_nr 3 3
+ 		fi
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# addresses flush
+@@ -2408,6 +2421,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 3 invert simult
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# invalid addresses flush
+@@ -2422,6 +2436,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# remove id 0 subflow
+@@ -2433,6 +2448,7 @@ remove_tests()
+ 			run_tests $ns1 $ns2 10.0.1.1 slow
+ 		chk_join_nr 1 1 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# remove id 0 address
+@@ -2445,6 +2461,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1 invert
++		chk_rst_nr 0 0 invert
+ 	fi
+ }
+ 
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index 3117a4be0cd04..e175cf2e7b50b 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -3,6 +3,8 @@
+ #
+ # OVS kernel module self tests
+ 
++trap ovs_exit_sig EXIT TERM INT ERR
++
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+ 
+@@ -202,7 +204,7 @@ run_test() {
+ 	fi
+ 
+ 	if python3 ovs-dpctl.py -h 2>&1 | \
+-	     grep "Need to install the python" >/dev/null 2>&1; then
++	     grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
+ 		stdbuf -o0 printf "TEST: %-60s  [PYLIB]\n" "${tdesc}"
+ 		return $ksft_skip
+ 	fi
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index 1c8b36bc15d48..0d9bb167ebf4f 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -25,8 +25,10 @@ try:
+     from pyroute2.netlink import nlmsg_atoms
+     from pyroute2.netlink.exceptions import NetlinkError
+     from pyroute2.netlink.generic import GenericNetlinkSocket
++    import pyroute2
++
+ except ModuleNotFoundError:
+-    print("Need to install the python pyroute2 package.")
++    print("Need to install the python pyroute2 package >= 0.6.")
+     sys.exit(0)
+ 
+ 
+@@ -732,12 +734,14 @@ class ovskey(nla):
+                 "src",
+                 lambda x: str(ipaddress.IPv4Address(x)),
+                 int,
++                convert_ipv4,
+             ),
+             (
+                 "dst",
+                 "dst",
+-                lambda x: str(ipaddress.IPv6Address(x)),
++                lambda x: str(ipaddress.IPv4Address(x)),
+                 int,
++                convert_ipv4,
+             ),
+             ("tp_src", "tp_src", "%d", int),
+             ("tp_dst", "tp_dst", "%d", int),
+@@ -1457,6 +1461,12 @@ def main(argv):
+     nlmsg_atoms.ovskey = ovskey
+     nlmsg_atoms.ovsactions = ovsactions
+ 
++    # version check for pyroute2
++    prverscheck = pyroute2.__version__.split(".")
++    if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6:
++        print("Need to upgrade the python pyroute2 package to >= 0.6.")
++        sys.exit(0)
++
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-v",
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+index bb34329e02a7f..5267c88496d51 100755
+--- a/tools/testing/selftests/netfilter/nft_audit.sh
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || {
+ 	exit $SKIP_RC
+ }
+ 
++# Run everything in a separate network namespace
++[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
++
++# give other scripts a chance to finish - audit_logread sees all activity
++sleep 1
++
+ logfile=$(mktemp)
+ rulefile=$(mktemp)
+ echo "logging into $logfile"


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-22 22:51 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-22 22:51 UTC (permalink / raw
  To: gentoo-commits

commit:     51bcb2538941016f4ebd47dcc0c878f26a105a0c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 22 22:50:47 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 22 22:50:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=51bcb253

kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |  4 +++
 2950_kbuild-CRC32-1MB-dict-xz-modules.patch | 38 +++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/0000_README b/0000_README
index 97163ce5..0f2b2d88 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  2940_handle-gcc-14-last-stmt-rename.patch
 From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
 Desc:   gcc-plugins: Rename last_stmt() for GCC 14+
 
+Patch:  2950_kbuild-CRC32-1MB-dict-xz-modules.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git/commit/?h=kbuild&id=fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28
+Desc:   kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2950_kbuild-CRC32-1MB-dict-xz-modules.patch b/2950_kbuild-CRC32-1MB-dict-xz-modules.patch
new file mode 100644
index 00000000..32090343
--- /dev/null
+++ b/2950_kbuild-CRC32-1MB-dict-xz-modules.patch
@@ -0,0 +1,38 @@
+From fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28 Mon Sep 17 00:00:00 2001
+From: Martin Nybo Andersen <tweek@tweek.dk>
+Date: Fri, 15 Sep 2023 12:15:39 +0200
+Subject: kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
+
+Kmod is now (since kmod commit 09c9f8c5df04 ("libkmod: Use kernel
+decompression when available")) using the kernel decompressor, when
+loading compressed modules.
+
+However, the kernel XZ decompressor is XZ Embedded, which doesn't
+handle CRC64 and dictionaries larger than 1MiB.
+
+Use CRC32 and 1MiB dictionary when XZ compressing and installing
+kernel modules.
+
+Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050582
+Signed-off-by: Martin Nybo Andersen <tweek@tweek.dk>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+---
+ scripts/Makefile.modinst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
+index 346f5ec506823..0afd75472679f 100644
+--- a/scripts/Makefile.modinst
++++ b/scripts/Makefile.modinst
+@@ -144,7 +144,7 @@ endif
+ quiet_cmd_gzip = GZIP    $@
+       cmd_gzip = $(KGZIP) -n -f $<
+ quiet_cmd_xz = XZ      $@
+-      cmd_xz = $(XZ) --lzma2=dict=2MiB -f $<
++      cmd_xz = $(XZ) --check=crc32 --lzma2=dict=1MiB -f $<
+ quiet_cmd_zstd = ZSTD    $@
+       cmd_zstd = $(ZSTD) -T0 --rm -f -q $<
+ 
+-- 
+cgit 
+
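A small C sketch of the constraint this patch works around: per the commit message, the in-kernel XZ Embedded decoder handles CRC32 but not CRC64 (xz's default check), and dictionaries only up to 1 MiB. The validator below is an illustration of those two limits, not the decoder's actual code:

#include <stdbool.h>
#include <stdio.h>

enum xz_check { CHECK_NONE, CHECK_CRC32, CHECK_CRC64 };

/* Illustrative only: encodes the two limits the commit message names,
 * no CRC64 support and at most a 1 MiB LZMA2 dictionary. */
static bool kernel_xz_can_decode(enum xz_check check,
				 unsigned long dict_bytes)
{
	if (check == CHECK_CRC64)
		return false;
	return dict_bytes <= (1UL << 20);
}

int main(void)
{
	/* old modinst flags: default check (CRC64), dict=2MiB -> 0 */
	printf("%d\n", kernel_xz_can_decode(CHECK_CRC64, 2UL << 20));
	/* new modinst flags: --check=crc32 --lzma2=dict=1MiB -> 1 */
	printf("%d\n", kernel_xz_can_decode(CHECK_CRC32, 1UL << 20));
	return 0;
}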


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-19 22:29 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-19 22:29 UTC (permalink / raw
  To: gentoo-commits

commit:     c8bb78b3205cb4cea208a46b3f54d9bd3491ac56
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 19 22:23:54 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 19 22:23:54 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c8bb78b3

Linux patch 6.5.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-6.5.8.patch | 7171 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7175 insertions(+)

diff --git a/0000_README b/0000_README
index 86ea0adc..97163ce5 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-6.5.7.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.7
 
+Patch:  1007_linux-6.5.8.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-6.5.8.patch b/1007_linux-6.5.8.patch
new file mode 100644
index 00000000..bfae72ba
--- /dev/null
+++ b/1007_linux-6.5.8.patch
@@ -0,0 +1,7171 @@
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
+index 33b90e975e33c..ea7db3618b23e 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
+@@ -31,8 +31,9 @@ properties:
+       - const: renesas,rzg2l-irqc
+ 
+   '#interrupt-cells':
+-    description: The first cell should contain external interrupt number (IRQ0-7) and the
+-                 second cell is used to specify the flag.
++    description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
++                 include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second
++                 cell is used to specify the flag.
+     const: 2
+ 
+   '#address-cells':
+diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+index 358019e85d907..326284e151f66 100644
+--- a/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
++++ b/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+@@ -59,7 +59,6 @@ allOf:
+         compatible:
+           contains:
+             enum:
+-              - fsl,imx8mq-csi
+               - fsl,imx8mm-csi
+     then:
+       required:
+diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
+index eb7d2c88ddece..8e1b27288afd4 100644
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -339,6 +339,18 @@ The specified lower directories will be stacked beginning from the
+ rightmost one and going left.  In the above example lower1 will be the
+ top, lower2 the middle and lower3 the bottom layer.
+ 
++Note: directory names containing colons can be provided as lower layer by
++escaping the colons with a single backslash.  For example:
++
++  mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
++
++Since kernel version v6.5, directory names containing colons can also
++be provided as lower layer using the fsconfig syscall from new mount api:
++
++  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
++
++In the latter case, colons in lower layer directory names will be escaped
++as an octal characters (\072) when displayed in /proc/self/mountinfo.
+ 
+ Metadata only copy up
+ ---------------------
+diff --git a/Makefile b/Makefile
+index f9d5970f34413..a687c9a0646cb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 36ef2dbe8add4..3ee9266fa8e98 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -905,7 +905,7 @@
+ 		status = "disabled";
+ 	};
+ 
+-	sata_phy: t-phy@1a243000 {
++	sata_phy: t-phy {
+ 		compatible = "mediatek,mt7622-tphy",
+ 			     "mediatek,generic-tphy-v1";
+ 		#address-cells = <2>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 68539ea788dfc..24eda00e320d3 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -434,7 +434,7 @@
+ 			};
+ 		};
+ 
+-		pcie_phy: t-phy@11c00000 {
++		pcie_phy: t-phy {
+ 			compatible = "mediatek,mt7986-tphy",
+ 				     "mediatek,generic-tphy-v2";
+ 			#address-cells = <2>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index b2485ddfd33bb..5d635085fe3fd 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -48,7 +48,7 @@
+ 
+ 	memory@40000000 {
+ 		device_type = "memory";
+-		reg = <0 0x40000000 0 0x80000000>;
++		reg = <0 0x40000000 0x2 0x00000000>;
+ 	};
+ 
+ 	reserved-memory {
+@@ -56,13 +56,8 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+-		bl31_secmon_reserved: secmon@54600000 {
+-			no-map;
+-			reg = <0 0x54600000 0x0 0x200000>;
+-		};
+-
+-		/* 12 MiB reserved for OP-TEE (BL32)
++		/*
++		 * 12 MiB reserved for OP-TEE (BL32)
+ 		 * +-----------------------+ 0x43e0_0000
+ 		 * |      SHMEM 2MiB       |
+ 		 * +-----------------------+ 0x43c0_0000
+@@ -75,6 +70,34 @@
+ 			no-map;
+ 			reg = <0 0x43200000 0 0x00c00000>;
+ 		};
++
++		scp_mem: memory@50000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x50000000 0 0x2900000>;
++			no-map;
++		};
++
++		vpu_mem: memory@53000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
++		};
++
++		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
++		bl31_secmon_mem: memory@54600000 {
++			no-map;
++			reg = <0 0x54600000 0x0 0x200000>;
++		};
++
++		snd_dma_mem: memory@60000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x60000000 0 0x1100000>;
++			no-map;
++		};
++
++		apu_mem: memory@62000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 4dbbf8fdab758..43011bc41da77 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -313,6 +313,7 @@
+ 		interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
+ 		cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
+ 		       <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
++		status = "fail";
+ 	};
+ 
+ 	dmic_codec: dmic-codec {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index a7c3020a5de49..06c53000bb74d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3958,7 +3958,7 @@
+ 
+ 		pdc: interrupt-controller@b220000 {
+ 			compatible = "qcom,sm8150-pdc", "qcom,pdc";
+-			reg = <0 0x0b220000 0 0x400>;
++			reg = <0 0x0b220000 0 0x30000>;
+ 			qcom,pdc-ranges = <0 480 94>, <94 609 31>,
+ 					  <125 63 1>;
+ 			#interrupt-cells = <2>;
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index bd68e1b7f29f3..2b9c0e0bd0fba 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -9,6 +9,7 @@
+ #ifndef _ASM_ACPI_H
+ #define _ASM_ACPI_H
+ 
++#include <linux/cpuidle.h>
+ #include <linux/efi.h>
+ #include <linux/memblock.h>
+ #include <linux/psci.h>
+@@ -42,6 +43,25 @@
+ #define ACPI_MADT_GICC_SPE  (offsetof(struct acpi_madt_generic_interrupt, \
+ 	spe_interrupt) + sizeof(u16))
+ 
++/*
++ * Arm® Functional Fixed Hardware Specification Version 1.2.
++ * Table 2: Arm Architecture context loss flags
++ */
++#define CPUIDLE_CORE_CTXT		BIT(0) /* Core context Lost */
++
++static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
++{
++	if (arch_flags & CPUIDLE_CORE_CTXT)
++		return CPUIDLE_FLAG_TIMER_STOP;
++
++	return 0;
++}
++#define arch_get_idle_state_flags arch_get_idle_state_flags
++
++#define CPUIDLE_TRACE_CTXT		BIT(1) /* Trace context loss */
++#define CPUIDLE_GICR_CTXT		BIT(2) /* GICR */
++#define CPUIDLE_GICD_CTXT		BIT(3) /* GICD */
++
+ /* Basic configuration for ACPI */
+ #ifdef	CONFIG_ACPI
+ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+index 1a89ebdc3acc9..0238e6bd0d6c1 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ 
+ #define pte_wrprotect pte_wrprotect
+ 
++static inline int pte_read(pte_t pte)
++{
++	return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
++}
++
++#define pte_read pte_read
++
+ static inline int pte_write(pte_t pte)
+ {
+ 	return !(pte_val(pte) & _PAGE_RO);
+diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
+index 287e25864ffae..072048e723c9b 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
+@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ {
+ 	unsigned long old;
+ 
+-	if (pte_young(*ptep))
++	if (!pte_young(*ptep))
+ 		return 0;
+ 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ 	return (old & _PAGE_ACCESSED) != 0;
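The hunk above fixes an inverted condition: __ptep_test_and_clear_young() must
bail out early when the PTE is *not* young, and only perform the clearing
update when the accessed bit is actually set. A self-contained toy model of
the intended semantics (the bit value and names are invented for the demo):

#include <stdio.h>

#define DEMO_PAGE_ACCESSED 0x4	/* invented "young" bit */

static int demo_test_and_clear_young(unsigned long *pte)
{
	if (!(*pte & DEMO_PAGE_ACCESSED))	/* fixed: not young, nothing to do */
		return 0;
	*pte &= ~DEMO_PAGE_ACCESSED;		/* clear it and report it was young */
	return 1;
}

int main(void)
{
	unsigned long pte = DEMO_PAGE_ACCESSED;

	printf("%d", demo_test_and_clear_young(&pte));	/* 1: was young */
	printf("%d\n", demo_test_and_clear_young(&pte));	/* 0: already clear */
	return 0;
}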
+diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
+index a6caaaab6f922..3af11981fcd55 100644
+--- a/arch/powerpc/include/asm/nohash/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/pgtable.h
+@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
+ 	return pte_val(pte) & _PAGE_RW;
+ }
+ #endif
++#ifndef pte_read
+ static inline int pte_read(pte_t pte)		{ return 1; }
++#endif
+ static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+ static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index fe27d41f9a3de..835797a66d720 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -138,8 +138,9 @@ ret_from_syscall:
+ 	lis	r4,icache_44x_need_flush@ha
+ 	lwz	r5,icache_44x_need_flush@l(r4)
+ 	cmplwi	cr0,r5,0
+-	bne-	2f
++	bne-	.L44x_icache_flush
+ #endif /* CONFIG_PPC_47x */
++.L44x_icache_flush_return:
+ 	kuep_unlock
+ 	lwz	r4,_LINK(r1)
+ 	lwz	r5,_CCR(r1)
+@@ -173,10 +174,11 @@ syscall_exit_finish:
+ 	b	1b
+ 
+ #ifdef CONFIG_44x
+-2:	li	r7,0
++.L44x_icache_flush:
++	li	r7,0
+ 	iccci	r0,r0
+ 	stw	r7,icache_44x_need_flush@l(r4)
+-	b	1b
++	b	.L44x_icache_flush_return
+ #endif  /* CONFIG_44x */
+ 
+ 	.globl	ret_from_fork
+diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
+index ca0674b0b683e..69d78c372c592 100644
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -185,7 +185,6 @@ _GLOBAL_TOC(plpar_hcall)
+ plpar_hcall_trace:
+ 	HCALL_INST_PRECALL(R5)
+ 
+-	std	r4,STK_PARAM(R4)(r1)
+ 	mr	r0,r4
+ 
+ 	mr	r4,r5
+@@ -197,7 +196,7 @@ plpar_hcall_trace:
+ 
+ 	HVSC
+ 
+-	ld	r12,STK_PARAM(R4)(r1)
++	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
+ 	std	r4,0(r12)
+ 	std	r5,8(r12)
+ 	std	r6,16(r12)
+@@ -297,7 +296,6 @@ _GLOBAL_TOC(plpar_hcall9)
+ plpar_hcall9_trace:
+ 	HCALL_INST_PRECALL(R5)
+ 
+-	std	r4,STK_PARAM(R4)(r1)
+ 	mr	r0,r4
+ 
+ 	mr	r4,r5
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 1329e060c5482..b43a6bb7e4dcb 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -6,7 +6,6 @@
+ # for more details.
+ #
+ 
+-OBJCOPYFLAGS    := -O binary
+ LDFLAGS_vmlinux := -z norelro
+ ifeq ($(CONFIG_RELOCATABLE),y)
+ 	LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
+diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
+index e7882ccb0fd46..78ea44f767189 100644
+--- a/arch/riscv/include/asm/kprobes.h
++++ b/arch/riscv/include/asm/kprobes.h
+@@ -40,6 +40,15 @@ void arch_remove_kprobe(struct kprobe *p);
+ int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+ bool kprobe_breakpoint_handler(struct pt_regs *regs);
+ bool kprobe_single_step_handler(struct pt_regs *regs);
+-
++#else
++static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
++{
++	return false;
++}
++
++static inline bool kprobe_single_step_handler(struct pt_regs *regs)
++{
++	return false;
++}
+ #endif /* CONFIG_KPROBES */
+ #endif /* _ASM_RISCV_KPROBES_H */
+diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h
+index f2183e00fdd20..3fc7deda91902 100644
+--- a/arch/riscv/include/asm/uprobes.h
++++ b/arch/riscv/include/asm/uprobes.h
+@@ -34,7 +34,18 @@ struct arch_uprobe {
+ 	bool simulate;
+ };
+ 
++#ifdef CONFIG_UPROBES
+ bool uprobe_breakpoint_handler(struct pt_regs *regs);
+ bool uprobe_single_step_handler(struct pt_regs *regs);
+-
++#else
++static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
++{
++	return false;
++}
++
++static inline bool uprobe_single_step_handler(struct pt_regs *regs)
++{
++	return false;
++}
++#endif /* CONFIG_UPROBES */
+ #endif /* _ASM_RISCV_UPROBES_H */
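Both headers above use the same idiom: when the feature is compiled out,
provide static inline stubs with identical signatures that simply return
false, so call sites (see the handle_break() rework in traps.c further down)
need no #ifdef guards. A self-contained sketch of the idiom; the config
symbol and handler names are invented for the demo:

#include <stdbool.h>
#include <stdio.h>

struct demo_regs { unsigned long pc; };

#ifdef DEMO_CONFIG_PROBES
bool demo_breakpoint_handler(struct demo_regs *regs);	/* real implementation */
#else
static inline bool demo_breakpoint_handler(struct demo_regs *regs)
{
	return false;	/* feature disabled: never consumes the trap */
}
#endif

static void demo_handle_break(struct demo_regs *regs)
{
	if (demo_breakpoint_handler(regs))
		return;	/* consumed by the probe layer */
	printf("unhandled break at %#lx\n", regs->pc);
}

int main(void)
{
	struct demo_regs r = { .pc = 0x1000 };

	demo_handle_break(&r);	/* with the stub, always reaches the printf */
	return 0;
}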
+diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
+index a8efa053c4a52..9cc0a76692715 100644
+--- a/arch/riscv/kernel/irq.c
++++ b/arch/riscv/kernel/irq.c
+@@ -60,7 +60,7 @@ static void init_irq_stacks(void)
+ }
+ #endif /* CONFIG_VMAP_STACK */
+ 
+-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
++#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+ void do_softirq_own_stack(void)
+ {
+ #ifdef CONFIG_IRQ_STACKS
+@@ -92,7 +92,7 @@ void do_softirq_own_stack(void)
+ #endif
+ 		__do_softirq();
+ }
+-#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
++#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
+ 
+ #else
+ static void init_irq_stacks(void) {}
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index 180d951d36241..21a4d0e111bc5 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -311,13 +311,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
+ 	/* Align the stack frame. */
+ 	sp &= ~0xfUL;
+ 
+-	/*
+-	 * Fail if the size of the altstack is not large enough for the
+-	 * sigframe construction.
+-	 */
+-	if (current->sas_ss_size && sp < current->sas_ss_sp)
+-		return (void __user __force *)-1UL;
+-
+ 	return (void __user *)sp;
+ }
+ 
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index f798c853bede6..cd6f10c73a163 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -13,6 +13,8 @@
+ #include <linux/kdebug.h>
+ #include <linux/uaccess.h>
+ #include <linux/kprobes.h>
++#include <linux/uprobes.h>
++#include <asm/uprobes.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/irq.h>
+@@ -246,22 +248,28 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
+ 	return GET_INSN_LENGTH(insn);
+ }
+ 
++static bool probe_single_step_handler(struct pt_regs *regs)
++{
++	bool user = user_mode(regs);
++
++	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
++}
++
++static bool probe_breakpoint_handler(struct pt_regs *regs)
++{
++	bool user = user_mode(regs);
++
++	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
++}
++
+ void handle_break(struct pt_regs *regs)
+ {
+-#ifdef CONFIG_KPROBES
+-	if (kprobe_single_step_handler(regs))
++	if (probe_single_step_handler(regs))
+ 		return;
+ 
+-	if (kprobe_breakpoint_handler(regs))
+-		return;
+-#endif
+-#ifdef CONFIG_UPROBES
+-	if (uprobe_single_step_handler(regs))
++	if (probe_breakpoint_handler(regs))
+ 		return;
+ 
+-	if (uprobe_breakpoint_handler(regs))
+-		return;
+-#endif
+ 	current->thread.bad_cause = regs->cause;
+ 
+ 	if (user_mode(regs))
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index c648864c8cd1a..3b4cb713e3684 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -239,7 +239,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+ 	emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
+ 	/* Set return value. */
+ 	if (!is_tail_call)
+-		emit_mv(RV_REG_A0, RV_REG_A5, ctx);
++		emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
+ 	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+ 		  is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
+ 		  ctx);
+@@ -757,8 +757,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
+ 	if (ret)
+ 		return ret;
+ 
+-	if (save_ret)
+-		emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
++	if (save_ret) {
++		emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
++		emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
++	}
+ 
+ 	/* update branch with beqz */
+ 	if (ctx->insns) {
+@@ -844,7 +846,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 
+ 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ 	if (save_ret) {
+-		stack_size += 8;
++		stack_size += 16; /* Save both A5 (BPF R0) and A0 */
+ 		retval_off = stack_size;
+ 	}
+ 
+@@ -931,6 +933,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 		if (ret)
+ 			goto out;
+ 		emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
++		emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
+ 		im->ip_after_call = ctx->insns + ctx->ninsns;
+ 		/* 2 nops reserved for auipc+jalr pair */
+ 		emit(rv_nop(), ctx);
+@@ -962,8 +965,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ 		restore_args(nregs, args_off, ctx);
+ 
+-	if (save_ret)
++	if (save_ret) {
+ 		emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
++		emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
++	}
+ 
+ 	emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
+ 
+@@ -1436,7 +1441,8 @@ out_be:
+ 		if (ret)
+ 			return ret;
+ 
+-		emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
++		if (insn->src_reg != BPF_PSEUDO_CALL)
++			emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+ 		break;
+ 	}
+ 	/* tail call */
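One detail worth spelling out in the epilogue change above: on RV64, the
psABI keeps 32-bit integer values sign-extended in 64-bit registers, so the
JIT must not pass BPF's 64-bit R0 through unmodified. "addiw rd, rs, 0"
writes the sign-extended low 32 bits of rs, whereas "mv" copies all 64 bits.
The difference, modeled in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r0 = 0x00000000ffffffffULL;	/* BPF program "returned" -1 */

	int64_t as_mv    = (int64_t)r0;			/* emit_mv: bits pass through */
	int64_t as_addiw = (int32_t)(uint32_t)r0;	/* emit_addiw ..., 0 */

	/* prints: mv=4294967295 addiw=-1 */
	printf("mv=%lld addiw=%lld\n", (long long)as_mv, (long long)as_addiw);
	return 0;
}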
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 2861e3360affc..e507692e51e71 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
+ 				 * func_addr's original caller
+ 				 */
+ 	int stack_size;		/* Trampoline stack size */
++	int backchain_off;	/* Offset of backchain */
+ 	int stack_args_off;	/* Offset of stack arguments for calling
+ 				 * func_addr, has to be at the top
+ 				 */
+@@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
+ 				 * for __bpf_prog_enter() return value and
+ 				 * func_addr respectively
+ 				 */
+-	int r14_off;		/* Offset of saved %r14 */
+ 	int run_ctx_off;	/* Offset of struct bpf_tramp_run_ctx */
+ 	int tccnt_off;		/* Offset of saved tailcall counter */
++	int r14_off;		/* Offset of saved %r14, has to be at the
++				 * bottom */
+ 	int do_fexit;		/* do_fexit: label */
+ };
+ 
+@@ -2247,8 +2249,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	 * Calculate the stack layout.
+ 	 */
+ 
+-	/* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
++	/*
++	 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
++	 * ABI requires, put our backchain at the end of the allocated memory.
++	 */
+ 	tjit->stack_size = STACK_FRAME_OVERHEAD;
++	tjit->backchain_off = tjit->stack_size - sizeof(u64);
+ 	tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
+ 	tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
+ 	tjit->ip_off = alloc_stack(tjit, sizeof(u64));
+@@ -2256,16 +2262,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
+ 	tjit->retval_off = alloc_stack(tjit, sizeof(u64));
+ 	tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
+-	tjit->r14_off = alloc_stack(tjit, sizeof(u64));
+ 	tjit->run_ctx_off = alloc_stack(tjit,
+ 					sizeof(struct bpf_tramp_run_ctx));
+ 	tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
+-	/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
+-	tjit->stack_size -= STACK_FRAME_OVERHEAD;
++	tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
++	/*
++	 * In accordance with the s390x ABI, the caller has allocated
++	 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
++	 * backchain, and the rest we can use.
++	 */
++	tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
+ 	tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
+ 
++	/* lgr %r1,%r15 */
++	EMIT4(0xb9040000, REG_1, REG_15);
+ 	/* aghi %r15,-stack_size */
+ 	EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
++	/* stg %r1,backchain_off(%r15) */
++	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
++		      tjit->backchain_off);
+ 	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
+ 	_EMIT6(0xd203f000 | tjit->tccnt_off,
+ 	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
+diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c
+index 76b1f8bb0fd5f..dab4ed199227f 100644
+--- a/arch/x86/events/utils.c
++++ b/arch/x86/events/utils.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <asm/insn.h>
++#include <linux/mm.h>
+ 
+ #include "perf_event.h"
+ 
+@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
+ 		 * The LBR logs any address in the IP, even if the IP just
+ 		 * faulted. This means userspace can control the from address.
+ 		 * Ensure we don't blindly read any address by validating it is
+-		 * a known text address.
++		 * a known text address and not a vsyscall address.
+ 		 */
+-		if (kernel_text_address(from)) {
++		if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
+ 			addr = (void *)from;
+ 			/*
+ 			 * Assume we can get the maximum possible size
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 1d111350197f3..b37abb55e948b 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -637,12 +637,17 @@
+ /* AMD Last Branch Record MSRs */
+ #define MSR_AMD64_LBR_SELECT			0xc000010e
+ 
+-/* Fam 17h MSRs */
+-#define MSR_F17H_IRPERF			0xc00000e9
++/* Zen4 */
++#define MSR_ZEN4_BP_CFG			0xc001102e
++#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+ 
++/* Zen 2 */
+ #define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
+ #define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
+ 
++/* Fam 17h MSRs */
++#define MSR_F17H_IRPERF			0xc00000e9
++
+ /* Fam 16h MSRs */
+ #define MSR_F16H_L2I_PERF_CTL		0xc0010230
+ #define MSR_F16H_L2I_PERF_CTR		0xc0010231
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 600cf25dbfc64..744a4cd5ac8cc 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -134,8 +134,6 @@ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
+ 
+-bool smp_park_other_cpus_in_init(void);
+-
+ void smp_store_boot_cpu_info(void);
+ void smp_store_cpu_info(int id);
+ 
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 44843a492e69c..49b2855444407 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -403,6 +403,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ 	u8 insn_buff[MAX_PATCH_LEN];
+ 
+ 	DPRINTK(ALT, "alt table %px, -> %px", start, end);
++
++	/*
++	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
++	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
++	 * During the process, KASAN becomes confused seeing partial LA57
++	 * conversion and triggers a false-positive out-of-bound report.
++	 *
++	 * Disable KASAN until the patching is complete.
++	 */
++	kasan_disable_current();
++
+ 	/*
+ 	 * The scan order should be from start to end. A later scanned
+ 	 * alternative code can overwrite previously scanned alternative code.
+@@ -452,6 +463,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ 
+ 		text_poke_early(instr, insn_buff, insn_buff_sz);
+ 	}
++
++	kasan_enable_current();
+ }
+ 
+ static inline bool is_jcc32(struct insn *insn)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 28e77c5d6484a..9a3394e8c4259 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -80,6 +80,10 @@ static const int amd_div0[] =
+ 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+ 			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+ 
++static const int amd_erratum_1485[] =
++	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
++			   AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+ 	int osvw_id = *erratum++;
+@@ -1149,6 +1153,10 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+ 		setup_force_cpu_bug(X86_BUG_DIV0);
+ 	}
++
++	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
++	     cpu_has_amd_erratum(c, amd_erratum_1485))
++		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+ }
+ 
+ #ifdef CONFIG_X86_32
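To observe the erratum-1485 workaround above from userspace, one can read
MSR_ZEN4_BP_CFG through the msr driver and check bit 5. A hedged sketch, not
part of the patch: it needs root, the msr module loaded, and an affected
family 19h part (on other CPUs the pread() simply fails):

#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_ZEN4_BP_CFG 0xc001102e	/* from the msr-index.h hunk above */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) { perror("open"); return 1; }
	/* The msr driver uses the file offset as the MSR index. */
	if (pread(fd, &val, sizeof(val), MSR_ZEN4_BP_CFG) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("BP_CFG=%#llx shared-BTB fix bit=%llu\n",
	       (unsigned long long)val, (unsigned long long)((val >> 5) & 1));
	close(fd);
	return 0;
}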
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 7eb18ca7bd45b..cc8ef9bfcb52f 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -131,7 +131,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ }
+ 
+ /*
+- * Disable virtualization, APIC etc. and park the CPU in a HLT loop
++ * This function calls the 'stop' function on all other CPUs in the system.
+  */
+ DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
+ {
+@@ -172,17 +172,13 @@ static void native_stop_other_cpus(int wait)
+ 	 * 2) Wait for all other CPUs to report that they reached the
+ 	 *    HLT loop in stop_this_cpu()
+ 	 *
+-	 * 3) If the system uses INIT/STARTUP for CPU bringup, then
+-	 *    send all present CPUs an INIT vector, which brings them
+-	 *    completely out of the way.
++	 *    3) If #2 timed out, send an NMI to the CPUs which did not
++	 *    yet report
+ 	 *
+-	 * 4) If #3 is not possible and #2 timed out send an NMI to the
+-	 *    CPUs which did not yet report
+-	 *
+-	 * 5) Wait for all other CPUs to report that they reached the
++	 * 4) Wait for all other CPUs to report that they reached the
+ 	 *    HLT loop in stop_this_cpu()
+ 	 *
+-	 * #4 can obviously race against a CPU reaching the HLT loop late.
++	 * #3 can obviously race against a CPU reaching the HLT loop late.
+ 	 * That CPU will have reported already and the "have all CPUs
+ 	 * reached HLT" condition will be true despite the fact that the
+ 	 * other CPU is still handling the NMI. Again, there is no
+@@ -198,7 +194,7 @@ static void native_stop_other_cpus(int wait)
+ 		/*
+ 		 * Don't wait longer than a second for IPI completion. The
+ 		 * wait request is not checked here because that would
+-		 * prevent an NMI/INIT shutdown in case that not all
++		 * prevent an NMI shutdown attempt in case that not all
+ 		 * CPUs reach shutdown state.
+ 		 */
+ 		timeout = USEC_PER_SEC;
+@@ -206,27 +202,7 @@ static void native_stop_other_cpus(int wait)
+ 			udelay(1);
+ 	}
+ 
+-	/*
+-	 * Park all other CPUs in INIT including "offline" CPUs, if
+-	 * possible. That's a safe place where they can't resume execution
+-	 * of HLT and then execute the HLT loop from overwritten text or
+-	 * page tables.
+-	 *
+-	 * The only downside is a broadcast MCE, but up to the point where
+-	 * the kexec() kernel brought all APs online again an MCE will just
+-	 * make HLT resume and handle the MCE. The machine crashes and burns
+-	 * due to overwritten text, page tables and data. So there is a
+-	 * choice between fire and frying pan. The result is pretty much
+-	 * the same. Chose frying pan until x86 provides a sane mechanism
+-	 * to park a CPU.
+-	 */
+-	if (smp_park_other_cpus_in_init())
+-		goto done;
+-
+-	/*
+-	 * If park with INIT was not possible and the REBOOT_VECTOR didn't
+-	 * take all secondary CPUs offline, try with the NMI.
+-	 */
++	/* If the REBOOT_VECTOR didn't work, try with the NMI. */
+ 	if (!cpumask_empty(&cpus_stop_mask)) {
+ 		/*
+ 		 * If NMI IPI is enabled, try to register the stop handler
+@@ -249,7 +225,6 @@ static void native_stop_other_cpus(int wait)
+ 			udelay(1);
+ 	}
+ 
+-done:
+ 	local_irq_save(flags);
+ 	disable_local_APIC();
+ 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 747b83a373a2d..a0f49cc2a6e62 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1346,33 +1346,6 @@ void arch_thaw_secondary_cpus_end(void)
+ 	cache_aps_init();
+ }
+ 
+-bool smp_park_other_cpus_in_init(void)
+-{
+-	unsigned int cpu, this_cpu = smp_processor_id();
+-	unsigned int apicid;
+-
+-	if (apic->wakeup_secondary_cpu_64 || apic->wakeup_secondary_cpu)
+-		return false;
+-
+-	/*
+-	 * If this is a crash stop which does not execute on the boot CPU,
+-	 * then this cannot use the INIT mechanism because INIT to the boot
+-	 * CPU will reset the machine.
+-	 */
+-	if (this_cpu)
+-		return false;
+-
+-	for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
+-		if (cpu == this_cpu)
+-			continue;
+-		apicid = apic->cpu_present_to_apicid(cpu);
+-		if (apicid == BAD_APICID)
+-			continue;
+-		send_init_sequence(apicid);
+-	}
+-	return true;
+-}
+-
+ /*
+  * Early setup to make printk work.
+  */
+diff --git a/block/fops.c b/block/fops.c
+index 838ffada53413..47c3e68253e21 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -659,24 +659,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ 
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
+-	/* Invalidate the page cache, including dirty pages. */
+-	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+-	if (error)
+-		goto fail;
+-
++	/*
++	 * Invalidate the page cache, including dirty pages, for valid
++	 * de-allocate mode calls to fallocate().
++	 */
+ 	switch (mode) {
+ 	case FALLOC_FL_ZERO_RANGE:
+ 	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
++		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL,
+ 					     BLKDEV_ZERO_NOUNMAP);
+ 		break;
+ 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
++		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL,
+ 					     BLKDEV_ZERO_NOFALLBACK);
+ 		break;
+ 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
++		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL);
+ 		break;
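From userspace, each of the three supported branches above is reached with an
ordinary fallocate(2) call on the block device node. A minimal sketch of the
punch-hole case, which the code above maps to a zero-out; /dev/sdX is a
placeholder and the call destroys data in the given range, so do not run it
blindly:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder device */

	if (fd < 0) { perror("open"); return 1; }

	/* KEEP_SIZE is required: a block device's size cannot change. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20) < 0)
		perror("fallocate");	/* e.g. EOPNOTSUPP if unsupported */

	close(fd);
	return 0;
}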
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 660834a49c1f9..c95d0edb0be9e 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1913,6 +1913,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
+ 		},
+ 	},
++	{
++		/*
++		 * HP Pavilion Gaming Laptop 15-dk1xxx
++		 * https://github.com/systemd/systemd/issues/28942
++		 */
++		.callback = ec_honor_dsdt_gpe,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
++		},
++	},
+ 	{
+ 		/*
+ 		 * Samsung hardware
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index dc615ef6550a1..3a34a8c425fe4 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1217,8 +1217,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
+ 		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
+ 		state->exit_latency = lpi->wake_latency;
+ 		state->target_residency = lpi->min_residency;
+-		if (lpi->arch_flags)
+-			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
++		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
+ 		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
+ 			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+ 		state->enter = acpi_idle_lpi_enter;
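The LPI setup above now delegates translating the ACPI FFH arch_flags into
cpuidle flags to the architecture; the arm64 implementation appears in the
asm/acpi.h hunk earlier in this patch. A self-contained sketch of the
hook-plus-fallback arrangement (the generic fallback shown is an assumption,
since this patch only contains the arm64 side; the flag values are
placeholders for the demo):

#include <stdio.h>

typedef unsigned int u32;

#define DEMO_CPUIDLE_FLAG_TIMER_STOP	(1u << 0)	/* placeholder value */
#define DEMO_CPUIDLE_CORE_CTXT		(1u << 0)	/* FFH: core context lost */

/* arm64-style implementation, as added earlier in this patch */
static inline u32 arch_get_idle_state_flags(u32 arch_flags)
{
	if (arch_flags & DEMO_CPUIDLE_CORE_CTXT)
		return DEMO_CPUIDLE_FLAG_TIMER_STOP;
	return 0;
}
#define arch_get_idle_state_flags arch_get_idle_state_flags

/* assumed generic fallback other architectures would compile instead */
#ifndef arch_get_idle_state_flags
static inline u32 arch_get_idle_state_flags(u32 arch_flags) { return 0; }
#endif

int main(void)
{
	/* prints flags=1: the local timer stops in this idle state */
	printf("flags=%u\n", arch_get_idle_state_flags(DEMO_CPUIDLE_CORE_CTXT));
	return 0;
}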
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 32cfa3f4efd3d..297a88587031e 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		.ident = "Asus ExpertBook B1402CBA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
++		},
++	},
+ 	{
+ 		.ident = "Asus ExpertBook B1502CBA",
+ 		.matches = {
+@@ -500,16 +507,23 @@ static const struct dmi_system_id maingear_laptop[] = {
+ 
+ static const struct dmi_system_id pcspecialist_laptop[] = {
+ 	{
+-		.ident = "PCSpecialist Elimina Pro 16 M",
+-		/*
+-		 * Some models have product-name "Elimina Pro 16 M",
+-		 * others "GM6BGEQ". Match on board-name to match both.
+-		 */
++		/* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"),
+ 		},
+ 	},
++	{
++		/* TongFang GM6BG5Q, RTX 4050 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"),
++		},
++	},
++	{
++		/* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index d720f93d8b19c..ee8d586bb51ef 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4812,6 +4812,8 @@ static void binder_release_work(struct binder_proc *proc,
+ 				"undelivered TRANSACTION_ERROR: %u\n",
+ 				e->cmd);
+ 		} break;
++		case BINDER_WORK_TRANSACTION_PENDING:
++		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
+ 		case BINDER_WORK_TRANSACTION_COMPLETE: {
+ 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ 				"undelivered TRANSACTION_COMPLETE\n");
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6ae9cff6b50c5..2a21f4d9500db 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1977,6 +1977,96 @@ retry:
+ 	return rc;
+ }
+ 
++/**
++ *	ata_dev_power_set_standby - Set a device power mode to standby
++ *	@dev: target device
++ *
++ *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
++ *	For an HDD device, this spins down the disks.
++ *
++ *	LOCKING:
++ *	Kernel thread context (may sleep).
++ */
++void ata_dev_power_set_standby(struct ata_device *dev)
++{
++	unsigned long ap_flags = dev->link->ap->flags;
++	struct ata_taskfile tf;
++	unsigned int err_mask;
++
++	/* Issue STANDBY IMMEDIATE command only if supported by the device */
++	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
++		return;
++
++	/*
++	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
++	 * causing some drives to spin up and down again. For these, do nothing
++	 * if we are being called on shutdown.
++	 */
++	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
++	    system_state == SYSTEM_POWER_OFF)
++		return;
++
++	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
++	    system_entering_hibernation())
++		return;
++
++	ata_tf_init(dev, &tf);
++	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
++	tf.protocol = ATA_PROT_NODATA;
++	tf.command = ATA_CMD_STANDBYNOW1;
++
++	ata_dev_notice(dev, "Entering standby power mode\n");
++
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	if (err_mask)
++		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
++			    err_mask);
++}
++
++/**
++ *	ata_dev_power_set_active -  Set a device power mode to active
++ *	@dev: target device
++ *
++ *	Issue a VERIFY command to ensure that the device is in the
++ *	active power mode. For a spun-down HDD (standby or idle power mode),
++ *	the VERIFY command will complete after the disk spins up.
++ *
++ *	LOCKING:
++ *	Kernel thread context (may sleep).
++ */
++void ata_dev_power_set_active(struct ata_device *dev)
++{
++	struct ata_taskfile tf;
++	unsigned int err_mask;
++
++	/*
++	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
++	 * if supported by the device.
++	 */
++	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
++		return;
++
++	ata_tf_init(dev, &tf);
++	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
++	tf.protocol = ATA_PROT_NODATA;
++	tf.command = ATA_CMD_VERIFY;
++	tf.nsect = 1;
++	if (dev->flags & ATA_DFLAG_LBA) {
++		tf.flags |= ATA_TFLAG_LBA;
++		tf.device |= ATA_LBA;
++	} else {
++		/* CHS */
++		tf.lbal = 0x1; /* sect */
++	}
++
++	ata_dev_notice(dev, "Entering active power mode\n");
++
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	if (err_mask)
++		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
++			    err_mask);
++}
++
+ /**
+  *	ata_read_log_page - read a specific log page
+  *	@dev: target device
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 960ef5c6f2c10..150e7ab62d1ae 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -106,6 +106,14 @@ static const unsigned int ata_eh_flush_timeouts[] = {
+ 	UINT_MAX,
+ };
+ 
++static const unsigned int ata_eh_pm_timeouts[] = {
++	10000,	/* most drives spin up by 10sec */
++	10000,	/* > 99% working drives spin up before 20sec */
++	35000,	/* give > 30 secs of idleness for outlier devices */
++	 5000,	/* and sweet one last chance */
++	UINT_MAX, /* > 1 min has elapsed, give up */
++};
++
+ static const unsigned int ata_eh_other_timeouts[] = {
+ 	 5000,	/* same rationale as identify timeout */
+ 	10000,	/* ditto */
+@@ -147,6 +155,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
+ 	  .timeouts = ata_eh_other_timeouts, },
+ 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
+ 	  .timeouts = ata_eh_flush_timeouts },
++	{ .commands = CMDS(ATA_CMD_VERIFY),
++	  .timeouts = ata_eh_pm_timeouts },
+ };
+ #undef CMDS
+ 
+@@ -498,7 +508,19 @@ static void ata_eh_unload(struct ata_port *ap)
+ 	struct ata_device *dev;
+ 	unsigned long flags;
+ 
+-	/* Restore SControl IPM and SPD for the next driver and
++	/*
++	 * Unless we are restarting, transition all enabled devices to
++	 * standby power mode.
++	 */
++	if (system_state != SYSTEM_RESTART) {
++		ata_for_each_link(link, ap, PMP_FIRST) {
++			ata_for_each_dev(dev, link, ENABLED)
++				ata_dev_power_set_standby(dev);
++		}
++	}
++
++	/*
++	 * Restore SControl IPM and SPD for the next driver and
+ 	 * disable attached devices.
+ 	 */
+ 	ata_for_each_link(link, ap, PMP_FIRST) {
+@@ -690,6 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+ 				if (ata_ncq_enabled(dev))
+ 					ehc->saved_ncq_enabled |= 1 << devno;
++
++				/* If we are resuming, wake up the device */
++				if (ap->pflags & ATA_PFLAG_RESUMING)
++					ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ 			}
+ 		}
+ 
+@@ -753,6 +779,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ 	/* clean up */
+ 	spin_lock_irqsave(ap->lock, flags);
+ 
++	ap->pflags &= ~ATA_PFLAG_RESUMING;
++
+ 	if (ap->pflags & ATA_PFLAG_LOADING)
+ 		ap->pflags &= ~ATA_PFLAG_LOADING;
+ 	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+@@ -1244,6 +1272,13 @@ void ata_eh_detach_dev(struct ata_device *dev)
+ 	struct ata_eh_context *ehc = &link->eh_context;
+ 	unsigned long flags;
+ 
++	/*
++	 * If the device is still enabled, transition it to standby power mode
++	 * (i.e. spin down HDDs).
++	 */
++	if (ata_dev_enabled(dev))
++		ata_dev_power_set_standby(dev);
++
+ 	ata_dev_disable(dev);
+ 
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -3042,6 +3077,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ 		if (ehc->i.flags & ATA_EHI_DID_RESET)
+ 			readid_flags |= ATA_READID_POSTRESET;
+ 
++		/*
++		 * When resuming, before executing any command, make sure to
++		 * transition the device to the active power mode.
++		 */
++		if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
++			ata_dev_power_set_active(dev);
++			ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
++		}
++
+ 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
+ 			WARN_ON(dev->class == ATA_DEV_PMP);
+ 
+@@ -4015,6 +4059,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+ 	unsigned long flags;
+ 	int rc = 0;
+ 	struct ata_device *dev;
++	struct ata_link *link;
+ 
+ 	/* are we suspending? */
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -4027,6 +4072,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+ 
+ 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ 
++	/* Set all devices attached to the port in standby mode */
++	ata_for_each_link(link, ap, HOST_FIRST) {
++		ata_for_each_dev(dev, link, ENABLED)
++			ata_dev_power_set_standby(dev);
++	}
++
+ 	/*
+ 	 * If we have a ZPODD attached, check its zero
+ 	 * power ready status before the port is frozen.
+@@ -4109,6 +4160,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
+ 	/* update the flags */
+ 	spin_lock_irqsave(ap->lock, flags);
+ 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
++	ap->pflags |= ATA_PFLAG_RESUMING;
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index ed3146c460910..cdbc382e949b8 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1100,15 +1100,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 		}
+ 	} else {
+ 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
++
+ 		/*
+-		 * Stop the drive on suspend but do not issue START STOP UNIT
+-		 * on resume as this is not necessary and may fail: the device
+-		 * will be woken up by ata_port_pm_resume() with a port reset
+-		 * and device revalidation.
++		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
++		 * and resume only. For system level suspend/resume, devices
++		 * power state is handled directly by libata EH.
+ 		 */
+-		sdev->manage_system_start_stop = true;
+ 		sdev->manage_runtime_start_stop = true;
+-		sdev->no_start_on_resume = 1;
+ 	}
+ 
+ 	/*
+@@ -1284,7 +1282,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 	}
+ 
+ 	if (cdb[4] & 0x1) {
+-		tf->nsect = 1;	/* 1 sector, lba=0 */
++		tf->nsect = 1;  /* 1 sector, lba=0 */
+ 
+ 		if (qc->dev->flags & ATA_DFLAG_LBA) {
+ 			tf->flags |= ATA_TFLAG_LBA;
+@@ -1300,7 +1298,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 			tf->lbah = 0x0; /* cyl high */
+ 		}
+ 
+-		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
++		tf->command = ATA_CMD_VERIFY;   /* READ VERIFY */
+ 	} else {
+ 		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
+ 		 * or S5) causing some drives to spin up and down again.
+@@ -1310,7 +1308,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 			goto skip;
+ 
+ 		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+-		     system_entering_hibernation())
++		    system_entering_hibernation())
+ 			goto skip;
+ 
+ 		/* Issue ATA STANDBY IMMEDIATE command */
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index 76d0a5937b66a..b4f6cbf6c1d2d 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -62,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
+ extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+ 			      unsigned int readid_flags);
+ extern int ata_dev_configure(struct ata_device *dev);
++extern void ata_dev_power_set_standby(struct ata_device *dev);
++extern void ata_dev_power_set_active(struct ata_device *dev);
+ extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+ extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
+ extern unsigned int ata_dev_set_feature(struct ata_device *dev,
+diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
+index 1af64d435d3c2..cf87bbb52f1ff 100644
+--- a/drivers/ata/pata_parport/pata_parport.c
++++ b/drivers/ata/pata_parport/pata_parport.c
+@@ -51,6 +51,13 @@ static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
+ 	ata_sff_pause(ap);
+ }
+ 
++static void pata_parport_set_devctl(struct ata_port *ap, u8 ctl)
++{
++	struct pi_adapter *pi = ap->host->private_data;
++
++	pi->proto->write_regr(pi, 1, 6, ctl);
++}
++
+ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
+ {
+ 	struct pi_adapter *pi = ap->host->private_data;
+@@ -64,7 +71,7 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
+ 	pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
+ 	pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
+ 
+-	pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055);
++	pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
+ 	pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
+ 
+ 	nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+@@ -252,6 +259,7 @@ static struct ata_port_operations pata_parport_port_ops = {
+ 	.hardreset		= NULL,
+ 
+ 	.sff_dev_select		= pata_parport_dev_select,
++	.sff_set_devctl		= pata_parport_set_devctl,
+ 	.sff_check_status	= pata_parport_check_status,
+ 	.sff_check_altstatus	= pata_parport_check_altstatus,
+ 	.sff_tf_load		= pata_parport_tf_load,
+diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
+index 80acdf62794a3..afc94d0062b17 100644
+--- a/drivers/counter/counter-chrdev.c
++++ b/drivers/counter/counter-chrdev.c
+@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
+ 		if (*id == component_id)
+ 			return 0;
+ 
+-		if (ext->type == COUNTER_COMP_ARRAY) {
+-			element = ext->priv;
++		if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
++			element = ext[*ext_idx].priv;
+ 
+ 			if (component_id - *id < element->length)
+ 				return 0;
+diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
+index e2d1dc6ca6682..c7af13aca36cf 100644
+--- a/drivers/counter/microchip-tcb-capture.c
++++ b/drivers/counter/microchip-tcb-capture.c
+@@ -98,7 +98,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
+ 		priv->qdec_mode = 0;
+ 		/* Set highest rate based on whether soc has gclk or not */
+ 		bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
+-		if (priv->tc_cfg->has_gclk)
++		if (!priv->tc_cfg->has_gclk)
+ 			cmr |= ATMEL_TC_TIMER_CLOCK2;
+ 		else
+ 			cmr |= ATMEL_TC_TIMER_CLOCK1;
+diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
+index c625bb2b5d563..628af51c81af3 100644
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ 		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ 			if (!dma_fence_is_signaled(tmp)) {
+ 				++count;
+-			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+-					    &tmp->flags)) {
+-				if (ktime_after(tmp->timestamp, timestamp))
+-					timestamp = tmp->timestamp;
+ 			} else {
+-				/*
+-				 * Use the current time if the fence is
+-				 * currently signaling.
+-				 */
+-				timestamp = ktime_get();
++				ktime_t t = dma_fence_timestamp(tmp);
++
++				if (ktime_after(t, timestamp))
++					timestamp = t;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
+index af57799c86cee..2e9a316c596a3 100644
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
+ 		sizeof(info->driver_name));
+ 
+ 	info->status = dma_fence_get_status(fence);
+-	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+-	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+-		cpu_relax();
+ 	info->timestamp_ns =
+-		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+-		ktime_to_ns(fence->timestamp) :
+-		ktime_set(0, 0);
++		dma_fence_is_signaled(fence) ?
++			ktime_to_ns(dma_fence_timestamp(fence)) :
++			ktime_set(0, 0);
+ 
+ 	return info->status;
+ }
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 9a15f0d12c799..97b505f1115ab 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -492,6 +492,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	union idxd_command_reg cmd;
+ 	DECLARE_COMPLETION_ONSTACK(done);
+ 	u32 stat;
++	unsigned long flags;
+ 
+ 	if (idxd_device_is_halted(idxd)) {
+ 		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+@@ -505,7 +506,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	cmd.operand = operand;
+ 	cmd.int_req = 1;
+ 
+-	spin_lock(&idxd->cmd_lock);
++	spin_lock_irqsave(&idxd->cmd_lock, flags);
+ 	wait_event_lock_irq(idxd->cmd_waitq,
+ 			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
+ 			    idxd->cmd_lock);
+@@ -522,7 +523,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	 * After command submitted, release lock and go to sleep until
+ 	 * the command completes via interrupt.
+ 	 */
+-	spin_unlock(&idxd->cmd_lock);
++	spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ 	wait_for_completion(&done);
+ 	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ 	spin_lock(&idxd->cmd_lock);
+diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
+index a1517ef1f4a01..0acf6a92a4ad3 100644
+--- a/drivers/dma/mediatek/mtk-uart-apdma.c
++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
+@@ -451,9 +451,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
+ 	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+ 	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+ 
+-	synchronize_irq(c->irq);
+-
+ 	spin_unlock_irqrestore(&c->vc.lock, flags);
++	synchronize_irq(c->irq);
+ 
+ 	return 0;
+ }
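The reorder above matters because synchronize_irq() waits for any running
instance of the interrupt handler to finish; if that handler takes the same
vc.lock that the pause path is still holding, the two wait on each other
forever. Releasing the lock before waiting breaks the circular dependency. A
self-contained model of the fixed ordering (stand-in names, with a pthread
mutex in place of the kernel spinlock):

#include <pthread.h>

static pthread_mutex_t vc_lock = PTHREAD_MUTEX_INITIALIZER;

static void hw_disable_dma(void)
{
	/* stands in for the VFF_EN / VFF_INT_EN register writes */
}

static void wait_for_irq_handler(void)
{
	/* stands in for synchronize_irq(c->irq) */
}

static void device_pause_fixed(void)
{
	pthread_mutex_lock(&vc_lock);
	hw_disable_dma();		/* register writes done under the lock */
	pthread_mutex_unlock(&vc_lock);

	/*
	 * Safe: the IRQ handler can now take vc_lock and run to completion,
	 * so waiting for it here cannot deadlock.
	 */
	wait_for_irq_handler();
}

int main(void)
{
	device_pause_fixed();
	return 0;
}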
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 37674029cb427..592d48ecf241f 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
+ 		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+ 
+ 	/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
+-	if (chan->trig_mdma && sg_len > 1)
++	if (chan->trig_mdma && sg_len > 1) {
+ 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
++		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
++	}
+ 
+ 	for_each_sg(sgl, sg, sg_len, i) {
+ 		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
+@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+ 
+ 	residue = stm32_dma_get_remaining_bytes(chan);
+ 
+-	if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
++	if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
+ 		n_sg++;
+ 		if (n_sg == chan->desc->num_sgs)
+ 			n_sg = 0;
+-		residue = sg_req->len;
++		if (!chan->trig_mdma)
++			residue = sg_req->len;
+ 	}
+ 
+ 	/*
+@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+ 	 * residue = remaining bytes from NDTR + remaining
+ 	 * periods/sg to be transferred
+ 	 */
+-	if (!chan->desc->cyclic || n_sg != 0)
++	if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
+ 		for (i = n_sg; i < desc->num_sgs; i++)
+ 			residue += desc->sg_req[i].len;
+ 
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 1d0e9dd72ab39..2e37c47044af5 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -778,8 +778,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
+ 	/* Enable interrupts */
+ 	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ 	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
+-	if (sg_len > 1)
+-		ccr |= STM32_MDMA_CCR_BTIE;
+ 	desc->ccr = ccr;
+ 
+ 	return 0;
+@@ -1237,6 +1235,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
+ 	unsigned long flags;
+ 	u32 status, reg;
+ 
++	/* Transfer can be terminated */
++	if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
++		return -EPERM;
++
+ 	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
+ 
+ 	spin_lock_irqsave(&chan->vchan.lock, flags);
+@@ -1317,21 +1319,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
+ 
+ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
+ 				      struct stm32_mdma_desc *desc,
+-				      u32 curr_hwdesc)
++				      u32 curr_hwdesc,
++				      struct dma_tx_state *state)
+ {
+ 	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ 	struct stm32_mdma_hwdesc *hwdesc;
+-	u32 cbndtr, residue, modulo, burst_size;
++	u32 cisr, clar, cbndtr, residue, modulo, burst_size;
+ 	int i;
+ 
++	cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
++
+ 	residue = 0;
+-	for (i = curr_hwdesc + 1; i < desc->count; i++) {
++	/* Get the next hw descriptor to process from current transfer */
++	clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
++	for (i = desc->count - 1; i >= 0; i--) {
+ 		hwdesc = desc->node[i].hwdesc;
++
++		if (hwdesc->clar == clar)
++			break; /* Current transfer found, stop accumulating */
++
++		/* Cumulate residue of unprocessed hw descriptors */
+ 		residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
+ 	}
+ 	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+ 	residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+ 
++	state->in_flight_bytes = 0;
++	if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
++		state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
++
+ 	if (!chan->mem_burst)
+ 		return residue;
+ 
+@@ -1361,11 +1377,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+ 
+ 	vdesc = vchan_find_desc(&chan->vchan, cookie);
+ 	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+-		residue = stm32_mdma_desc_residue(chan, chan->desc,
+-						  chan->curr_hwdesc);
++		residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
+ 	else if (vdesc)
+-		residue = stm32_mdma_desc_residue(chan,
+-						  to_stm32_mdma_desc(vdesc), 0);
++		residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
++
+ 	dma_set_residue(state, residue);
+ 
+ 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+index 4620c4712ce32..1005edeea39e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+@@ -169,6 +169,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+ 		csum += pia[size - 1];
+ 	if (csum) {
+ 		DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
++		kfree(pia);
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 05496b97ef930..06ad7a77eb0a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -250,7 +250,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 	struct amdgpu_res_cursor cursor;
+ 
+-	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
++	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
+ 		return false;
+ 
+ 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+index c435f7632e8e8..5ee87965a0781 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+@@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
+ 			int32_t N;
+ 			int32_t j;
+ 
+-			if (!pipe_ctx->stream)
++			if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
+ 				continue;
+ 			/* Virtual encoders don't have this function */
+ 			if (!stream_enc->funcs->get_fifo_cal_average_level)
+@@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
+ 			int32_t N;
+ 			int32_t j;
+ 
+-			if (!pipe_ctx->stream)
++			if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
+ 				continue;
+ 			/* Virtual encoders don't have this function */
+ 			if (!stream_enc->funcs->get_fifo_cal_average_level)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 5fc78bf927bbc..475161521082c 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -355,7 +355,7 @@ static void dcn32_update_clocks_update_dentist(
+ 			int32_t N;
+ 			int32_t j;
+ 
+-			if (!pipe_ctx->stream)
++			if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
+ 				continue;
+ 			/* Virtual encoders don't have this function */
+ 			if (!stream_enc->funcs->get_fifo_cal_average_level)
+@@ -401,7 +401,7 @@ static void dcn32_update_clocks_update_dentist(
+ 			int32_t N;
+ 			int32_t j;
+ 
+-			if (!pipe_ctx->stream)
++			if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
+ 				continue;
+ 			/* Virtual encoders don't have this function */
+ 			if (!stream_enc->funcs->get_fifo_cal_average_level)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 1729fb727333c..609048160aa20 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1263,6 +1263,9 @@ static void disable_vbios_mode_if_required(
+ 		if (stream == NULL)
+ 			continue;
+ 
++		if (stream->apply_seamless_boot_optimization)
++			continue;
++
+ 		// only looking for first odm pipe
+ 		if (pipe->prev_odm_pipe)
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 2f3d9a698486d..f2dd3c166af09 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1348,6 +1348,41 @@ struct pipe_ctx *find_idle_secondary_pipe(
+ 	return secondary_pipe;
+ }
+ 
++bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
++{
++#ifdef DBG
++	if (pipe_ctx->stream == NULL) {
++		/* a free pipe with dangling states */
++		ASSERT(!pipe_ctx->plane_state);
++		ASSERT(!pipe_ctx->prev_odm_pipe);
++		ASSERT(!pipe_ctx->next_odm_pipe);
++		ASSERT(!pipe_ctx->top_pipe);
++		ASSERT(!pipe_ctx->bottom_pipe);
++	} else if (pipe_ctx->top_pipe) {
++		/* a secondary DPP pipe must be assigned to a plane */
++		ASSERT(pipe_ctx->plane_state);
++	}
++	/* Add more checks here to prevent corrupted pipe ctx. It is very hard
++	 * to debug this issue afterwards because we can't pinpoint the code
++	 * location causing inconsistent pipe context states.
++	 */
++#endif
++	switch (type) {
++	case OTG_MASTER:
++		return !pipe_ctx->prev_odm_pipe &&
++				!pipe_ctx->top_pipe &&
++				pipe_ctx->stream;
++	case OPP_HEAD:
++		return !pipe_ctx->top_pipe && pipe_ctx->stream;
++	case DPP_PIPE:
++		return pipe_ctx->plane_state && pipe_ctx->stream;
++	case FREE_PIPE:
++		return !pipe_ctx->plane_state && !pipe_ctx->stream;
++	default:
++		return false;
++	}
++}
++
+ struct pipe_ctx *resource_get_head_pipe_for_stream(
+ 		struct resource_context *res_ctx,
+ 		struct dc_stream_state *stream)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index eaeb684c8a48c..3088c6c65731f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -153,6 +153,112 @@ bool resource_attach_surfaces_to_context(
+ 		struct dc_state *context,
+ 		const struct resource_pool *pool);
+ 
++/*
++ * pipe types are identified based on MUXes in DCN front end that are capable
++ * of taking input from one DCN pipeline to another DCN pipeline. The name is
++ * in a form of XXXX_YYYY, where XXXX is the DCN front end hardware block the
++ * pipeline ends with and YYYY is the rendering role that the pipe is in.
++ *
++ * For instance OTG_MASTER is a pipe ending with OTG hardware block in its
++ * pipeline and it is in a role of a master pipe for timing generation.
++ *
++ * For quick reference a diagram of each pipe type's areas of responsibility
++ * for outputting timings on the screen is shown below:
++ *
++ *       Timing Active for Stream 0
++ *        __________________________________________________
++ *       |OTG master 0 (OPP head 0)|OPP head 2 (DPP pipe 2) |
++ *       |             (DPP pipe 0)|                        |
++ *       | Top Plane 0             |                        |
++ *       |           ______________|____                    |
++ *       |          |DPP pipe 1    |DPP |                   |
++ *       |          |              |pipe|                   |
++ *       |          |  Bottom      |3   |                   |
++ *       |          |  Plane 1     |    |                   |
++ *       |          |              |    |                   |
++ *       |          |______________|____|                   |
++ *       |                         |                        |
++ *       |                         |                        |
++ *       | ODM slice 0             | ODM slice 1            |
++ *       |_________________________|________________________|
++ *
++ *       Timing Active for Stream 1
++ *        __________________________________________________
++ *       |OTG master 4 (OPP head 4)                         |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |               Blank Pixel Data                   |
++ *       |              (generated by DPG4)                 |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |                                                  |
++ *       |__________________________________________________|
++ *
++ *       Inter-pipe Relation
++ *        __________________________________________________
++ *       |PIPE IDX|   DPP PIPES   | OPP HEADS | OTG MASTER  |
++ *       |        |  plane 0      | slice 0   |             |
++ *       |   0    | -------------MPC---------ODM----------- |
++ *       |        |  plane 1    | |         | |             |
++ *       |   1    | ------------- |         | |             |
++ *       |        |  plane 0      | slice 1 | |             |
++ *       |   2    | -------------MPC--------- |             |
++ *       |        |  plane 1    | |           |             |
++ *       |   3    | ------------- |           |             |
++ *       |        |               | blank     |             |
++ *       |   4    |               | ----------------------- |
++ *       |        |               |           |             |
++ *       |   5    |  (FREE)       |           |             |
++ *       |________|_______________|___________|_____________|
++ */
++enum pipe_type {
++	/* free pipe - free pipe is an uninitialized pipe without a stream
++	 * associated with it. It is a free DCN pipe resource. It can be
++	 * acquired as any type of pipe.
++	 */
++	FREE_PIPE,
++
++	/* OTG master pipe - the master pipe of its OPP head pipes with a
++	 * functional OTG. It merges all its OPP head pipes' pixel data in the
++	 * ODM block and outputs it to the backend DIG. The OTG master pipe is
++	 * responsible for generating the entire CRTC timing for the backend
++	 * DIG. An OTG master pipe may or may not have a plane. If it has a
++	 * plane it blends it as the leftmost MPC slice of the topmost layer.
++	 * If it doesn't have a plane it can output its OPP head pipes' test
++	 * pattern generator (DPG) data, such as solid black, to blank the screen.
++	 */
++	OTG_MASTER,
++
++	/* OPP head pipe - the head pipe of an MPC blending tree with a
++	 * functional OPP outputting to an OTG. OPP head pipe is responsible for
++	 * processing output pixels in its own ODM slice. It may or may not have
++	 * a plane. If it has a plane it blends it as the top most layer within
++	 * its own ODM slice. If it doesn't have a plane it can output pixel
++	 * data from its DPG such as solid black pixel data to blank the pixel
++	 * data in its own ODM slice. OTG master pipe is also an OPP head pipe
++	 * but with more responsibility.
++	 */
++	OPP_HEAD,
++
++	/* DPP pipe - the pipe with a functional DPP outputting to an OPP head
++	 * pipe's MPC. DPP pipe is responsible for processing pixel data from
++	 * its own MPC slice of a plane. It must be connected to an OPP head
++	 * pipe and it must have a plane associated with it.
++	 */
++	DPP_PIPE,
++};
++
++/*
++ * Determine if the input pipe ctx is of a pipe type.
++ * return - true if pipe ctx is of the input type.
++ */
++bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type);
++
+ struct pipe_ctx *find_idle_secondary_pipe(
+ 		struct resource_context *res_ctx,
+ 		const struct resource_pool *pool,
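
The classification rules documented above boil down to a few pointer checks per pipe type. Below is a condensed, user-space C sketch of the same decision table (the struct fields are simplified stand-ins for the real struct pipe_ctx, and the DBG-only ASSERT paths are omitted):

#include <stdbool.h>

enum pipe_type { FREE_PIPE, OTG_MASTER, OPP_HEAD, DPP_PIPE };

struct pipe_ctx {
	void *stream;			/* stand-in for struct dc_stream_state * */
	void *plane_state;		/* stand-in for struct dc_plane_state * */
	struct pipe_ctx *prev_odm_pipe;	/* previous ODM slice, if any */
	struct pipe_ctx *top_pipe;	/* MPC layer above this one, if any */
};

/* Each pipe type is a predicate over the stream/plane pointers and the
 * ODM/MPC tree links, exactly as in resource_is_pipe_type().
 */
bool is_pipe_type(const struct pipe_ctx *p, enum pipe_type type)
{
	switch (type) {
	case OTG_MASTER:	/* heads an ODM chain and owns the timing */
		return !p->prev_odm_pipe && !p->top_pipe && p->stream;
	case OPP_HEAD:		/* heads an MPC blending tree in its ODM slice */
		return !p->top_pipe && p->stream;
	case DPP_PIPE:		/* processes one MPC slice of a plane */
		return p->plane_state && p->stream;
	case FREE_PIPE:		/* nothing attached, free to acquire */
		return !p->plane_state && !p->stream;
	default:
		return false;
	}
}

Note that every pipe satisfying the OTG_MASTER predicate also satisfies OPP_HEAD, matching the comment above that an OTG master pipe is an OPP head pipe with more responsibility.
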
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 41b8066f61fff..09fce8f756283 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -290,7 +290,8 @@ static int
+ update_connector_routing(struct drm_atomic_state *state,
+ 			 struct drm_connector *connector,
+ 			 struct drm_connector_state *old_connector_state,
+-			 struct drm_connector_state *new_connector_state)
++			 struct drm_connector_state *new_connector_state,
++			 bool added_by_user)
+ {
+ 	const struct drm_connector_helper_funcs *funcs;
+ 	struct drm_encoder *new_encoder;
+@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
+ 	 * there's a chance the connector may have been destroyed during the
+ 	 * process, but it's better to ignore that than cause
+ 	 * drm_atomic_helper_resume() to fail.
++	 *
++	 * Last, we want to ignore connector registration when the connector
++	 * was not pulled into the atomic state by user-space (i.e., was pulled
++	 * in by the driver, e.g. when updating a DP-MST stream).
+ 	 */
+ 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
+-	    crtc_state->active) {
++	    added_by_user && crtc_state->active) {
+ 		drm_dbg_atomic(connector->dev,
+ 			       "[CONNECTOR:%d:%s] is not registered\n",
+ 			       connector->base.id, connector->name);
+@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
+ 	struct drm_connector *connector;
+ 	struct drm_connector_state *old_connector_state, *new_connector_state;
+ 	int i, ret;
+-	unsigned int connectors_mask = 0;
++	unsigned int connectors_mask = 0, user_connectors_mask = 0;
++
++	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
++		user_connectors_mask |= BIT(i);
+ 
+ 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ 		bool has_connectors =
+@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
+ 		 */
+ 		ret = update_connector_routing(state, connector,
+ 					       old_connector_state,
+-					       new_connector_state);
++					       new_connector_state,
++					       BIT(i) & user_connectors_mask);
+ 		if (ret)
+ 			return ret;
+ 		if (old_connector_state->crtc) {
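
The added_by_user plumbing in the hunks above works because any connector present in the atomic state before the helper starts pulling in extra ones must have come from the caller; a bit per state index records that snapshot. A small stand-alone illustration of the bitmask snapshot (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

struct connector_state { const char *name; };

int main(void)
{
	struct connector_state states[4] = { { "DP-1" }, { "HDMI-1" } };
	int user_count = 2;		/* slots filled by the caller */
	unsigned int user_mask = 0;
	int i;

	/* Phase 1: snapshot which slots user-space filled. */
	for (i = 0; i < user_count; i++)
		user_mask |= BIT(i);

	/* Phase 2: the driver pulls in one more connector; its bit stays clear. */
	states[2].name = "DP-MST-1";

	for (i = 0; i < 3; i++) {
		bool by_user = user_mask & BIT(i);

		printf("%s: added_by_user=%d\n", states[i].name, (int)by_user);
	}
	return 0;
}
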
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 78dcae201cc64..916d55f2b711e 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -537,7 +537,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
+ 	struct page **pages;
+ 	struct folio *folio;
+ 	struct folio_batch fbatch;
+-	int i, j, npages;
++	long i, j, npages;
+ 
+ 	if (WARN_ON(!obj->filp))
+ 		return ERR_PTR(-EINVAL);
+@@ -561,11 +561,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
+ 
+ 	i = 0;
+ 	while (i < npages) {
++		long nr;
+ 		folio = shmem_read_folio_gfp(mapping, i,
+ 				mapping_gfp_mask(mapping));
+ 		if (IS_ERR(folio))
+ 			goto fail;
+-		for (j = 0; j < folio_nr_pages(folio); j++, i++)
++		nr = min(npages - i, folio_nr_pages(folio));
++		for (j = 0; j < nr; j++, i++)
+ 			pages[i] = folio_file_page(folio, i);
+ 
+ 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
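
The drm_gem_get_pages() change above clamps the per-folio inner loop so a large folio extending past the object cannot overrun the pages array. The same clamp in isolation, with the folio machinery replaced by a hypothetical chunked source:

#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Fill exactly `npages` slots even when the source hands back chunks
 * (folios) that can be larger than what remains to be filled.
 */
void fill_pages(void **pages, long npages,
		void *(*get_chunk)(long idx, long *chunk_len))
{
	long i = 0;

	while (i < npages) {
		long chunk_len, j, nr;
		char *chunk = get_chunk(i, &chunk_len);

		/* The clamp the fix adds: never walk past npages. */
		nr = MIN(npages - i, chunk_len);
		for (j = 0; j < nr; j++, i++)
			pages[i] = chunk + j;
	}
}
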
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 1f65bb33dd212..a8551ce322de2 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
+ 			goto err_unlock;
+ 	}
+ 
++	/*
++	 * Register engines early to ensure the engine list is in its final
++	 * rb-tree form, lowering the amount of code that has to deal with
++	 * the intermediate llist state.
++	 */
++	intel_engines_driver_register(dev_priv);
++
+ 	return 0;
+ 
+ 	/*
+@@ -1246,8 +1253,6 @@ err_unlock:
+ void i915_gem_driver_register(struct drm_i915_private *i915)
+ {
+ 	i915_gem_driver_register__shrinker(i915);
+-
+-	intel_engines_driver_register(i915);
+ }
+ 
+ void i915_gem_driver_unregister(struct drm_i915_private *i915)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index c2aaaded07ed6..0be195f9149c5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
+ 	struct dpu_sw_pipe_cfg *pipe_cfg)
+ {
+ 	int src_width, src_height, dst_height, fps;
++	u64 plane_pixel_rate, plane_bit_rate;
+ 	u64 plane_prefill_bw;
+ 	u64 plane_bw;
+ 	u32 hw_latency_lines;
+@@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
+ 	scale_factor = src_height > dst_height ?
+ 		mult_frac(src_height, 1, dst_height) : 1;
+ 
+-	plane_bw =
+-		src_width * mode->vtotal * fps * fmt->bpp *
+-		scale_factor;
++	plane_pixel_rate = src_width * mode->vtotal * fps;
++	plane_bit_rate = plane_pixel_rate * fmt->bpp;
+ 
+-	plane_prefill_bw =
+-		src_width * hw_latency_lines * fps * fmt->bpp *
+-		scale_factor * mode->vtotal;
++	plane_bw = plane_bit_rate * scale_factor;
++
++	plane_prefill_bw = plane_bw * hw_latency_lines;
+ 
+ 	if ((vbp+vpw) > hw_latency_lines)
+ 		do_div(plane_prefill_bw, (vbp+vpw));
+@@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
+ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
+ 		struct dpu_sw_pipe *pipe,
+ 		struct dpu_sw_pipe_cfg *pipe_cfg,
+-		const struct dpu_format *fmt)
++		const struct dpu_format *fmt,
++		const struct drm_display_mode *mode)
+ {
+ 	uint32_t min_src_size;
++	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ 
+ 	min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+ 
+@@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
+ 		return -EINVAL;
+ 	}
+ 
++	/* max clk check */
++	if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
++		DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
++		return -E2BIG;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
+ 		r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ 	}
+ 
+-	ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
++	ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (r_pipe->sspp) {
+-		ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
++		ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
++						  &crtc_state->adjusted_mode);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index a7a5c7e0ab923..77a8d9366ed7b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ 		return rc;
+ 
+ 	while (--link_train_max_retries) {
+-		rc = dp_ctrl_reinitialize_mainlink(ctrl);
+-		if (rc) {
+-			DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+-					rc);
+-			break;
+-		}
+-
+ 		training_step = DP_TRAINING_NONE;
+ 		rc = dp_ctrl_setup_main_link(ctrl, &training_step);
+ 		if (rc == 0) {
+@@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ 			/* stop link training before start re training  */
+ 			dp_ctrl_clear_training_pattern(ctrl);
+ 		}
++
++		rc = dp_ctrl_reinitialize_mainlink(ctrl);
++		if (rc) {
++			DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
++			break;
++		}
+ 	}
+ 
+ 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
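
The dp_ctrl_on_link() hunks above move main-link re-initialization from the top of the retry loop to the bottom: the first training attempt now runs on the link that was just brought up, and re-init only happens between failed attempts. The resulting control flow, reduced to a skeleton with hypothetical callbacks:

/* Before the fix, reinit() ran at the top of every iteration, including
 * the first; now the first attempt() runs on the freshly brought-up link
 * and reinit() only runs between failed attempts.
 */
int train_with_retries(int max_retries, int (*attempt)(void), int (*reinit)(void))
{
	int rc = -1;

	while (--max_retries) {
		rc = attempt();
		if (rc == 0)
			break;		/* link trained successfully */

		rc = reinit();		/* moved to the bottom of the loop */
		if (rc)
			break;		/* cannot recover the main link */
	}
	return rc;
}
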
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index 42427129acea8..6375daaeb98e1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+ 	} else if (dp_link_read_psr_error_status(link)) {
+ 		DRM_ERROR("PSR IRQ_HPD received\n");
+ 	} else if (dp_link_psr_capability_changed(link)) {
+-		drm_dbg_dp(link->drm_dev, "PSR Capability changed");
++		drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
+ 	} else {
+ 		ret = dp_link_process_link_status_update(link);
+ 		if (!ret) {
+@@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+ 		}
+ 	}
+ 
+-	drm_dbg_dp(link->drm_dev, "sink request=%#x",
++	drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
+ 				dp_link->sink_request);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 3f6dfb4f9d5a6..9ac62651eb756 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1075,9 +1075,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+ 
+ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+ {
++	u32 data;
++
+ 	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ 		return;
+ 
++	data = dsi_read(msm_host, REG_DSI_STATUS0);
++
++	/* if the video mode engine is not busy, it's because
++	 * either the timing engine was not turned on or the
++	 * DSI controller has already finished transmitting the
++	 * video data, so there is no need to wait in those cases
++	 */
++	if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
++		return;
++
+ 	if (msm_host->power_on && msm_host->enabled) {
+ 		dsi_wait4video_done(msm_host);
+ 		/* delay 4 ms to skip BLLP */
+@@ -1887,10 +1899,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+ 	}
+ 
+ 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+-	if (msm_host->irq < 0) {
+-		ret = msm_host->irq;
+-		dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+-		return ret;
++	if (!msm_host->irq) {
++		dev_err(&pdev->dev, "failed to get irq\n");
++		return -EINVAL;
+ 	}
+ 
+ 	/* do not autoenable, will be enabled later */
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index dc276c346fd1a..dcc5e79cfe879 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -1343,9 +1343,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
+ 	_INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
+ 	_INIT_DCS_CMD(0xCB, 0x86),
+ 	_INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
+-	_INIT_DCS_CMD(0xE9, 0xC5),
+-	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
+-	_INIT_DCS_CMD(0xE9, 0x3F),
++	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
+ 	_INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
+ 	_INIT_DCS_CMD(0xBD, 0x02),
+ 	_INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 506371c427451..5a3a622fc672f 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+ 
+ 		if (next) {
+ 			next->s_fence->scheduled.timestamp =
+-				job->s_fence->finished.timestamp;
++				dma_fence_timestamp(&job->s_fence->finished);
+ 			/* start TO timer for next job */
+ 			drm_sched_start_timeout(sched);
+ 		}
+diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
+index 25e11ef11c4ce..79112b1957d25 100644
+--- a/drivers/gpu/drm/tiny/simpledrm.c
++++ b/drivers/gpu/drm/tiny/simpledrm.c
+@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
+ 
+ 		ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
+ 		if (ret) {
+-			drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
++			drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ 			return ERR_PTR(ret);
+ 		}
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index c43853597776f..2bfac3aad7b7d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -34,6 +34,8 @@
+ 
+ static void vmw_bo_release(struct vmw_bo *vbo)
+ {
++	WARN_ON(vbo->tbo.base.funcs &&
++		kref_read(&vbo->tbo.base.refcount) != 0);
+ 	vmw_bo_unmap(vbo);
+ 	drm_gem_object_release(&vbo->tbo.base);
+ }
+@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ 		if (!(flags & drm_vmw_synccpu_allow_cs)) {
+ 			atomic_dec(&vmw_bo->cpu_writers);
+ 		}
+-		vmw_user_bo_unref(vmw_bo);
++		vmw_user_bo_unref(&vmw_bo);
+ 	}
+ 
+ 	return ret;
+@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ 			return ret;
+ 
+ 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
+-		vmw_user_bo_unref(vbo);
++		vmw_user_bo_unref(&vbo);
+ 		if (unlikely(ret != 0)) {
+ 			if (ret == -ERESTARTSYS || ret == -EBUSY)
+ 				return -EBUSY;
+@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
+ 	}
+ 
+ 	*out = to_vmw_bo(gobj);
+-	ttm_bo_get(&(*out)->tbo);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+index 1d433fceed3d8..0d496dc9c6af7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
+ 	return buf;
+ }
+ 
+-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
++static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
+ {
+-	if (vbo) {
+-		ttm_bo_put(&vbo->tbo);
+-		drm_gem_object_put(&vbo->tbo.base);
+-	}
++	drm_gem_object_get(&vbo->tbo.base);
++	return vbo;
++}
++
++static inline void vmw_user_bo_unref(struct vmw_bo **buf)
++{
++	struct vmw_bo *tmp_buf = *buf;
++
++	*buf = NULL;
++	if (tmp_buf)
++		drm_gem_object_put(&tmp_buf->tbo.base);
+ }
+ 
+ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
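
The new vmw_user_bo_unref() above takes a struct vmw_bo ** and clears the caller's pointer before dropping the GEM reference, so a stale pointer cannot be unreferenced twice. The take-and-clear idiom in general form (hypothetical object type):

#include <stddef.h>
#include <stdlib.h>

struct obj { int refcount; };

void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Take-and-clear unref: the caller's pointer is cleared before the
 * reference is dropped, so a second call through the same variable is
 * a harmless no-op instead of a double put.
 */
void obj_unref(struct obj **p)
{
	struct obj *tmp = *p;

	*p = NULL;
	if (tmp)
		obj_put(tmp);
}

This is also why the vmwgfx callers above that still need the pointer afterwards copy it into a tmp_bo first and unref the copy.
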
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+index c0b24d1cacbf0..a7c07692262b8 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+ 	 * for the new COTable. Initially pin the buffer object to make sure
+ 	 * we can use tryreserve without failure.
+ 	 */
+-	ret = vmw_bo_create(dev_priv, &bo_params, &buf);
++	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ 	if (ret) {
+ 		DRM_ERROR("Failed initializing new cotable MOB.\n");
+ 		goto out_done;
+@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+ 
+ 	vmw_resource_mob_attach(res);
+ 	/* Let go of the old mob. */
+-	vmw_bo_unreference(&old_buf);
++	vmw_user_bo_unref(&old_buf);
+ 	res->id = vcotbl->type;
+ 
+ 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
+@@ -521,7 +521,7 @@ out_map_new:
+ out_wait:
+ 	ttm_bo_unpin(bo);
+ 	ttm_bo_unreserve(bo);
+-	vmw_bo_unreference(&buf);
++	vmw_user_bo_unref(&buf);
+ 
+ out_done:
+ 	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 58bfdf203ecae..3cd5090dedfc5 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+ /**
+  * GEM related functionality - vmwgfx_gem.c
+  */
++struct vmw_bo_params;
++int vmw_gem_object_create(struct vmw_private *vmw,
++			  struct vmw_bo_params *params,
++			  struct vmw_bo **p_vbo);
+ extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 					     struct drm_file *filp,
+ 					     uint32_t size,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 98e0723ca6f5e..36987ef3fc300 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 				 SVGAMobId *id,
+ 				 struct vmw_bo **vmw_bo_p)
+ {
+-	struct vmw_bo *vmw_bo;
++	struct vmw_bo *vmw_bo, *tmp_bo;
+ 	uint32_t handle = *id;
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 	}
+ 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+-	vmw_user_bo_unref(vmw_bo);
++	tmp_bo = vmw_bo;
++	vmw_user_bo_unref(&tmp_bo);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 				   SVGAGuestPtr *ptr,
+ 				   struct vmw_bo **vmw_bo_p)
+ {
+-	struct vmw_bo *vmw_bo;
++	struct vmw_bo *vmw_bo, *tmp_bo;
+ 	uint32_t handle = ptr->gmrId;
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+-	vmw_user_bo_unref(vmw_bo);
++	tmp_bo = vmw_bo;
++	vmw_user_bo_unref(&tmp_bo);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
+ 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+-	  ((unsigned long) header + header->size + sizeof(header));
++	  ((unsigned long) header + header->size + sizeof(*header));
+ 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ 		((unsigned long) header + sizeof(*cmd));
+ 	struct vmw_resource *ctx;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index c0da89e16e6fa..8b1eb0061610c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+ 	.vm_ops = &vmw_vm_ops,
+ };
+ 
++int vmw_gem_object_create(struct vmw_private *vmw,
++			  struct vmw_bo_params *params,
++			  struct vmw_bo **p_vbo)
++{
++	int ret = vmw_bo_create(vmw, params, p_vbo);
++
++	if (ret != 0)
++		goto out_no_bo;
++
++	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
++out_no_bo:
++	return ret;
++}
++
+ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 				      struct drm_file *filp,
+ 				      uint32_t size,
+@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 		.pin = false
+ 	};
+ 
+-	ret = vmw_bo_create(dev_priv, &params, p_vbo);
++	ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ 	if (ret != 0)
+ 		goto out_no_bo;
+ 
+-	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+-
+ 	ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
+ out_no_bo:
+ 	return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 1489ad73c103f..818b7f109f538 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
+ 	/* Reserve and switch the backing mob. */
+ 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ 	(void) vmw_resource_reserve(res, false, true);
+-	vmw_bo_unreference(&res->guest_memory_bo);
+-	res->guest_memory_bo = vmw_bo_reference(bo_mob);
++	vmw_user_bo_unref(&res->guest_memory_bo);
++	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
+ 	res->guest_memory_offset = 0;
+ 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
+ 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ err_out:
+ 	/* vmw_user_lookup_handle takes one ref so does new_fb */
+ 	if (bo)
+-		vmw_user_bo_unref(bo);
++		vmw_user_bo_unref(&bo);
+ 	if (surface)
+ 		vmw_surface_unreference(&surface);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index fb85f244c3d02..c45b4724e4141 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ 
+ 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+ 
+-	vmw_user_bo_unref(buf);
++	vmw_user_bo_unref(&buf);
+ 
+ out_unlock:
+ 	mutex_unlock(&overlay->mutex);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 71eeabf001c87..ca300c7427d28 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
+ 		if (res->coherent)
+ 			vmw_bo_dirty_release(res->guest_memory_bo);
+ 		ttm_bo_unreserve(bo);
+-		vmw_bo_unreference(&res->guest_memory_bo);
++		vmw_user_bo_unref(&res->guest_memory_bo);
+ 	}
+ 
+ 	if (likely(res->hw_destroy != NULL)) {
+@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ 		return 0;
+ 	}
+ 
+-	ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
++	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ 	if (unlikely(ret != 0))
+ 		goto out_no_bo;
+ 
+@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
+ 			vmw_resource_mob_detach(res);
+ 			if (res->coherent)
+ 				vmw_bo_dirty_release(res->guest_memory_bo);
+-			vmw_bo_unreference(&res->guest_memory_bo);
++			vmw_user_bo_unref(&res->guest_memory_bo);
+ 		}
+ 
+ 		if (new_guest_memory_bo) {
+-			res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
++			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
+ 
+ 			/*
+ 			 * The validation code should already have added a
+@@ -551,7 +551,7 @@ out_no_reserve:
+ 	ttm_bo_put(val_buf->bo);
+ 	val_buf->bo = NULL;
+ 	if (guest_memory_dirty)
+-		vmw_bo_unreference(&res->guest_memory_bo);
++		vmw_user_bo_unref(&res->guest_memory_bo);
+ 
+ 	return ret;
+ }
+@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ 		goto out_no_validate;
+ 	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
+ 		WARN_ON_ONCE(vmw_resource_mob_attached(res));
+-		vmw_bo_unreference(&res->guest_memory_bo);
++		vmw_user_bo_unref(&res->guest_memory_bo);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index 1e81ff2422cf6..a01ca3226d0af 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+ 
+ 	res->guest_memory_size = size;
+ 	if (byte_code) {
+-		res->guest_memory_bo = vmw_bo_reference(byte_code);
++		res->guest_memory_bo = vmw_user_bo_ref(byte_code);
+ 		res->guest_memory_offset = offset;
+ 	}
+ 	shader->size = size;
+@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+ 				    shader_type, num_input_sig,
+ 				    num_output_sig, tfile, shader_handle);
+ out_bad_arg:
+-	vmw_user_bo_unref(buffer);
++	vmw_user_bo_unref(&buffer);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 5db403ee8261d..3829be282ff00 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	    container_of(base, struct vmw_user_surface, prime.base);
+ 	struct vmw_resource *res = &user_srf->srf.res;
+ 
+-	if (res->guest_memory_bo)
+-		drm_gem_object_put(&res->guest_memory_bo->tbo.base);
+-
+ 	*p_base = NULL;
+ 	vmw_resource_unreference(&res);
+ }
+@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 	 * expect a backup buffer to be present.
+ 	 */
+ 	if (dev_priv->has_mob && req->shareable) {
+-		uint32_t backup_handle;
+-
+-		ret = vmw_gem_object_create_with_handle(dev_priv,
+-							file_priv,
+-							res->guest_memory_size,
+-							&backup_handle,
+-							&res->guest_memory_bo);
++		struct vmw_bo_params params = {
++			.domain = VMW_BO_DOMAIN_SYS,
++			.busy_domain = VMW_BO_DOMAIN_SYS,
++			.bo_type = ttm_bo_type_device,
++			.size = res->guest_memory_size,
++			.pin = false
++		};
++
++		ret = vmw_gem_object_create(dev_priv,
++					    &params,
++					    &res->guest_memory_bo);
+ 		if (unlikely(ret != 0)) {
+ 			vmw_resource_unreference(&res);
+ 			goto out_unlock;
+ 		}
+-		vmw_bo_reference(res->guest_memory_bo);
+-		/*
+-		 * We don't expose the handle to the userspace and surface
+-		 * already holds a gem reference
+-		 */
+-		drm_gem_handle_delete(file_priv, backup_handle);
+ 	}
+ 
+ 	tmp = vmw_resource_reference(&srf->res);
+@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 		if (ret == 0) {
+ 			if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
+ 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
+-				vmw_bo_unreference(&res->guest_memory_bo);
++				vmw_user_bo_unref(&res->guest_memory_bo);
+ 				ret = -EINVAL;
+ 				goto out_unlock;
+ 			} else {
+@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 							res->guest_memory_size,
+ 							&backup_handle,
+ 							&res->guest_memory_bo);
+-		if (ret == 0)
+-			vmw_bo_reference(res->guest_memory_bo);
+ 	}
+ 
+ 	if (unlikely(ret != 0)) {
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 09ba2086c95ce..1c00e4121c7ef 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4515,7 +4515,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			goto hid_hw_init_fail;
+ 	}
+ 
+-	hidpp_connect_event(hidpp);
++	schedule_work(&hidpp->work);
++	flush_work(&hidpp->work);
+ 
+ 	if (will_restart) {
+ 		/* Reset the HID node state */
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 66dc5f97a0098..6132c5b3db9c7 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -1173,16 +1173,6 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * In sysFS mode we can have multiple writers per sink.  Since this
+-	 * sink is already enabled no memory is needed and the HW need not be
+-	 * touched, even if the buffer size has changed.
+-	 */
+-	if (drvdata->mode == CS_MODE_SYSFS) {
+-		atomic_inc(&csdev->refcnt);
+-		goto out;
+-	}
+-
+ 	/*
+ 	 * If we don't have a buffer or it doesn't match the requested size,
+ 	 * use the buffer allocated above. Otherwise reuse the existing buffer.
+@@ -1204,7 +1194,7 @@ out:
+ 
+ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+ {
+-	int ret;
++	int ret = 0;
+ 	unsigned long flags;
+ 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ 	struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
+@@ -1213,12 +1203,24 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+ 		return PTR_ERR(sysfs_buf);
+ 
+ 	spin_lock_irqsave(&drvdata->spinlock, flags);
++
++	/*
++	 * In sysFS mode we can have multiple writers per sink.  Since this
++	 * sink is already enabled no memory is needed and the HW need not be
++	 * touched, even if the buffer size has changed.
++	 */
++	if (drvdata->mode == CS_MODE_SYSFS) {
++		atomic_inc(&csdev->refcnt);
++		goto out;
++	}
++
+ 	ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
+ 	if (!ret) {
+ 		drvdata->mode = CS_MODE_SYSFS;
+ 		atomic_inc(&csdev->refcnt);
+ 	}
+ 
++out:
+ 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 
+ 	if (!ret)
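
The TMC-ETR hunks above close a race by moving the "sink already enabled in sysFS mode" fast path under drvdata->spinlock, so the mode test and the refcount increment are atomic with respect to a concurrent enable. A user-space sketch of the corrected shape, with a pthread mutex standing in for the spinlock (names hypothetical):

#include <pthread.h>
#include <stdatomic.h>

enum cs_mode { CS_MODE_DISABLED, CS_MODE_SYSFS };

struct sink {
	pthread_mutex_t lock;	/* stands in for drvdata->spinlock */
	enum cs_mode mode;
	atomic_int refcnt;
};

/* The mode check and the state transition sit inside one critical
 * section; testing the mode before taking the lock would let two
 * concurrent enables race past each other.
 */
int sink_enable(struct sink *s, int (*enable_hw)(struct sink *))
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (s->mode == CS_MODE_SYSFS) {
		atomic_fetch_add(&s->refcnt, 1);
		goto out;
	}
	ret = enable_hw(s);
	if (!ret) {
		s->mode = CS_MODE_SYSFS;
		atomic_fetch_add(&s->refcnt, 1);
	}
out:
	pthread_mutex_unlock(&s->lock);
	return ret;
}
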
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 7bc3ebfe8081b..ce96bf13fdfe0 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -177,6 +177,7 @@ struct ad7192_chip_info {
+ struct ad7192_state {
+ 	const struct ad7192_chip_info	*chip_info;
+ 	struct regulator		*avdd;
++	struct regulator		*vref;
+ 	struct clk			*mclk;
+ 	u16				int_vref_mv;
+ 	u32				fclk;
+@@ -1014,10 +1015,30 @@ static int ad7192_probe(struct spi_device *spi)
+ 	if (ret)
+ 		return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n");
+ 
+-	ret = regulator_get_voltage(st->avdd);
+-	if (ret < 0) {
+-		dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
+-		return ret;
++	st->vref = devm_regulator_get_optional(&spi->dev, "vref");
++	if (IS_ERR(st->vref)) {
++		if (PTR_ERR(st->vref) != -ENODEV)
++			return PTR_ERR(st->vref);
++
++		ret = regulator_get_voltage(st->avdd);
++		if (ret < 0)
++			return dev_err_probe(&spi->dev, ret,
++					     "Device tree error, AVdd voltage undefined\n");
++	} else {
++		ret = regulator_enable(st->vref);
++		if (ret) {
++			dev_err(&spi->dev, "Failed to enable specified Vref supply\n");
++			return ret;
++		}
++
++		ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref);
++		if (ret)
++			return ret;
++
++		ret = regulator_get_voltage(st->vref);
++		if (ret < 0)
++			return dev_err_probe(&spi->dev, ret,
++					     "Device tree error, Vref voltage undefined\n");
+ 	}
+ 	st->int_vref_mv = ret / 1000;
+ 
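
The ad7192 probe above models an optional external reference: devm_regulator_get_optional() reports -ENODEV when the supply is absent, and only then does the driver fall back to AVdd; any other error is fatal. A stand-alone sketch of that fallback logic (the supplies and voltages are made up for illustration):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* get_supply() stands in for devm_regulator_get_optional() plus
 * regulator_get_voltage(): it returns microvolts, or -ENODEV when the
 * named supply isn't wired up in the device tree.
 */
long get_supply(const char *name)
{
	if (strcmp(name, "avdd") == 0)
		return 3300000;	/* pretend only AVdd exists, at 3.3 V */
	return -ENODEV;
}

int pick_reference_mv(long *out_mv)
{
	long uv = get_supply("vref");

	if (uv == -ENODEV)
		uv = get_supply("avdd");	/* optional vref absent: fall back */
	if (uv < 0)
		return (int)uv;			/* any other error is fatal */

	*out_mv = uv / 1000;
	return 0;
}

int main(void)
{
	long mv;

	if (pick_reference_mv(&mv) == 0)
		printf("reference = %ld mV\n", mv);	/* prints 3300 */
	return 0;
}
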
+diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
+index f5a0fc9e64c54..fff6e5a2d9569 100644
+--- a/drivers/iio/adc/imx8qxp-adc.c
++++ b/drivers/iio/adc/imx8qxp-adc.c
+@@ -38,8 +38,8 @@
+ #define IMX8QXP_ADR_ADC_FCTRL		0x30
+ #define IMX8QXP_ADR_ADC_SWTRIG		0x34
+ #define IMX8QXP_ADR_ADC_TCTRL(tid)	(0xc0 + (tid) * 4)
+-#define IMX8QXP_ADR_ADC_CMDH(cid)	(0x100 + (cid) * 8)
+-#define IMX8QXP_ADR_ADC_CMDL(cid)	(0x104 + (cid) * 8)
++#define IMX8QXP_ADR_ADC_CMDL(cid)	(0x100 + (cid) * 8)
++#define IMX8QXP_ADR_ADC_CMDH(cid)	(0x104 + (cid) * 8)
+ #define IMX8QXP_ADR_ADC_RESFIFO		0x300
+ #define IMX8QXP_ADR_ADC_TST		0xffc
+ 
+diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
+index 877f9124803c9..397544f23b850 100644
+--- a/drivers/iio/addac/Kconfig
++++ b/drivers/iio/addac/Kconfig
+@@ -24,6 +24,8 @@ config AD74413R
+ 	depends on GPIOLIB && SPI
+ 	select REGMAP_SPI
+ 	select CRC8
++	select IIO_BUFFER
++	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build support for Analog Devices AD74412R/AD74413R
+ 	  quad-channel software configurable input/output solution.
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+index b72d39fc2434e..6bfe5d6847e75 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ 	/*
+ 	 * Ignore samples if the buffer is not set: it is needed if the ODR is
+ 	 * set but the buffer is not enabled yet.
++	 *
++	 * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
++	 * is not enabled.
+ 	 */
+-	if (!iio_buffer_enabled(indio_dev))
++	if (iio_device_claim_buffer_mode(indio_dev) < 0)
+ 		return 0;
+ 
+ 	out = (s16 *)st->samples;
+@@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ 	iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ 					   timestamp + delta);
+ 
++	iio_device_release_buffer_mode(indio_dev);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
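
The cros_ec change above replaces an unsynchronized iio_buffer_enabled() peek with a claim/release pair, so the buffer cannot be torn down between the check and the push. As a general pattern, a claim that can fail with -EBUSY replaces a racy boolean test (sketch with hypothetical names; the real claim uses the IIO core's mlock):

#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

struct buffered_dev {
	pthread_mutex_t lock;	/* stands in for the IIO core's mlock */
	bool buffer_enabled;
};

/* Claiming fails with -EBUSY when the buffer is down; on success the
 * buffer is guaranteed to stay up until the matching release.
 */
int claim_buffer_mode(struct buffered_dev *d)
{
	pthread_mutex_lock(&d->lock);
	if (!d->buffer_enabled) {
		pthread_mutex_unlock(&d->lock);
		return -EBUSY;
	}
	return 0;	/* lock stays held until release_buffer_mode() */
}

void release_buffer_mode(struct buffered_dev *d)
{
	pthread_mutex_unlock(&d->lock);
}

int push_sample(struct buffered_dev *d, int sample)
{
	if (claim_buffer_mode(d) < 0)
		return 0;	/* buffer not up yet: drop, as the driver does */

	/* ... format and push `sample` into the buffer ... */
	(void)sample;

	release_buffer_mode(d);
	return 0;
}
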
+diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
+index d5ea1a1be1226..a492e8f2fc0fb 100644
+--- a/drivers/iio/dac/ad3552r.c
++++ b/drivers/iio/dac/ad3552r.c
+@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select {
+ };
+ 
+ enum ad3542r_id {
+-	AD3542R_ID = 0x4008,
+-	AD3552R_ID = 0x4009,
++	AD3542R_ID = 0x4009,
++	AD3552R_ID = 0x4008,
+ };
+ 
+ enum ad3552r_ch_output_range {
+diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
+index 8c8e0bbfc99f2..8d9066c352bf5 100644
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+ 	if (vcm < 0)
+ 		return vcm;
+ 
+-	if (vcm < 1800000)
++	if (vcm <= 1800000)
+ 		mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
+-	else if (vcm > 1800000 && vcm < 2600000)
++	else if (vcm > 1800000 && vcm <= 2600000)
+ 		mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
+ 	else
+ 		return -EINVAL;
+diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
+index fa79b1ac4f85b..83e53acfbe880 100644
+--- a/drivers/iio/imu/bno055/Kconfig
++++ b/drivers/iio/imu/bno055/Kconfig
+@@ -2,6 +2,8 @@
+ 
+ config BOSCH_BNO055
+ 	tristate
++	select IIO_BUFFER
++	select IIO_TRIGGERED_BUFFER
+ 
+ config BOSCH_BNO055_SERIAL
+ 	tristate "Bosch BNO055 attached via UART"
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 6089f3f9d8f4b..a2ef1373a274e 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -2179,7 +2179,7 @@ int bmp280_common_probe(struct device *dev,
+ 	 * however as it happens, the BMP085 shares the chip ID of BMP180
+ 	 * so we look for an IRQ if we have that.
+ 	 */
+-	if (irq > 0 || (chip_id  == BMP180_CHIP_ID)) {
++	if (irq > 0 && (chip_id  == BMP180_CHIP_ID)) {
+ 		ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
+index b10dbf5cf4940..1ff091b2f764d 100644
+--- a/drivers/iio/pressure/dps310.c
++++ b/drivers/iio/pressure/dps310.c
+@@ -57,8 +57,8 @@
+ #define  DPS310_RESET_MAGIC	0x09
+ #define DPS310_COEF_BASE	0x10
+ 
+-/* Make sure sleep time is <= 20ms for usleep_range */
+-#define DPS310_POLL_SLEEP_US(t)		min(20000, (t) / 8)
++/* Make sure sleep time is <= 30ms for usleep_range */
++#define DPS310_POLL_SLEEP_US(t)		min(30000, (t) / 8)
+ /* Silently handle error in rate value here */
+ #define DPS310_POLL_TIMEOUT_US(rc)	((rc) <= 0 ? 1000000 : 1000000 / (rc))
+ 
+@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data)
+ 	if (rc)
+ 		return rc;
+ 
+-	/* Wait for device chip access: 2.5ms in specification */
+-	usleep_range(2500, 12000);
++	/* Wait for device chip access: 15ms in specification */
++	usleep_range(15000, 55000);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
+index 627497e61a635..2fc706f9d8ae7 100644
+--- a/drivers/iio/pressure/ms5611_core.c
++++ b/drivers/iio/pressure/ms5611_core.c
+@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
+ 
+ 	crc = (crc >> 12) & 0x000F;
+ 
+-	return crc_orig != 0x0000 && crc == crc_orig;
++	return crc == crc_orig;
+ }
+ 
+ static int ms5611_read_prom(struct iio_dev *indio_dev)
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index ced615b5ea096..040ba2224f9ff 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+ 	int win;
+ 
+ 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
++
+ 	req = __skb_put_zero(skb, sizeof(*req));
+ 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
+ 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index cdb193317c3b6..fab2e7240ef22 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -130,6 +130,7 @@ static const struct xpad_device {
+ 	{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ 	{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
++	{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ 	{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+@@ -271,6 +272,7 @@ static const struct xpad_device {
+ 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
++	{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
+ 	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+@@ -457,6 +459,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	{ USB_INTERFACE_INFO('X', 'B', 0) },	/* Xbox USB-IF not-approved class */
+ 	XPAD_XBOX360_VENDOR(0x0079),		/* GPD Win 2 controller */
+ 	XPAD_XBOX360_VENDOR(0x03eb),		/* Wooting Keyboards (Legacy) */
++	XPAD_XBOXONE_VENDOR(0x03f0),		/* HP HyperX Xbox One controllers */
+ 	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster Xbox 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft Xbox 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft Xbox One controllers */
+@@ -475,6 +478,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries controllers */
+ 	XPAD_XBOXONE_VENDOR(0x10f5),		/* Turtle Beach Controllers */
+ 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
++	XPAD_XBOX360_VENDOR(0x11ff),		/* PXN V900 */
+ 	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
+ 	XPAD_XBOX360_VENDOR(0x12ab),		/* Xbox 360 dance pads */
+ 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane Xbox 360 controllers */
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index c1c733a9cb890..db2ba89adaefa 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
+ 		pm->requires_update = 0;
+ 		usb_kill_urb(pm->irq);
+ 		input_unregister_device(pm->input);
++		usb_kill_urb(pm->config);
+ 		usb_free_urb(pm->irq);
+ 		usb_free_urb(pm->config);
+ 		powermate_free_buffers(interface_to_usbdev(intf), pm);
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 2118b2075f437..4e38229404b4b 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
+ 	psmouse->protocol_handler = elantech_process_byte;
+ 	psmouse->disconnect = elantech_disconnect;
+ 	psmouse->reconnect = elantech_reconnect;
++	psmouse->fast_reconnect = NULL;
+ 	psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
+ 
+ 	return 0;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index ada299ec5bba5..cefc74b3b34b1 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
+ 	psmouse->set_rate = synaptics_set_rate;
+ 	psmouse->disconnect = synaptics_disconnect;
+ 	psmouse->reconnect = synaptics_reconnect;
++	psmouse->fast_reconnect = NULL;
+ 	psmouse->cleanup = synaptics_reset;
+ 	/* Synaptics can usually stay in sync without extra help */
+ 	psmouse->resync_time = 0;
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 1724d6cb8649d..9c39553d30fa2 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -618,6 +618,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ 	},
++	{
++		/* Fujitsu Lifebook E5411 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		/* Gigabyte M912 */
+ 		.matches = {
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index f5aa240739f97..0f727dbc7232f 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+ 		dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
+ 		ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ 		gpio_mapping = acpi_goodix_int_last_gpios;
++	} else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) {
++		/*
++		 * On newer devices there is only 1 GpioInt resource and _PS0
++		 * does the whole reset sequence for us.
++		 */
++		acpi_device_fix_up_power(ACPI_COMPANION(dev));
++
++		/*
++		 * Before the _PS0 call the int GPIO may have been in output
++		 * mode and the call should have put the int GPIO in input mode,
++		 * but the GPIO subsystem's cached state may still think it is
++		 * in output mode, causing gpiochip_lock_as_irq() failure.
++		 *
++		 * Add a mapping for the int GPIO to make the
++		 * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed,
++		 * which will explicitly set the direction to input.
++		 */
++		ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
++		gpio_mapping = acpi_goodix_int_first_gpios;
+ 	} else {
+ 		dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
+ 			 ts->gpio_count, ts->gpio_int_idx);
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 4bbfa2b0a4df9..2cee5477be6b6 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
+ 
+ 		raw_spin_lock(&priv->lock);
+ 		reg = readl_relaxed(priv->base + TSSR(tssr_index));
+-		reg &= ~(TSSEL_MASK << tssr_offset);
++		reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ 		writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ 		raw_spin_unlock(&priv->lock);
+ 	}
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 978fdfc19a06a..0cac5bead84fa 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
+ 
+ static int __mcb_bus_add_devices(struct device *dev, void *data)
+ {
+-	struct mcb_device *mdev = to_mcb_device(dev);
+ 	int retval;
+ 
+-	if (mdev->is_added)
+-		return 0;
+-
+ 	retval = device_attach(dev);
+-	if (retval < 0)
++	if (retval < 0) {
+ 		dev_err(dev, "Error adding device (%d)\n", retval);
+-
+-	mdev->is_added = true;
++		return retval;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 2aef990f379f7..656b6b71c7682 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	mdev->mem.end = mdev->mem.start + size - 1;
+ 	mdev->mem.flags = IORESOURCE_MEM;
+ 
+-	mdev->is_added = false;
+-
+ 	ret = mcb_device_register(bus, mdev);
+ 	if (ret < 0)
+ 		goto err;
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1dc6227d353ec..dc0463bf3c2cf 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
+ 	int err;
+ 	u8 *buf;
+ 
+-	reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
++	reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
++	reqsize = ALIGN(reqsize, __alignof__(__le64));
+ 
+ 	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
+ 	if (!req)
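
The dm-crypt fix above accounts for the request header itself before aligning: the IV is stored after both struct skcipher_request and the transform's context, so the aligned size must include sizeof(*req). A stand-alone sketch of the layout arithmetic (the sizes are made up):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct req_hdr { char ctx[24]; };	/* stand-in for the skcipher request */

int main(void)
{
	size_t tfm_reqsize = 52;	/* pretend per-transform context size */
	size_t iv_size = 16;

	/* The bug aligned only tfm_reqsize; the header's own size must be
	 * included before rounding up, or the IV buffer overlaps the tail
	 * of the in-use request.
	 */
	size_t reqsize = ALIGN_UP(sizeof(struct req_hdr) + tfm_reqsize,
				  alignof(uint64_t));

	char *req = malloc(reqsize + iv_size);
	char *iv = req + reqsize;	/* IV sits after the aligned request */

	printf("iv offset = %zu bytes\n", (size_t)(iv - req));
	free(req);
	return 0;
}
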
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 2ec179cd12643..a7c1415f46e4d 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -517,6 +517,13 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ 				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+ 	int rval;
+ 
++	/*
++	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
++	 * Remove this when the API is no longer experimental.
++	 */
++	if (!v4l2_subdev_enable_streams_api)
++		streams_subdev = false;
++
+ 	switch (cmd) {
+ 	case VIDIOC_SUBDEV_QUERYCAP: {
+ 		struct v4l2_subdev_capability *cap = arg;
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index e626de33e735d..716bba8cc2017 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -185,7 +185,7 @@ config CAN_SLCAN
+ 
+ config CAN_SUN4I
+ 	tristate "Allwinner A10 CAN controller"
+-	depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
++	depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
+ 	help
+ 	  Say Y here if you want to use CAN controller found on Allwinner
+ 	  A10/A20/D1 SoCs.
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 0ada0e160e936..743c2eb62b877 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
+ 	struct net_device *dev = (struct net_device *)dev_id;
+ 
+ 	netdev_dbg(dev, "performing a soft reset upon overrun\n");
+-	sja1000_start(dev);
++
++	netif_tx_lock(dev);
++
++	can_free_echo_skb(dev, 0, NULL);
++	sja1000_set_mode(dev, CAN_MODE_START);
++
++	netif_tx_unlock(dev);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index efe9380d4a15d..252089929eb92 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -505,8 +505,8 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
+ 		void *val_buf, size_t val_len)
+ {
+ 	int i, count = val_len / sizeof(u32), ret;
+-	u32 reg = *(u32 *)reg_buf & U16_MAX;
+ 	struct qca8k_priv *priv = ctx;
++	u32 reg = *(u16 *)reg_buf;
+ 
+ 	if (priv->mgmt_master &&
+ 	    !qca8k_read_eth(priv, reg, val_buf, val_len))
+@@ -527,8 +527,8 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
+ 			const void *val_buf, size_t val_len)
+ {
+ 	int i, count = val_len / sizeof(u32), ret;
+-	u32 reg = *(u32 *)reg_buf & U16_MAX;
+ 	struct qca8k_priv *priv = ctx;
++	u32 reg = *(u16 *)reg_buf;
+ 	u32 *val = (u32 *)val_buf;
+ 
+ 	if (priv->mgmt_master &&
+@@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ 		goto err_read_skb;
+ 	}
+ 
++	/* It seems that accessing the switch's internal PHYs via management
++	 * packets still uses the MDIO bus within the switch internally, and
++	 * these accesses can conflict with external MDIO accesses to other
++	 * devices on the MDIO bus.
++	 * We therefore need to lock the MDIO bus onto which the switch is
++	 * connected.
++	 */
++	mutex_lock(&priv->bus->mdio_lock);
++
+ 	/* Actually start the request:
+ 	 * 1. Send mdio master packet
+ 	 * 2. Busy Wait for mdio master command
+@@ -678,6 +687,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ 	mgmt_master = priv->mgmt_master;
+ 	if (!mgmt_master) {
+ 		mutex_unlock(&mgmt_eth_data->mutex);
++		mutex_unlock(&priv->bus->mdio_lock);
+ 		ret = -EINVAL;
+ 		goto err_mgmt_master;
+ 	}
+@@ -765,6 +775,7 @@ exit:
+ 				    QCA8K_ETHERNET_TIMEOUT);
+ 
+ 	mutex_unlock(&mgmt_eth_data->mutex);
++	mutex_unlock(&priv->bus->mdio_lock);
+ 
+ 	return ret;
+ 
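
The qca8k hunks above serialize Ethernet-encapsulated PHY access with ordinary MDIO traffic by holding the bus-wide mdio_lock across the whole management transaction, with the driver's own management mutex nested inside it. The lock nesting and error-path unwind, sketched with pthread mutexes (names hypothetical):

#include <pthread.h>
#include <errno.h>

/* The bus-wide MDIO lock is taken first and held across the whole
 * exchange; the per-driver management mutex nests inside it. Error
 * paths unwind in reverse order, innermost lock first.
 */
int mgmt_phy_op(pthread_mutex_t *mdio_lock, pthread_mutex_t *mgmt_lock,
		void *mgmt_master, int (*do_op)(void))
{
	int ret;

	pthread_mutex_lock(mdio_lock);
	pthread_mutex_lock(mgmt_lock);

	if (!mgmt_master) {
		pthread_mutex_unlock(mgmt_lock);
		pthread_mutex_unlock(mdio_lock);
		return -EINVAL;
	}

	ret = do_op();

	pthread_mutex_unlock(mgmt_lock);
	pthread_mutex_unlock(mdio_lock);
	return ret;
}
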
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 29cc609880712..ea88ac04ab9ad 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ 	struct vf_macvlans *mv_list;
+ 	int num_vf_macvlans, i;
+ 
++	/* Initialize list of VF macvlans */
++	INIT_LIST_HEAD(&adapter->vf_mvs.l);
++
+ 	num_vf_macvlans = hw->mac.num_rar_entries -
+ 			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
+ 	if (!num_vf_macvlans)
+@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ 	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ 			  GFP_KERNEL);
+ 	if (mv_list) {
+-		/* Initialize list of VF macvlans */
+-		INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ 		for (i = 0; i < num_vf_macvlans; i++) {
+ 			mv_list[i].vf = -1;
+ 			mv_list[i].free = true;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+index 59b138214af2f..6cc7a78968fc1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+@@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+ 
+ 	if (netif_running(secy->netdev)) {
+ 		/* Keys cannot be changed after creation */
+-		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+-					   sw_tx_sa->next_pn);
+-		if (err)
+-			return err;
++		if (ctx->sa.update_pn) {
++			err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
++						   sw_tx_sa->next_pn);
++			if (err)
++				return err;
++		}
+ 
+ 		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ 					      sa_num, sw_tx_sa->active);
+@@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+ 		if (err)
+ 			return err;
+ 
++		if (!ctx->sa.update_pn)
++			return 0;
++
+ 		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
+ 					       rx_sa->next_pn);
+ 		if (err)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 20ecc90d203e0..379e1510b70c0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -1401,6 +1401,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ 		return 0;
+ 	}
+ 
++	pp_params.order = get_order(buf_size);
+ 	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ 	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ 	pp_params.nid = NUMA_NO_NODE;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index 592b165530ffa..6bce55978aa72 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -612,7 +612,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+ 		goto out;
+ 	}
+ 
+-	if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
++	if (ctx->sa.update_pn) {
+ 		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ 			   assoc_num);
+ 		err = -EINVAL;
+@@ -1017,7 +1017,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+ 		goto out;
+ 	}
+ 
+-	if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
++	if (ctx->sa.update_pn) {
+ 		netdev_err(ctx->netdev,
+ 			   "MACsec offload update RX sa %d PN isn't supported\n",
+ 			   assoc_num);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index f7b494125eee8..0cbe822ab084f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
+ 	struct mlx5e_channels *chs = &priv->channels;
+ 	struct mlx5e_params new_params;
+ 	int err;
++	bool rx_ts_over_crc = !enable;
+ 
+ 	mutex_lock(&priv->state_lock);
+ 
+ 	new_params = chs->params;
+ 	new_params.scatter_fcs_en = enable;
+ 	err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+-				       &new_params.scatter_fcs_en, true);
++				       &rx_ts_over_crc, true);
+ 	mutex_unlock(&priv->state_lock);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+index bb8eeb86edf75..52c2fe3644d4b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+@@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
+ 	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
+ };
+ 
+-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+-					     bool learning_en)
++static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
++					    bool learning_en)
+ {
+ 	char tnpc_pl[MLXSW_REG_TNPC_LEN];
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+index f21cf1f40f987..153533cd8f086 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+ 	struct nfp_flower_cmsg_merge_hint *msg;
+ 	struct nfp_fl_payload *sub_flows[2];
++	struct nfp_flower_priv *priv;
+ 	int err, i, flow_cnt;
+ 
+ 	msg = nfp_flower_cmsg_get_data(skb);
+@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
+-	rtnl_lock();
++	priv = app->priv;
++	mutex_lock(&priv->nfp_fl_lock);
+ 	for (i = 0; i < flow_cnt; i++) {
+ 		u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+ 
+ 		sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+ 		if (!sub_flows[i]) {
+ 			nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+-			goto err_rtnl_unlock;
++			goto err_mutex_unlock;
+ 		}
+ 	}
+ 
+@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 	if (err == -ENOMEM)
+ 		nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+ 
+-err_rtnl_unlock:
+-	rtnl_unlock();
++err_mutex_unlock:
++	mutex_unlock(&priv->nfp_fl_lock);
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index 73032173ac4eb..308ca22b89096 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -2130,8 +2130,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
+ 	struct nfp_fl_ct_flow_entry *ct_entry;
+ 	struct netlink_ext_ack *extack = NULL;
+ 
+-	ASSERT_RTNL();
+-
+ 	extack = flow->common.extack;
+ 	switch (flow->command) {
+ 	case FLOW_CLS_REPLACE:
+@@ -2177,9 +2175,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
+ 
+ 	switch (type) {
+ 	case TC_SETUP_CLSFLOWER:
+-		rtnl_lock();
++		while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
++			if (!zt->nft) /* avoid deadlock */
++				return err;
++			msleep(20);
++		}
+ 		err = nfp_fl_ct_offload_nft_flow(zt, flow);
+-		rtnl_unlock();
++		mutex_unlock(&zt->priv->nfp_fl_lock);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+@@ -2207,6 +2209,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+ 	struct nfp_fl_ct_flow_entry *ct_entry;
+ 	struct nfp_fl_ct_zone_entry *zt;
+ 	struct rhashtable *m_table;
++	struct nf_flowtable *nft;
+ 
+ 	if (!ct_map_ent)
+ 		return -ENOENT;
+@@ -2225,8 +2228,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+ 		if (ct_map_ent->cookie > 0)
+ 			kfree(ct_map_ent);
+ 
+-		if (!zt->pre_ct_count) {
+-			zt->nft = NULL;
++		if (!zt->pre_ct_count && zt->nft) {
++			nft = zt->nft;
++			zt->nft = NULL; /* avoid deadlock */
++			nf_flow_table_offload_del_cb(nft,
++						     nfp_fl_ct_handle_nft_flow,
++						     zt);
+ 			nfp_fl_ct_clean_nft_entries(zt);
+ 		}
+ 		break;
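
The trylock loop added to nfp_fl_ct_handle_nft_flow() is the crux of this rtnl-to-mutex conversion: the nft callback can fire while nfp_fl_ct_del_flow() already holds nfp_fl_lock and is unregistering the callback, so sleeping on the mutex could deadlock. The teardown path publishes its intent by clearing zt->nft first, and the callback polls for that. A stripped-down sketch of the pattern (names from the patch, error value hypothetical):

    /* Sketch: take a mutex the teardown path may hold while it is
     * unregistering this very callback; bail out once teardown has
     * signalled itself (zt->nft cleared) instead of blocking.
     */
    while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
        if (!zt->nft)          /* teardown in progress, give up */
            return -EOPNOTSUPP;
        msleep(20);            /* back off, then retry */
    }
    /* ... offload work under the mutex ... */
    mutex_unlock(&zt->priv->nfp_fl_lock);
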
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
+index 40372545148ef..2b7c947ff4f2a 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
+@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
+  * @predt_list:		List to keep track of decap pretun flows
+  * @neigh_table:	Table to keep track of neighbor entries
+  * @predt_lock:		Lock to serialise predt/neigh table updates
++ * @nfp_fl_lock:	Lock to protect the flow offload operation
+  */
+ struct nfp_flower_priv {
+ 	struct nfp_app *app;
+@@ -339,6 +340,7 @@ struct nfp_flower_priv {
+ 	struct list_head predt_list;
+ 	struct rhashtable neigh_table;
+ 	spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
++	struct mutex nfp_fl_lock; /* Protect the flow operation */
+ };
+ 
+ /**
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+index 0f06ef6e24bf4..80e4675582bfb 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ 	if (err)
+ 		goto err_free_stats_ctx_table;
+ 
++	mutex_init(&priv->nfp_fl_lock);
++
+ 	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
+ 	if (err)
+ 		goto err_free_merge_table;
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 18328eb7f5c33..24334deb62a14 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ 	u64 parent_ctx = 0;
+ 	int err;
+ 
+-	ASSERT_RTNL();
+-
+ 	if (sub_flow1 == sub_flow2 ||
+ 	    nfp_flower_is_merge_flow(sub_flow1) ||
+ 	    nfp_flower_is_merge_flow(sub_flow2))
+@@ -1727,19 +1725,30 @@ static int
+ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
+ 			struct flow_cls_offload *flower)
+ {
++	struct nfp_flower_priv *priv = app->priv;
++	int ret;
++
+ 	if (!eth_proto_is_802_3(flower->common.protocol))
+ 		return -EOPNOTSUPP;
+ 
++	mutex_lock(&priv->nfp_fl_lock);
+ 	switch (flower->command) {
+ 	case FLOW_CLS_REPLACE:
+-		return nfp_flower_add_offload(app, netdev, flower);
++		ret = nfp_flower_add_offload(app, netdev, flower);
++		break;
+ 	case FLOW_CLS_DESTROY:
+-		return nfp_flower_del_offload(app, netdev, flower);
++		ret = nfp_flower_del_offload(app, netdev, flower);
++		break;
+ 	case FLOW_CLS_STATS:
+-		return nfp_flower_get_stats(app, netdev, flower);
++		ret = nfp_flower_get_stats(app, netdev, flower);
++		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		ret = -EOPNOTSUPP;
++		break;
+ 	}
++	mutex_unlock(&priv->nfp_fl_lock);
++
++	return ret;
+ }
+ 
+ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
+@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
+ 	repr_priv = repr->app_priv;
+ 	repr_priv->block_shared = f->block_shared;
+ 	f->driver_block_list = &nfp_block_cb_list;
++	f->unlocked_driver_cb = true;
+ 
+ 	switch (f->command) {
+ 	case FLOW_BLOCK_BIND:
+@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
+ 	     nfp_flower_internal_port_can_offload(app, netdev)))
+ 		return -EOPNOTSUPP;
+ 
++	f->unlocked_driver_cb = true;
++
+ 	switch (f->command) {
+ 	case FLOW_BLOCK_BIND:
+ 		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+index 99052a925d9ec..e7180b4793c7d 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ {
+ 	struct netlink_ext_ack *extack = flow->common.extack;
+ 	struct nfp_flower_priv *fl_priv = app->priv;
++	int ret;
+ 
+ 	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	mutex_lock(&fl_priv->nfp_fl_lock);
+ 	switch (flow->command) {
+ 	case TC_CLSMATCHALL_REPLACE:
+-		return nfp_flower_install_rate_limiter(app, netdev, flow,
+-						       extack);
++		ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	case TC_CLSMATCHALL_DESTROY:
+-		return nfp_flower_remove_rate_limiter(app, netdev, flow,
+-						      extack);
++		ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	case TC_CLSMATCHALL_STATS:
+-		return nfp_flower_stats_rate_limiter(app, netdev, flow,
+-						     extack);
++		ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		ret = -EOPNOTSUPP;
++		break;
+ 	}
++	mutex_unlock(&fl_priv->nfp_fl_lock);
++
++	return ret;
+ }
+ 
+ /* Offload tc action, currently only for tc police */
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 4d6b3b7d6abb3..ef8f205f8ce1f 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2168,6 +2168,8 @@ static int ravb_close(struct net_device *ndev)
+ 			of_phy_deregister_fixed_link(np);
+ 	}
+ 
++	cancel_work_sync(&priv->work);
++
+ 	if (info->multi_irqs) {
+ 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
+ 		free_irq(priv->rx_irqs[RAVB_NC], ndev);
+@@ -2892,8 +2894,6 @@ static int ravb_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(priv->gptp_clk);
+ 	clk_disable_unprepare(priv->refclk);
+ 
+-	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+-			  priv->desc_bat_dma);
+ 	/* Set reset mode */
+ 	ravb_write(ndev, CCC_OPC_RESET, CCC);
+ 	unregister_netdev(ndev);
+@@ -2901,6 +2901,8 @@ static int ravb_remove(struct platform_device *pdev)
+ 		netif_napi_del(&priv->napi[RAVB_NC]);
+ 	netif_napi_del(&priv->napi[RAVB_BE]);
+ 	ravb_mdio_release(priv);
++	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
++			  priv->desc_bat_dma);
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	reset_control_assert(priv->rstc);
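
The ravb_remove() reordering follows the usual rule that teardown mirrors probe in reverse: the descriptor base address table can still be reached through the netdev and MDIO paths, so it must only be freed after those are torn down. Reduced to the resulting sequence (sketch):

    unregister_netdev(ndev);              /* stop TX/RX entry points */
    netif_napi_del(&priv->napi[RAVB_BE]); /* NAPI no longer polls rings */
    ravb_mdio_release(priv);              /* PHY/MDIO users released */
    dma_free_coherent(ndev->dev.parent, priv->desc_bat_size,
                      priv->desc_bat, priv->desc_bat_dma);
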
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 660cbfe344d2c..fb9a520f42078 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -1255,7 +1255,7 @@ static void rswitch_adjust_link(struct net_device *ndev)
+ 		phy_print_status(phydev);
+ 		if (phydev->link)
+ 			phy_power_on(rdev->serdes);
+-		else
++		else if (rdev->serdes->power_count)
+ 			phy_power_off(rdev->serdes);
+ 
+ 		rdev->etha->link = phydev->link;
+@@ -1942,15 +1942,17 @@ static void rswitch_deinit(struct rswitch_private *priv)
+ 	rswitch_gwca_hw_deinit(priv);
+ 	rcar_gen4_ptp_unregister(priv->ptp_priv);
+ 
+-	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
++	rswitch_for_each_enabled_port(priv, i) {
+ 		struct rswitch_device *rdev = priv->rdev[i];
+ 
+-		phy_exit(priv->rdev[i]->serdes);
+-		rswitch_ether_port_deinit_one(rdev);
+ 		unregister_netdev(rdev->ndev);
+-		rswitch_device_free(priv, i);
++		rswitch_ether_port_deinit_one(rdev);
++		phy_exit(priv->rdev[i]->serdes);
+ 	}
+ 
++	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
++		rswitch_device_free(priv, i);
++
+ 	rswitch_gwca_ts_queue_free(priv);
+ 	rswitch_gwca_linkfix_free(priv);
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 6931973028aef..e840cadb2d75a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5940,33 +5940,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-/* Polling receive - used by NETCONSOLE and other diagnostic tools
+- * to allow network I/O with interrupts disabled.
+- */
+-static void stmmac_poll_controller(struct net_device *dev)
+-{
+-	struct stmmac_priv *priv = netdev_priv(dev);
+-	int i;
+-
+-	/* If adapter is down, do nothing */
+-	if (test_bit(STMMAC_DOWN, &priv->state))
+-		return;
+-
+-	if (priv->plat->multi_msi_en) {
+-		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
+-			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
+-
+-		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+-			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
+-	} else {
+-		disable_irq(dev->irq);
+-		stmmac_interrupt(dev->irq, dev);
+-		enable_irq(dev->irq);
+-	}
+-}
+-#endif
+-
+ /**
+  *  stmmac_ioctl - Entry point for the Ioctl
+  *  @dev: Device pointer.
+@@ -6802,9 +6775,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
+ 	.ndo_eth_ioctl = stmmac_ioctl,
+ 	.ndo_setup_tc = stmmac_setup_tc,
+ 	.ndo_select_queue = stmmac_select_queue,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-	.ndo_poll_controller = stmmac_poll_controller,
+-#endif
+ 	.ndo_set_mac_address = stmmac_set_mac_address,
+ 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index f9b10e84de067..9cb99dc65db34 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2741,7 +2741,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
+ 	struct device_node *np = spi->dev.of_node;
+ 	struct ca8210_priv *priv = spi_get_drvdata(spi);
+ 	struct ca8210_platform_data *pdata = spi->dev.platform_data;
+-	int ret = 0;
+ 
+ 	if (!np)
+ 		return -EFAULT;
+@@ -2758,18 +2757,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
+ 		dev_crit(&spi->dev, "Failed to register external clk\n");
+ 		return PTR_ERR(priv->clk);
+ 	}
+-	ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
+-	if (ret) {
+-		clk_unregister(priv->clk);
+-		dev_crit(
+-			&spi->dev,
+-			"Failed to register external clock as clock provider\n"
+-		);
+-	} else {
+-		dev_info(&spi->dev, "External clock set as clock provider\n");
+-	}
+ 
+-	return ret;
++	return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
+ }
+ 
+ /**
+@@ -2781,8 +2770,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
+ {
+ 	struct ca8210_priv *priv = spi_get_drvdata(spi);
+ 
+-	if (!priv->clk)
+-		return
++	if (IS_ERR_OR_NULL(priv->clk))
++		return;
+ 
+ 	of_clk_del_provider(spi->dev.of_node);
+ 	clk_unregister(priv->clk);
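
Two fixes meet in the ca8210 hunks: the old guard's bare `return` was missing its semicolon, fusing it with the following of_clk_del_provider() call, and priv->clk can hold an ERR_PTR from clk_register_fixed_rate() as well as NULL. IS_ERR_OR_NULL() covers both "never registered" and "registration failed". A minimal sketch:

    #include <linux/clk-provider.h>
    #include <linux/err.h>

    /* Sketch: tear down an optional clock that may be NULL (never
     * registered) or an ERR_PTR (registration failed).
     */
    static void example_unregister_clk(struct device_node *np, struct clk *clk)
    {
        if (IS_ERR_OR_NULL(clk))
            return;

        of_clk_del_provider(np);
        clk_unregister(clk);
    }
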
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2d64650f4eb3c..1c60548c1ddde 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2394,6 +2394,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.tx_sa = tx_sa;
++		ctx.sa.update_pn = !!prev_pn.full64;
+ 		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+@@ -2487,6 +2488,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.rx_sa = rx_sa;
++		ctx.sa.update_pn = !!prev_pn.full64;
+ 		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
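
The two macsec.c hunks are the core half of the update_pn changes seen earlier for mlx5 and below for mscc: rather than every offload driver inferring a packet-number update by comparing PN halves (which breaks for 64-bit PNs), the core records whether the netlink request actually carried a new PN. A driver that cannot rewrite the PN in hardware then rejects exactly those requests; roughly:

    /* Sketch of the driver side: ctx->sa.update_pn is set by the core
     * only when userspace supplied a new packet number.
     */
    static int example_upd_txsa(struct macsec_context *ctx)
    {
        if (ctx->sa.update_pn)
            return -EINVAL;   /* PN rewrite not offloadable here */

        /* ... update key/flags in hardware ... */
        return 0;
    }
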
+diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
+index 018253a573b88..4f39ba63a9a91 100644
+--- a/drivers/net/phy/mscc/mscc_macsec.c
++++ b/drivers/net/phy/mscc/mscc_macsec.c
+@@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+ 	struct macsec_flow *flow;
+ 	int ret;
+ 
++	if (ctx->sa.update_pn)
++		return -EINVAL;
++
+ 	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ 	if (IS_ERR(flow))
+ 		return PTR_ERR(flow);
+@@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+ 	struct macsec_flow *flow;
+ 	int ret;
+ 
++	if (ctx->sa.update_pn)
++		return -EINVAL;
++
+ 	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ 	if (IS_ERR(flow))
+ 		return PTR_ERR(flow);
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 48d7d278631e9..99ec1d4a972db 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	struct usbnet *dev = netdev_priv(netdev);
+ 
+ 	__le16 res;
++	int err;
+ 
+ 	if (phy_id) {
+ 		netdev_dbg(dev->net, "Only internal phy supported\n");
+ 		return 0;
+ 	}
+ 
+-	dm_read_shared_word(dev, 1, loc, &res);
++	err = dm_read_shared_word(dev, 1, loc, &res);
++	if (err < 0) {
++		netdev_err(dev->net, "MDIO read error: %d\n", err);
++		return err;
++	}
+ 
+ 	netdev_dbg(dev->net,
+ 		   "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index f3f2c07423a6a..fc3bb63b9ac3e 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -41,8 +41,6 @@
+ #include <asm/xen/hypercall.h>
+ #include <xen/balloon.h>
+ 
+-#define XENVIF_QUEUE_LENGTH 32
+-
+ /* Number of bytes allowed on the internal guest Rx queue. */
+ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+ 
+@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
+ 	dev->ethtool_ops = &xenvif_ethtool_ops;
+ 
+-	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+-
+ 	dev->min_mtu = ETH_MIN_MTU;
+ 	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index b8c15878bc86d..43aba01399bef 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1862,7 +1862,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
+ 		u64 delta;
+ 		int i;
+ 
+-		for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) {
++		for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
+ 			if (status & (1U << i)) {
+ 				ret = IRQ_HANDLED;
+ 				if (WARN_ON(!dtc->counters[i]))
+diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
+index 569f12af2aafa..0a8b40edc3f31 100644
+--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
++++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
+@@ -126,6 +126,10 @@ struct lynx_28g_lane {
+ struct lynx_28g_priv {
+ 	void __iomem *base;
+ 	struct device *dev;
++	/* Serialize concurrent access to registers shared between lanes,
++	 * like PCCn
++	 */
++	spinlock_t pcc_lock;
+ 	struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
+ 	struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
+ 
+@@ -396,6 +400,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+ 	if (powered_up)
+ 		lynx_28g_power_off(phy);
+ 
++	spin_lock(&priv->pcc_lock);
++
+ 	switch (submode) {
+ 	case PHY_INTERFACE_MODE_SGMII:
+ 	case PHY_INTERFACE_MODE_1000BASEX:
+@@ -412,6 +418,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+ 	lane->interface = submode;
+ 
+ out:
++	spin_unlock(&priv->pcc_lock);
++
+ 	/* Power up the lane if necessary */
+ 	if (powered_up)
+ 		lynx_28g_power_on(phy);
+@@ -507,11 +515,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
+ 	for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+ 		lane = &priv->lane[i];
+ 
+-		if (!lane->init)
+-			continue;
++		mutex_lock(&lane->phy->mutex);
+ 
+-		if (!lane->powered_up)
++		if (!lane->init || !lane->powered_up) {
++			mutex_unlock(&lane->phy->mutex);
+ 			continue;
++		}
+ 
+ 		rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ 		if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
+@@ -520,6 +529,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
+ 				rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ 			} while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+ 		}
++
++		mutex_unlock(&lane->phy->mutex);
+ 	}
+ 	queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+ 			   msecs_to_jiffies(1000));
+@@ -592,6 +603,7 @@ static int lynx_28g_probe(struct platform_device *pdev)
+ 
+ 	dev_set_drvdata(dev, priv);
+ 
++	spin_lock_init(&priv->pcc_lock);
+ 	INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
+ 
+ 	queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+@@ -603,6 +615,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
+ 	return PTR_ERR_OR_ZERO(provider);
+ }
+ 
++static void lynx_28g_remove(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct lynx_28g_priv *priv = dev_get_drvdata(dev);
++
++	cancel_delayed_work_sync(&priv->cdr_check);
++}
++
+ static const struct of_device_id lynx_28g_of_match_table[] = {
+ 	{ .compatible = "fsl,lynx-28g" },
+ 	{ },
+@@ -611,6 +631,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
+ 
+ static struct platform_driver lynx_28g_driver = {
+ 	.probe	= lynx_28g_probe,
++	.remove_new = lynx_28g_remove,
+ 	.driver	= {
+ 		.name = "lynx-28g",
+ 		.of_match_table = lynx_28g_of_match_table,
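
Before this change the lynx-28g driver registered no remove callback, so the CDR-check work armed in probe could fire after the module was unloaded and touch freed memory. The fix is the standard pairing: queue_delayed_work() in probe, cancel_delayed_work_sync() in remove (.remove_new is the void-returning variant). Sketch, with hypothetical names:

    static void example_remove(struct platform_device *pdev)
    {
        struct example_priv *priv = dev_get_drvdata(&pdev->dev);

        /* wait for any in-flight work before the module text goes away */
        cancel_delayed_work_sync(&priv->check_work);
    }

    static struct platform_driver example_driver = {
        .probe      = example_probe,
        .remove_new = example_remove,
        .driver     = { .name = "example" },
    };
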
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 401886c813449..b84781cfc2596 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1012,17 +1012,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ 
+ static struct pinctrl *find_pinctrl(struct device *dev)
+ {
+-	struct pinctrl *p;
++	struct pinctrl *entry, *p = NULL;
+ 
+ 	mutex_lock(&pinctrl_list_mutex);
+-	list_for_each_entry(p, &pinctrl_list, node)
+-		if (p->dev == dev) {
+-			mutex_unlock(&pinctrl_list_mutex);
+-			return p;
++
++	list_for_each_entry(entry, &pinctrl_list, node) {
++		if (entry->dev == dev) {
++			p = entry;
++			kref_get(&p->users);
++			break;
+ 		}
++	}
+ 
+ 	mutex_unlock(&pinctrl_list_mutex);
+-	return NULL;
++	return p;
+ }
+ 
+ static void pinctrl_free(struct pinctrl *p, bool inlist);
+@@ -1130,7 +1133,6 @@ struct pinctrl *pinctrl_get(struct device *dev)
+ 	p = find_pinctrl(dev);
+ 	if (p) {
+ 		dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+-		kref_get(&p->users);
+ 		return p;
+ 	}
+ 
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+index 2d1c1652cfd9d..8a9961ac87128 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+@@ -1062,13 +1062,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		gpio = &pctrl->gpio_bank[reg];
+-		gpio->pctrl = pctrl;
+-
+ 		if (reg >= WPCM450_NUM_BANKS)
+ 			return dev_err_probe(dev, -EINVAL,
+ 					     "GPIO index %d out of range!\n", reg);
+ 
++		gpio = &pctrl->gpio_bank[reg];
++		gpio->pctrl = pctrl;
++
+ 		bank = &wpcm450_banks[reg];
+ 		gpio->bank = bank;
+ 
+diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
+index 77730dc548ed3..c8d519ca53eb7 100644
+--- a/drivers/pinctrl/renesas/Kconfig
++++ b/drivers/pinctrl/renesas/Kconfig
+@@ -235,6 +235,7 @@ config PINCTRL_RZN1
+ 	depends on OF
+ 	depends on ARCH_RZN1 || COMPILE_TEST
+ 	select GENERIC_PINCONF
++	select PINMUX
+ 	help
+ 	  This selects pinctrl driver for Renesas RZ/N1 devices.
+ 
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+index 5fe729b4a03de..72747ad497b5e 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+@@ -968,8 +968,6 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "could not register gpiochip\n");
+ 
+-	irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
+-
+ 	dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio);
+ 
+ 	return pinctrl_enable(sfp->pctl);
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index e76e5458db350..8ebb7be52ee72 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -1548,7 +1548,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = {
+ 	.restore  = hp_wmi_resume_handler,
+ };
+ 
+-static struct platform_driver hp_wmi_driver = {
++/*
++ * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via
++ * module_platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver hp_wmi_driver __refdata = {
+ 	.driver = {
+ 		.name = "hp-wmi",
+ 		.pm = &hp_wmi_pm_ops,
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 79346881cadb1..aee869769843f 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -1248,6 +1248,24 @@ static void tlmi_release_attr(void)
+ 	kset_unregister(tlmi_priv.authentication_kset);
+ }
+ 
++static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name)
++{
++	struct kobject *duplicate;
++
++	if (!strcmp(name, "Reserved"))
++		return -EINVAL;
++
++	duplicate = kset_find_obj(attribute_kset, name);
++	if (duplicate) {
++		pr_debug("Duplicate attribute name found - %s\n", name);
++		/* kset_find_obj() returns a reference */
++		kobject_put(duplicate);
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
+ static int tlmi_sysfs_init(void)
+ {
+ 	int i, ret;
+@@ -1276,10 +1294,8 @@ static int tlmi_sysfs_init(void)
+ 			continue;
+ 
+ 		/* check for duplicate or reserved values */
+-		if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) ||
+-		    !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) {
+-			pr_debug("duplicate or reserved attribute name found - %s\n",
+-				tlmi_priv.setting[i]->display_name);
++		if (tlmi_validate_setting_name(tlmi_priv.attribute_kset,
++					       tlmi_priv.setting[i]->display_name) < 0) {
+ 			kfree(tlmi_priv.setting[i]->possible_values);
+ 			kfree(tlmi_priv.setting[i]);
+ 			tlmi_priv.setting[i] = NULL;
+diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
+index de77df97b3a44..ec163d1bcd189 100644
+--- a/drivers/power/supply/qcom_battmgr.c
++++ b/drivers/power/supply/qcom_battmgr.c
+@@ -105,7 +105,7 @@ struct qcom_battmgr_property_request {
+ 
+ struct qcom_battmgr_update_request {
+ 	struct pmic_glink_hdr hdr;
+-	u32 battery_id;
++	__le32 battery_id;
+ };
+ 
+ struct qcom_battmgr_charge_time_request {
+@@ -1282,9 +1282,9 @@ static void qcom_battmgr_enable_worker(struct work_struct *work)
+ {
+ 	struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work);
+ 	struct qcom_battmgr_enable_request req = {
+-		.hdr.owner = PMIC_GLINK_OWNER_BATTMGR,
+-		.hdr.type = PMIC_GLINK_NOTIFY,
+-		.hdr.opcode = BATTMGR_REQUEST_NOTIFICATION,
++		.hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
++		.hdr.type = cpu_to_le32(PMIC_GLINK_NOTIFY),
++		.hdr.opcode = cpu_to_le32(BATTMGR_REQUEST_NOTIFICATION),
+ 	};
+ 	int ret;
+ 
+diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
+index 9c67b97faba2b..610ad8a801a57 100644
+--- a/drivers/s390/net/Kconfig
++++ b/drivers/s390/net/Kconfig
+@@ -103,7 +103,7 @@ config CCWGROUP
+ 
+ config ISM
+ 	tristate "Support for ISM vPCI Adapter"
+-	depends on PCI && SMC
++	depends on PCI
+ 	default n
+ 	help
+ 	  Select this option if you want to use the Internal Shared Memory
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 902655d759476..44680f65ea145 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1627,12 +1627,13 @@ int scsi_rescan_device(struct scsi_device *sdev)
+ 	device_lock(dev);
+ 
+ 	/*
+-	 * Bail out if the device is not running. Otherwise, the rescan may
+-	 * block waiting for commands to be executed, with us holding the
+-	 * device lock. This can result in a potential deadlock in the power
+-	 * management core code when system resume is on-going.
++	 * Bail out if the device or its queue is not running. Otherwise,
++	 * the rescan may block waiting for commands to be executed, with us
++	 * holding the device lock. This can result in a potential deadlock
++	 * in the power management core code when system resume is on-going.
+ 	 */
+-	if (sdev->sdev_state != SDEV_RUNNING) {
++	if (sdev->sdev_state != SDEV_RUNNING ||
++	    blk_queue_pm_only(sdev->request_queue)) {
+ 		ret = -EWOULDBLOCK;
+ 		goto unlock;
+ 	}
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 372d64756ed64..3c15f6a9e91c0 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -217,12 +217,12 @@ unlock:
+ 	return rc;
+ }
+ 
++/* mutex must be held by caller */
+ static void destroy_session(struct kref *ref)
+ {
+ 	struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ 						   refcount);
+ 
+-	mutex_lock(&session_list_mutex);
+ 	list_del(&sess->list_node);
+ 	mutex_unlock(&session_list_mutex);
+ 	kfree(sess);
+@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
+ 	if (arg->ret != TEEC_SUCCESS) {
+ 		pr_err("open_session failed %d\n", arg->ret);
+ 		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
++		kref_put_mutex(&sess->refcount, destroy_session,
++			       &session_list_mutex);
+ 		goto out;
+ 	}
+ 
+@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
+ 		pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ 		handle_close_session(ta_handle, session_info);
+ 		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
++		kref_put_mutex(&sess->refcount, destroy_session,
++			       &session_list_mutex);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
+ 	handle_close_session(ta_handle, session_info);
+ 	handle_unload_ta(ta_handle);
+ 
+-	kref_put(&sess->refcount, destroy_session);
++	kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
+ 
+ 	return 0;
+ }
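
destroy_session() removes the session from session_list, so it must run under session_list_mutex; but taking that mutex inside the release callback, as the old code did, deadlocks any caller that already holds it. kref_put_mutex() is built for this: it acquires the mutex only if the refcount drops to zero, then calls the release function with the lock held, and the release function unlocks. Roughly:

    /* Sketch: release runs with the mutex already held (taken by
     * kref_put_mutex) and is responsible for dropping it.
     */
    static void example_destroy(struct kref *ref)
    {
        struct example_session *s =
            container_of(ref, struct example_session, refcount);

        list_del(&s->list_node);
        mutex_unlock(&session_list_mutex);
        kfree(s);
    }

    /* lock is taken only when this put drops the last reference */
    kref_put_mutex(&sess->refcount, example_destroy, &session_list_mutex);
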
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index dbdcad8d73bf6..d8b9c734abd36 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -41,6 +41,7 @@
+ #define PHY_PORT_CS1_LINK_STATE_SHIFT	26
+ 
+ #define ICM_TIMEOUT			5000	/* ms */
++#define ICM_RETRIES			3
+ #define ICM_APPROVE_TIMEOUT		10000	/* ms */
+ #define ICM_MAX_LINK			4
+ 
+@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+ 
+ static int icm_request(struct tb *tb, const void *request, size_t request_size,
+ 		       void *response, size_t response_size, size_t npackets,
+-		       unsigned int timeout_msec)
++		       int retries, unsigned int timeout_msec)
+ {
+ 	struct icm *icm = tb_priv(tb);
+-	int retries = 3;
+ 
+ 	do {
+ 		struct tb_cfg_request *req;
+@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+ 		return -ENOMEM;
+ 
+ 	ret = icm_request(tb, &request, sizeof(request), switches,
+-			  sizeof(*switches), npackets, ICM_TIMEOUT);
++			  sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		goto err_free;
+ 
+@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+ 	memset(&reply, 0, sizeof(reply));
+ 	/* Use larger timeout as establishing tunnels can take some time */
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_APPROVE_TIMEOUT);
++			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, 20000);
++			  1, 10, 2000);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1053,7 +1053,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_APPROVE_TIMEOUT);
++			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1081,7 +1081,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1110,7 +1110,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1144,7 +1144,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1170,7 +1170,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1496,7 +1496,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1522,7 +1522,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1543,7 +1543,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1604,7 +1604,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1626,7 +1626,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, 20000);
++			  1, ICM_RETRIES, 20000);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2298,7 +2298,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 7ea63bb317140..c15c3b9b50d91 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2724,6 +2724,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ 	    !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
+ 		return 0;
+ 
++	/*
++	 * Both lanes need to be in CL0. Here we assume lane 0 is already in
++	 * CL0 and check just for lane 1.
++	 */
++	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
++		return -ENOTCONN;
++
+ 	ret = tb_port_lane_bonding_enable(up);
+ 	if (ret) {
+ 		tb_port_warn(up, "failed to enable lane bonding\n");
+diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
+index 0dfd1e083994e..ae88e657dca5e 100644
+--- a/drivers/thunderbolt/tmu.c
++++ b/drivers/thunderbolt/tmu.c
+@@ -382,7 +382,7 @@ static int tmu_mode_init(struct tb_switch *sw)
+ 		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
+ 			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
+ 				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
+-			else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
++			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
+ 				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
+ 		} else if (rate) {
+ 			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index 5b5566862318b..9803f0bbf20d1 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -703,6 +703,27 @@ out_unlock:
+ 	mutex_unlock(&xdomain_lock);
+ }
+ 
++static void start_handshake(struct tb_xdomain *xd)
++{
++	xd->state = XDOMAIN_STATE_INIT;
++	queue_delayed_work(xd->tb->wq, &xd->state_work,
++			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
++}
++
++/* Can be called from state_work */
++static void __stop_handshake(struct tb_xdomain *xd)
++{
++	cancel_delayed_work_sync(&xd->properties_changed_work);
++	xd->properties_changed_retries = 0;
++	xd->state_retries = 0;
++}
++
++static void stop_handshake(struct tb_xdomain *xd)
++{
++	cancel_delayed_work_sync(&xd->state_work);
++	__stop_handshake(xd);
++}
++
+ static void tb_xdp_handle_request(struct work_struct *work)
+ {
+ 	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+@@ -765,6 +786,15 @@ static void tb_xdp_handle_request(struct work_struct *work)
+ 	case UUID_REQUEST:
+ 		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
+ 		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
++		/*
++		 * If we've stopped the discovery with an error such as
++		 * timing out, we will restart the handshake now that we have
++		 * received a UUID request from the remote host.
++		 */
++		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
++			dev_dbg(&xd->dev, "restarting handshake\n");
++			start_handshake(xd);
++		}
+ 		break;
+ 
+ 	case LINK_STATE_STATUS_REQUEST:
+@@ -1521,6 +1551,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+ 			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ }
+ 
++static void tb_xdomain_failed(struct tb_xdomain *xd)
++{
++	xd->state = XDOMAIN_STATE_ERROR;
++	queue_delayed_work(xd->tb->wq, &xd->state_work,
++			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
++}
++
+ static void tb_xdomain_state_work(struct work_struct *work)
+ {
+ 	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+@@ -1547,7 +1584,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		if (ret) {
+ 			if (ret == -EAGAIN)
+ 				goto retry_state;
+-			xd->state = XDOMAIN_STATE_ERROR;
++			tb_xdomain_failed(xd);
+ 		} else {
+ 			tb_xdomain_queue_properties_changed(xd);
+ 			if (xd->bonding_possible)
+@@ -1612,7 +1649,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		if (ret) {
+ 			if (ret == -EAGAIN)
+ 				goto retry_state;
+-			xd->state = XDOMAIN_STATE_ERROR;
++			tb_xdomain_failed(xd);
+ 		} else {
+ 			xd->state = XDOMAIN_STATE_ENUMERATED;
+ 		}
+@@ -1623,6 +1660,8 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		break;
+ 
+ 	case XDOMAIN_STATE_ERROR:
++		dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
++		__stop_handshake(xd);
+ 		break;
+ 
+ 	default:
+@@ -1833,21 +1872,6 @@ static void tb_xdomain_release(struct device *dev)
+ 	kfree(xd);
+ }
+ 
+-static void start_handshake(struct tb_xdomain *xd)
+-{
+-	xd->state = XDOMAIN_STATE_INIT;
+-	queue_delayed_work(xd->tb->wq, &xd->state_work,
+-			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+-}
+-
+-static void stop_handshake(struct tb_xdomain *xd)
+-{
+-	cancel_delayed_work_sync(&xd->properties_changed_work);
+-	cancel_delayed_work_sync(&xd->state_work);
+-	xd->properties_changed_retries = 0;
+-	xd->state_retries = 0;
+-}
+-
+ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+ {
+ 	stop_handshake(tb_to_xdomain(dev));
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index d48a82f1634e9..445235ee53b3d 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1618,7 +1618,7 @@ static int omap8250_suspend(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+ 	struct uart_8250_port *up = serial8250_get_port(priv->line);
+-	int err;
++	int err = 0;
+ 
+ 	serial8250_suspend_port(priv->line);
+ 
+@@ -1628,7 +1628,8 @@ static int omap8250_suspend(struct device *dev)
+ 	if (!device_may_wakeup(dev))
+ 		priv->wer = 0;
+ 	serial_out(up, UART_OMAP_WER, priv->wer);
+-	err = pm_runtime_force_suspend(dev);
++	if (uart_console(&up->port) && console_suspend_enabled)
++		err = pm_runtime_force_suspend(dev);
+ 	flush_work(&priv->qos_work);
+ 
+ 	return err;
+@@ -1637,11 +1638,15 @@ static int omap8250_suspend(struct device *dev)
+ static int omap8250_resume(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
++	struct uart_8250_port *up = serial8250_get_port(priv->line);
+ 	int err;
+ 
+-	err = pm_runtime_force_resume(dev);
+-	if (err)
+-		return err;
++	if (uart_console(&up->port) && console_suspend_enabled) {
++		err = pm_runtime_force_resume(dev);
++		if (err)
++			return err;
++	}
++
+ 	serial8250_resume_port(priv->line);
+ 	/* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
+ 	pm_runtime_mark_last_busy(dev);
+@@ -1718,16 +1723,6 @@ static int omap8250_runtime_suspend(struct device *dev)
+ 
+ 	if (priv->line >= 0)
+ 		up = serial8250_get_port(priv->line);
+-	/*
+-	 * When using 'no_console_suspend', the console UART must not be
+-	 * suspended. Since driver suspend is managed by runtime suspend,
+-	 * preventing runtime suspend (by returning error) will keep device
+-	 * active during suspend.
+-	 */
+-	if (priv->is_suspending && !console_suspend_enabled) {
+-		if (up && uart_console(&up->port))
+-			return -EBUSY;
+-	}
+ 
+ 	if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
+ 		int ret;
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 831d033611e61..83c419ac78bca 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -157,7 +157,7 @@ static void __uart_start(struct tty_struct *tty)
+ 	 * enabled, serial_port_runtime_resume() calls start_tx() again
+ 	 * after enabling the device.
+ 	 */
+-	if (pm_runtime_active(&port_dev->dev))
++	if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
+ 		port->ops->start_tx(port);
+ 	pm_runtime_mark_last_busy(&port_dev->dev);
+ 	pm_runtime_put_autosuspend(&port_dev->dev);
+@@ -1410,12 +1410,18 @@ static void uart_set_rs485_termination(struct uart_port *port,
+ static int uart_rs485_config(struct uart_port *port)
+ {
+ 	struct serial_rs485 *rs485 = &port->rs485;
++	unsigned long flags;
+ 	int ret;
+ 
++	if (!(rs485->flags & SER_RS485_ENABLED))
++		return 0;
++
+ 	uart_sanitize_serial_rs485(port, rs485);
+ 	uart_set_rs485_termination(port, rs485);
+ 
++	spin_lock_irqsave(&port->lock, flags);
+ 	ret = port->rs485_config(port, NULL, rs485);
++	spin_unlock_irqrestore(&port->lock, flags);
+ 	if (ret)
+ 		memset(rs485, 0, sizeof(*rs485));
+ 
+@@ -2480,11 +2486,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ 			if (ret == 0) {
+ 				if (tty)
+ 					uart_change_line_settings(tty, state, NULL);
++				uart_rs485_config(uport);
+ 				spin_lock_irq(&uport->lock);
+ 				if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ 					ops->set_mctrl(uport, uport->mctrl);
+-				else
+-					uart_rs485_config(uport);
+ 				ops->start_tx(uport);
+ 				spin_unlock_irq(&uport->lock);
+ 				tty_port_set_initialized(port, true);
+@@ -2593,10 +2598,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 		port->mctrl &= TIOCM_DTR;
+ 		if (!(port->rs485.flags & SER_RS485_ENABLED))
+ 			port->ops->set_mctrl(port, port->mctrl);
+-		else
+-			uart_rs485_config(port);
+ 		spin_unlock_irqrestore(&port->lock, flags);
+ 
++		uart_rs485_config(port);
++
+ 		/*
+ 		 * If this driver supports console, and it hasn't been
+ 		 * successfully registered yet, try to re-register it.
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 80c48eb6bf85c..3349c46e5fa2c 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6955,7 +6955,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+ 			mask, 0, 1000, 1000);
+ 
+ 	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+-		tag, err ? "succeeded" : "failed");
++		tag, err < 0 ? "failed" : "succeeded");
+ 
+ out:
+ 	return err;
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index fff9ec9c391fa..4b67749edb997 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (request->status != -EINPROGRESS)
++		return 0;
++
+ 	if (!pep->endpoint.desc) {
+ 		dev_err(pdev->dev,
+ 			"%s: can't dequeue to disabled endpoint\n",
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 4a4dbc2c15615..81a9c9d6be08b 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active);
+ #else /* CONFIG_PM_SLEEP */
+ static inline int cdns_resume(struct cdns *cdns)
+ { return 0; }
+-static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
+-{ return 0; }
++static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
+ static inline int cdns_suspend(struct cdns *cdns)
+ { return 0; }
+ #endif /* CONFIG_PM_SLEEP */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 26a27ff504085..f2c561ae4bfeb 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
+ 	if (udev->quirks & USB_QUIRK_NO_LPM)
+ 		return 0;
+ 
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return 0;
++
+ 	/* USB 2.1 (and greater) devices indicate LPM support through
+ 	 * their USB 2.0 Extended Capabilities BOS descriptor.
+ 	 */
+@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
+ 	if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
+ 		return;
+ 
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return;
++
+ 	hub = usb_hub_to_struct_hub(udev->parent);
+ 	/* It doesn't take time to transition the roothub into U0, since it
+ 	 * doesn't have an upstream link.
+@@ -2720,13 +2728,17 @@ out_authorized:
+ static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
+ 					   u32 ext_portstatus)
+ {
+-	struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
++	struct usb_ssp_cap_descriptor *ssp_cap;
+ 	u32 attr;
+ 	u8 speed_id;
+ 	u8 ssac;
+ 	u8 lanes;
+ 	int i;
+ 
++	if (!hdev->bos)
++		goto out;
++
++	ssp_cap = hdev->bos->ssp_cap;
+ 	if (!ssp_cap)
+ 		goto out;
+ 
+@@ -4244,8 +4256,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+ 		enum usb3_link_state state)
+ {
+ 	int timeout;
+-	__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
+-	__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
++	__u8 u1_mel;
++	__le16 u2_mel;
++
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return;
++
++	u1_mel = udev->bos->ss_cap->bU1devExitLat;
++	u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+ 
+ 	/* If the device says it doesn't have *any* exit latency to come out of
+ 	 * U1 or U2, it's probably lying.  Assume it doesn't implement that link
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 37897afd1b649..d44dd7f6623ee 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -153,7 +153,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
+ {
+ 	return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
+ 		le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
+-		hdev->bos->ssp_cap);
++		hdev->bos && hdev->bos->ssp_cap);
+ }
+ 
+ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
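
All of the usb/core hunks above share one cause: reading a device's BOS descriptor can fail during enumeration, leaving udev->bos NULL rather than an error pointer, and several LPM paths dereferenced it unconditionally. Every consumer therefore gains the same guard; the recurring shape:

    /* Sketch: the BOS descriptor is optional at runtime; treat a
     * missing one as "no LPM support" instead of dereferencing NULL.
     */
    if (!udev->bos)
        return 0;

    u1_mel = udev->bos->ss_cap->bU1devExitLat;
    u2_mel = udev->bos->ss_cap->bU2DevExitLat;
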
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 9c6bf054f15d4..343d2570189ff 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	 * XHCI driver will reset the host block. If dwc3 was configured for
+ 	 * host-only mode or current role is host, then we can return early.
+ 	 */
+-	if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
++	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ 		return 0;
+ 
++	/*
++	 * If the dr_mode is host and the dwc->current_dr_role is not the
++	 * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
++	 * isn't executed yet. Ensure the phy is ready before the controller
++	 * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
++	 * the phy.
++	 *
++	 * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
++	 * is port index. If this is a multiport host, then we need to reset
++	 * all active ports.
++	 */
++	if (dwc->dr_mode == USB_DR_MODE_HOST) {
++		u32 usb3_port;
++		u32 usb2_port;
++
++		usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
++		usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
++
++		usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++		usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
++
++		/* Small delay for phy reset assertion */
++		usleep_range(1000, 2000);
++
++		usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
++
++		usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
++
++		/* Wait for clock synchronization */
++		msleep(50);
++		return 0;
++	}
++
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	reg |= DWC3_DCTL_CSFTRST;
+ 	reg &= ~DWC3_DCTL_RUN_STOP;
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 424bb3b666dbd..faf90a2174194 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1171,7 +1171,8 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 			  struct sk_buff_head *list)
+ {
+ 	struct f_ncm	*ncm = func_to_ncm(&port->func);
+-	__le16		*tmp = (void *) skb->data;
++	unsigned char	*ntb_ptr = skb->data;
++	__le16		*tmp;
+ 	unsigned	index, index2;
+ 	int		ndp_index;
+ 	unsigned	dg_len, dg_len2;
+@@ -1184,6 +1185,10 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 	const struct ndp_parser_opts *opts = ncm->parser_opts;
+ 	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+ 	int		dgram_counter;
++	int		to_process = skb->len;
++
++parse_ntb:
++	tmp = (__le16 *)ntb_ptr;
+ 
+ 	/* dwSignature */
+ 	if (get_unaligned_le32(tmp) != opts->nth_sign) {
+@@ -1230,7 +1235,7 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 		 * walk through NDP
+ 		 * dwSignature
+ 		 */
+-		tmp = (void *)(skb->data + ndp_index);
++		tmp = (__le16 *)(ntb_ptr + ndp_index);
+ 		if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
+ 			INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
+ 			goto err;
+@@ -1287,11 +1292,11 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 			if (ncm->is_crc) {
+ 				uint32_t crc, crc2;
+ 
+-				crc = get_unaligned_le32(skb->data +
++				crc = get_unaligned_le32(ntb_ptr +
+ 							 index + dg_len -
+ 							 crc_len);
+ 				crc2 = ~crc32_le(~0,
+-						 skb->data + index,
++						 ntb_ptr + index,
+ 						 dg_len - crc_len);
+ 				if (crc != crc2) {
+ 					INFO(port->func.config->cdev,
+@@ -1318,7 +1323,7 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 							 dg_len - crc_len);
+ 			if (skb2 == NULL)
+ 				goto err;
+-			skb_put_data(skb2, skb->data + index,
++			skb_put_data(skb2, ntb_ptr + index,
+ 				     dg_len - crc_len);
+ 
+ 			skb_queue_tail(list, skb2);
+@@ -1331,10 +1336,17 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
+ 	} while (ndp_index);
+ 
+-	dev_consume_skb_any(skb);
+-
+ 	VDBG(port->func.config->cdev,
+ 	     "Parsed NTB with %d frames\n", dgram_counter);
++
++	to_process -= block_len;
++	if (to_process != 0) {
++		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
++		goto parse_ntb;
++	}
++
++	dev_consume_skb_any(skb);
++
+ 	return 0;
+ err:
+ 	skb_queue_purge(list);
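
The rewritten ncm_unwrap_ntb() handles a single USB transfer carrying several NTBs back to back: after one block is parsed, block_len is subtracted from the bytes remaining and the code jumps back to parse_ntb with ntb_ptr advanced, consuming the skb only once everything is handed off. The control flow, reduced to a skeleton (example_parse_one_ntb() is hypothetical):

    unsigned char *ntb_ptr = skb->data;
    int to_process = skb->len;
    unsigned int block_len;

    do {
        block_len = example_parse_one_ntb(ntb_ptr);  /* queues datagrams */
        if (!block_len)
            goto err;                 /* malformed block */

        to_process -= block_len;
        ntb_ptr += block_len;         /* start of the next NTB, if any */
    } while (to_process > 0);

    dev_consume_skb_any(skb);         /* all blocks parsed */
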
+diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
+index a4a7b90a97e70..3e5d82097c712 100644
+--- a/drivers/usb/gadget/udc/udc-xilinx.c
++++ b/drivers/usb/gadget/udc/udc-xilinx.c
+@@ -499,11 +499,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
+ 		/* Get the Buffer address and copy the transmit data.*/
+ 		eprambase = (u32 __force *)(udc->addr + ep->rambase);
+ 		if (ep->is_in) {
+-			memcpy(eprambase, bufferptr, bytestosend);
++			memcpy_toio((void __iomem *)eprambase, bufferptr,
++				    bytestosend);
+ 			udc->write_fn(udc->addr, ep->offset +
+ 				      XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
+ 		} else {
+-			memcpy(bufferptr, eprambase, bytestosend);
++			memcpy_toio((void __iomem *)bufferptr, eprambase,
++				    bytestosend);
+ 		}
+ 		/*
+ 		 * Enable the buffer for transmission.
+@@ -517,11 +519,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
+ 		eprambase = (u32 __force *)(udc->addr + ep->rambase +
+ 			     ep->ep_usb.maxpacket);
+ 		if (ep->is_in) {
+-			memcpy(eprambase, bufferptr, bytestosend);
++			memcpy_toio((void __iomem *)eprambase, bufferptr,
++				    bytestosend);
+ 			udc->write_fn(udc->addr, ep->offset +
+ 				      XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
+ 		} else {
+-			memcpy(bufferptr, eprambase, bytestosend);
++			memcpy_toio((void __iomem *)bufferptr, eprambase,
++				    bytestosend);
+ 		}
+ 		/*
+ 		 * Enable the buffer for transmission.
+@@ -1023,7 +1027,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
+ 			   udc->addr);
+ 		length = req->usb_req.actual = min_t(u32, length,
+ 						     EP0_MAX_PACKET);
+-		memcpy(corebuf, req->usb_req.buf, length);
++		memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
+ 		udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
+ 		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ 	} else {
+@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
+ 
+ 	/* Load up the chapter 9 command buffer.*/
+ 	ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
+-	memcpy(&setup, ep0rambase, 8);
++	memcpy_toio((void __iomem *)&setup, ep0rambase, 8);
+ 
+ 	udc->setup = setup;
+ 	udc->setup.wValue = cpu_to_le16(setup.wValue);
+@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
+ 			     (ep0->rambase << 2));
+ 		buffer = req->usb_req.buf + req->usb_req.actual;
+ 		req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
+-		memcpy(buffer, ep0rambase, bytes_to_rx);
++		memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx);
+ 
+ 		if (req->usb_req.length == req->usb_req.actual) {
+ 			/* Data transfer completed get ready for Status stage */
+@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
+ 				     (ep0->rambase << 2));
+ 			buffer = req->usb_req.buf + req->usb_req.actual;
+ 			req->usb_req.actual = req->usb_req.actual + length;
+-			memcpy(ep0rambase, buffer, length);
++			memcpy_toio((void __iomem *)ep0rambase, buffer, length);
+ 		}
+ 		udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
+ 		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
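
The udc-xilinx hunks replace plain memcpy() on endpoint buffer RAM with the MMIO-safe helpers, since that RAM is iomapped and ordinary memcpy() gives no guarantees about access size or ordering on I/O memory. For reference, the kernel API is directional; note the hunks above use memcpy_toio() even where data flows from endpoint RAM into a kernel buffer, whereas the canonical usage is:

    #include <linux/io.h>

    /* memcpy_toio(dst_io, src, n):   kernel memory -> device memory
     * memcpy_fromio(dst, src_io, n): device memory -> kernel memory
     */
    void __iomem *ep_ram = udc_base + ep_rambase;   /* hypothetical mapping */

    memcpy_toio(ep_ram, tx_buf, tx_len);     /* CPU writes endpoint RAM */
    memcpy_fromio(rx_buf, ep_ram, rx_len);   /* CPU reads endpoint RAM */
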
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 0054d02239e28..0df5d807a77e8 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1062,19 +1062,19 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
+ 		*status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+ 
+ 	/* USB3 specific wPortStatus bits */
+-	if (portsc & PORT_POWER) {
++	if (portsc & PORT_POWER)
+ 		*status |= USB_SS_PORT_STAT_POWER;
+-		/* link state handling */
+-		if (link_state == XDEV_U0)
+-			bus_state->suspended_ports &= ~(1 << portnum);
+-	}
+ 
+-	/* remote wake resume signaling complete */
+-	if (bus_state->port_remote_wakeup & (1 << portnum) &&
++	/* no longer suspended or resuming */
++	if (link_state != XDEV_U3 &&
+ 	    link_state != XDEV_RESUME &&
+ 	    link_state != XDEV_RECOVERY) {
+-		bus_state->port_remote_wakeup &= ~(1 << portnum);
+-		usb_hcd_end_port_resume(&hcd->self, portnum);
++		/* remote wake resume signaling complete */
++		if (bus_state->port_remote_wakeup & (1 << portnum)) {
++			bus_state->port_remote_wakeup &= ~(1 << portnum);
++			usb_hcd_end_port_resume(&hcd->self, portnum);
++		}
++		bus_state->suspended_ports &= ~(1 << portnum);
+ 	}
+ 
+ 	xhci_hub_report_usb3_link_state(xhci, status, portsc);
+@@ -1131,6 +1131,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
+ 			usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
+ 		}
+ 		port->rexit_active = 0;
++		bus_state->suspended_ports &= ~(1 << portnum);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 19a402123de02..e318ac8d30c11 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2288,8 +2288,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ 	writel(erst_size, &ir->ir_set->erst_size);
+ 
+ 	erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+-	erst_base &= ERST_PTR_MASK;
+-	erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
++	erst_base &= ERST_BASE_RSVDP;
++	erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
+ 	xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
+ 
+ 	/* Set the event ring dequeue address of this interrupter */
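GENMASK_ULL(5, 0) expands to the 64-bit constant 0x3f, so switching the
masking from ERST_PTR_MASK to ERST_BASE_RSVDP matches the register layout:
the ERST base address is 64-byte aligned and bits 5:0 are reserved-preserve.
The hunk is the usual read-modify-write idiom for such registers; a hedged
sketch with a hypothetical register:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define REG_RSVDP	GENMASK_ULL(5, 0)  /* reserved, preserve on write */

	static u64 reg_install_base(u64 old, u64 base_dma)
	{
		u64 val = old & REG_RSVDP;	/* keep the reserved bits */

		val |= base_dma & ~REG_RSVDP;	/* 64-byte aligned base */
		return val;
	}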
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1dde53f6eb317..3e5dc0723a8fc 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+ 		struct xhci_ring *ring, struct xhci_td *td)
+ {
+-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
++	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	struct xhci_segment *seg = td->bounce_seg;
+ 	struct urb *urb = td->urb;
+ 	size_t len;
+@@ -2996,7 +2996,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+  */
+ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ 				     struct xhci_interrupter *ir,
+-				     union xhci_trb *event_ring_deq)
++				     union xhci_trb *event_ring_deq,
++				     bool clear_ehb)
+ {
+ 	u64 temp_64;
+ 	dma_addr_t deq;
+@@ -3017,12 +3018,13 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ 			return;
+ 
+ 		/* Update HC event ring dequeue pointer */
+-		temp_64 &= ERST_PTR_MASK;
++		temp_64 &= ERST_DESI_MASK;
+ 		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ 	}
+ 
+ 	/* Clear the event handler busy flag (RW1C) */
+-	temp_64 |= ERST_EHB;
++	if (clear_ehb)
++		temp_64 |= ERST_EHB;
+ 	xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
+ }
+ 
+@@ -3103,7 +3105,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ 	while (xhci_handle_event(xhci, ir) > 0) {
+ 		if (event_loop++ < TRBS_PER_SEGMENT / 2)
+ 			continue;
+-		xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
++		xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
+ 		event_ring_deq = ir->event_ring->dequeue;
+ 
+ 		/* ring is half-full, force isoc trbs to interrupt more often */
+@@ -3113,7 +3115,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ 		event_loop = 0;
+ 	}
+ 
+-	xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
++	xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
+ 	ret = IRQ_HANDLED;
+ 
+ out:
+@@ -3469,7 +3471,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
+ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+ 			 u32 *trb_buff_len, struct xhci_segment *seg)
+ {
+-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
++	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	unsigned int unalign;
+ 	unsigned int max_pkt;
+ 	u32 new_buff_len;
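The event handler busy (EHB) flag cleared above is an RW1C bit: writing a 1
clears it and writing a 0 leaves it alone. Passing clear_ehb lets the
intermediate dequeue updates in the interrupt loop leave the bit at 0, so the
controller still sees the handler as busy until the final update. A sketch of
the idiom, with masks assumed to mirror the ERST dequeue register layout:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEQ_DESI_MASK	GENMASK_ULL(2, 0)	/* segment index */
	#define DEQ_EHB		BIT_ULL(3)		/* busy flag, RW1C */
	#define DEQ_PTR_MASK	GENMASK_ULL(63, 4)	/* dequeue pointer */

	static u64 update_dequeue(u64 reg, u64 deq, bool clear_busy)
	{
		u64 val = reg & DEQ_DESI_MASK;	/* drop EHB: writing back a
						 * stale 1 would clear it */

		val |= deq & DEQ_PTR_MASK;
		if (clear_busy)
			val |= DEQ_EHB;		/* a written 1 clears the flag */
		return val;
	}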
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 7e282b4522c0a..5df370482521f 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -514,7 +514,7 @@ struct xhci_intr_reg {
+ #define	ERST_SIZE_MASK		(0xffff << 16)
+ 
+ /* erst_base bitmasks */
+-#define ERST_BASE_RSVDP		(0x3f)
++#define ERST_BASE_RSVDP		(GENMASK_ULL(5, 0))
+ 
+ /* erst_dequeue bitmasks */
+ /* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 83f14ca1d38e7..36dd4971b7808 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -409,6 +409,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+ static const struct usb_device_id onboard_hub_id_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
++	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index aca5f50eb0da7..f136dffb4a101 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -35,6 +35,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
+ };
+ 
+ static const struct of_device_id onboard_hub_match[] = {
++	{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
+index 78c726a71b177..2d623284edf63 100644
+--- a/drivers/usb/musb/musb_debugfs.c
++++ b/drivers/usb/musb/musb_debugfs.c
+@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
+ 	{ "IntrUsbE",	MUSB_INTRUSBE,	8 },
+ 	{ "DevCtl",	MUSB_DEVCTL,	8 },
+ 	{ "VControl",	0x68,		32 },
+-	{ "HWVers",	0x69,		16 },
++	{ "HWVers",	MUSB_HWVERS,	16 },
+ 	{ "LinkInfo",	MUSB_LINKINFO,	8 },
+ 	{ "VPLen",	MUSB_VPLEN,	8 },
+ 	{ "HS_EOF1",	MUSB_HS_EOF1,	8 },
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index a02c29216955a..bc45077811679 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ 	musb_giveback(musb, urb, status);
+ 	qh->is_ready = ready;
+ 
++	/*
++	 * musb->lock was dropped in musb_giveback(), so qh may have
++	 * been freed; look it up again
++	 */
++	qh = musb_ep_get_qh(hw_ep, is_in);
++
+ 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ 	 * invalidate qh as soon as list_empty(&hep->urb_list)
+ 	 */
+-	if (list_empty(&qh->hep->urb_list)) {
++	if (qh && list_empty(&qh->hep->urb_list)) {
+ 		struct list_head	*head;
+ 		struct dma_controller	*dma = musb->dma_controller;
+ 
+@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		 * and its URB list has emptied, recycle this qh.
+ 		 */
+ 		if (ready && list_empty(&qh->hep->urb_list)) {
++			musb_ep_set_qh(qh->hw_ep, is_in, NULL);
+ 			qh->hep->hcpriv = NULL;
+ 			list_del(&qh->ring);
+ 			kfree(qh);
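Both musb hunks enforce the same locking rule: after a callback that drops
musb->lock, any pointer that was only valid under the lock has to be looked
up again and NULL-checked, and the per-endpoint back-reference has to be
cleared before the qh is freed so later lookups cannot return stale memory.
A condensed sketch of the pattern, with hypothetical names:

	spin_lock(&dev->lock);
	obj = get_current_obj(dev);	/* valid while the lock is held */
	give_back(dev, obj);		/* drops and re-takes dev->lock */
	obj = get_current_obj(dev);	/* re-validate: may be NULL now */
	if (obj && list_empty(&obj->pending))
		recycle(dev, obj);
	spin_unlock(&dev->lock);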
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index cdf8261e22dbd..459e5eb1a7890 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -304,6 +304,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
+ 			typec_altmode_update_active(alt, false);
+ 			dp->data.status = 0;
+ 			dp->data.conf = 0;
++			if (dp->hpd) {
++				drm_connector_oob_hotplug_event(dp->connector_fwnode);
++				dp->hpd = false;
++				sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
++			}
+ 			break;
+ 		case DP_CMD_STATUS_UPDATE:
+ 			dp->data.status = *vdo;
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+index 4e1b846627d20..d29f9506e5f12 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+@@ -383,10 +383,6 @@ static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdph
+ 	struct device *dev = pmic_typec_pdphy->dev;
+ 	int ret;
+ 
+-	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
+-	if (ret)
+-		return ret;
+-
+ 	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
+ 	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
+ 				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
+@@ -424,8 +420,6 @@ static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdp
+ 	ret = regmap_write(pmic_typec_pdphy->regmap,
+ 			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
+ 
+-	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+-
+ 	return ret;
+ }
+ 
+@@ -449,6 +443,10 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ 	int i;
+ 	int ret;
+ 
++	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
++	if (ret)
++		return ret;
++
+ 	pmic_typec_pdphy->tcpm_port = tcpm_port;
+ 
+ 	ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
+@@ -469,6 +467,8 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
+ 		disable_irq(pmic_typec_pdphy->irq_data[i].irq);
+ 
+ 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
++
++	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+ }
+ 
+ struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
+diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
+index 384b42267f1fc..b35c6e07911e9 100644
+--- a/drivers/usb/typec/ucsi/psy.c
++++ b/drivers/usb/typec/ucsi/psy.c
+@@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con,
+ 	struct device *dev = con->ucsi->dev;
+ 
+ 	device_property_read_u8(dev, "scope", &scope);
++	if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) {
++		u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY |
++			   UCSI_CAP_ATTR_BATTERY_CHARGING;
++
++		if (con->ucsi->cap.attributes & mask)
++			scope = POWER_SUPPLY_SCOPE_SYSTEM;
++		else
++			scope = POWER_SUPPLY_SCOPE_DEVICE;
++	}
+ 	val->intval = scope;
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index f6901319639d9..30675948d8d93 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -787,6 +787,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
+ 
+ 	typec_set_mode(con->port, TYPEC_STATE_SAFE);
+ 
++	typec_partner_set_usb_power_delivery(con->partner, NULL);
+ 	ucsi_unregister_partner_pdos(con);
+ 	ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
+ 	typec_unregister_partner(con->partner);
+@@ -884,6 +885,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	if (ret < 0) {
+ 		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ 			__func__, ret);
++		clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 63efe5389783c..aeac54eba2994 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2559,7 +2559,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ 		ret = do_splice_direct(src_file, &src_off, dst_file,
+ 				       &dst_off, src_objlen, flags);
+ 		/* Abort on short copies or on error */
+-		if (ret < src_objlen) {
++		if (ret < (long)src_objlen) {
+ 			dout("Failed partial copy (%zd)\n", ret);
+ 			goto out;
+ 		}
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 8e5f41d452839..29fa35347b88c 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -655,9 +655,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
+ 			ci->i_truncate_seq = truncate_seq;
+ 
+ 			/* the MDS should have revoked these caps */
+-			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
+-					       CEPH_CAP_FILE_RD |
+-					       CEPH_CAP_FILE_WR |
++			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
+ 					       CEPH_CAP_FILE_LAZYIO));
+ 			/*
+ 			 * If we hold relevant caps, or in the case where we're
+diff --git a/fs/file.c b/fs/file.c
+index 3fd003a8604f8..568a98178007c 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -668,7 +668,7 @@ EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+ 
+ /**
+  * last_fd - return last valid index into fd table
+- * @cur_fds: files struct
++ * @fdt: File descriptor table.
+  *
+  * Context: Either rcu read lock or files_lock must be held.
+  *
+@@ -723,6 +723,7 @@ static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
+  *
+  * @fd:     starting file descriptor to close
+  * @max_fd: last file descriptor to close
++ * @flags:  CLOSE_RANGE flags.
+  *
+  * This closes a range of file descriptors. All file descriptors
+  * from @fd up to and including @max_fd are closed.
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 375023e40161d..896e89acac5c2 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -162,6 +162,10 @@ EXPORT_SYMBOL(vfs_parse_fs_param);
+ 
+ /**
+  * vfs_parse_fs_string - Convenience function to just parse a string.
++ * @fc: Filesystem context.
++ * @key: Parameter name.
++ * @value: Default value.
++ * @v_size: Maximum number of bytes in the value.
+  */
+ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ 			const char *value, size_t v_size)
+@@ -188,17 +192,19 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ EXPORT_SYMBOL(vfs_parse_fs_string);
+ 
+ /**
+- * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+- * @ctx: The superblock configuration to fill in.
++ * vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
++ * @fc: The superblock configuration to fill in.
+  * @data: The data to parse
++ * @sep: callback for separating next option
+  *
+- * Parse a blob of data that's in key[=val][,key[=val]]* form.  This can be
+- * called from the ->monolithic_mount_data() fs_context operation.
++ * Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
++ * option separator callback.
+  *
+  * Returns 0 on success or the error returned by the ->parse_option() fs_context
+  * operation on failure.
+  */
+-int generic_parse_monolithic(struct fs_context *fc, void *data)
++int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
++			     char *(*sep)(char **))
+ {
+ 	char *options = data, *key;
+ 	int ret = 0;
+@@ -210,7 +216,7 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
+ 	if (ret)
+ 		return ret;
+ 
+-	while ((key = strsep(&options, ",")) != NULL) {
++	while ((key = sep(&options)) != NULL) {
+ 		if (*key) {
+ 			size_t v_len = 0;
+ 			char *value = strchr(key, '=');
+@@ -229,6 +235,28 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL(vfs_parse_monolithic_sep);
++
++static char *vfs_parse_comma_sep(char **s)
++{
++	return strsep(s, ",");
++}
++
++/**
++ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
++ * @fc: The superblock configuration to fill in.
++ * @data: The data to parse
++ *
++ * Parse a blob of data that's in key[=val][,key[=val]]* form.  This can be
++ * called from the ->monolithic_mount_data() fs_context operation.
++ *
++ * Returns 0 on success or the error returned by the ->parse_option() fs_context
++ * operation on failure.
++ */
++int generic_parse_monolithic(struct fs_context *fc, void *data)
++{
++	return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
++}
+ EXPORT_SYMBOL(generic_parse_monolithic);
+ 
+ /**
+@@ -354,7 +382,7 @@ void fc_drop_locked(struct fs_context *fc)
+ static void legacy_fs_context_free(struct fs_context *fc);
+ 
+ /**
+- * vfs_dup_fc_config: Duplicate a filesystem context.
++ * vfs_dup_fs_context - Duplicate a filesystem context.
+  * @src_fc: The context to copy.
+  */
+ struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
+@@ -400,7 +428,9 @@ EXPORT_SYMBOL(vfs_dup_fs_context);
+ 
+ /**
+  * logfc - Log a message to a filesystem context
+- * @fc: The filesystem context to log to.
++ * @log: The filesystem context to log to, or NULL to use printk.
++ * @prefix: A string to prefix the output with, or NULL.
++ * @level: 'w' for a warning, 'e' for an error.  Anything else is a notice.
+  * @fmt: The format of the buffer.
+  */
+ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
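With the separator factored out, a filesystem that cannot live with plain
comma splitting only has to supply a strsep()-style callback and wire it up
via ->parse_monolithic. A minimal sketch (the myfs_* names are hypothetical):

	static char *myfs_next_opt(char **s)
	{
		return strsep(s, ",");	/* or any escape-aware splitter */
	}

	static int myfs_parse_monolithic(struct fs_context *fc, void *data)
	{
		return vfs_parse_monolithic_sep(fc, data, myfs_next_opt);
	}

	static const struct fs_context_operations myfs_context_ops = {
		.parse_monolithic = myfs_parse_monolithic,
		.parse_param	  = myfs_parse_param,
		.get_tree	  = myfs_get_tree,
	};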
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 5b2481cd47501..d413e0b8f6c29 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -109,9 +109,6 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
+  * Returns 0 on success, -errno on error, 1 if this was the last
+  * extent that will fit in user array.
+  */
+-#define SET_UNKNOWN_FLAGS	(FIEMAP_EXTENT_DELALLOC)
+-#define SET_NO_UNMOUNTED_IO_FLAGS	(FIEMAP_EXTENT_DATA_ENCRYPTED)
+-#define SET_NOT_ALIGNED_FLAGS	(FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
+ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
+ 			    u64 phys, u64 len, u32 flags)
+ {
+@@ -127,6 +124,10 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
+ 	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
+ 		return 1;
+ 
++#define SET_UNKNOWN_FLAGS	(FIEMAP_EXTENT_DELALLOC)
++#define SET_NO_UNMOUNTED_IO_FLAGS	(FIEMAP_EXTENT_DATA_ENCRYPTED)
++#define SET_NOT_ALIGNED_FLAGS	(FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
++
+ 	if (flags & SET_UNKNOWN_FLAGS)
+ 		flags |= FIEMAP_EXTENT_UNKNOWN;
+ 	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
+@@ -877,6 +878,9 @@ out:
+ #ifdef CONFIG_COMPAT
+ /**
+  * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
++ * @file: The file to operate on.
++ * @cmd: The ioctl command number.
++ * @arg: The argument to the ioctl.
+  *
+  * This is not normally called as a function, but instead set in struct
+  * file_operations as
+diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
+index 5d826274570ca..c429c42a68679 100644
+--- a/fs/kernel_read_file.c
++++ b/fs/kernel_read_file.c
+@@ -8,16 +8,16 @@
+ /**
+  * kernel_read_file() - read file contents into a kernel buffer
+  *
+- * @file	file to read from
+- * @offset	where to start reading from (see below).
+- * @buf		pointer to a "void *" buffer for reading into (if
++ * @file:	file to read from
++ * @offset:	where to start reading from (see below).
++ * @buf:	pointer to a "void *" buffer for reading into (if
+  *		*@buf is NULL, a buffer will be allocated, and
+  *		@buf_size will be ignored)
+- * @buf_size	size of buf, if already allocated. If @buf not
++ * @buf_size:	size of buf, if already allocated. If @buf not
+  *		allocated, this is the largest size to allocate.
+- * @file_size	if non-NULL, the full size of @file will be
++ * @file_size:	if non-NULL, the full size of @file will be
+  *		written here.
+- * @id		the kernel_read_file_id identifying the type of
++ * @id:		the kernel_read_file_id identifying the type of
+  *		file contents being read (for LSMs to examine)
+  *
+  * @offset must be 0 unless both @buf and @file_size are non-NULL
+diff --git a/fs/namei.c b/fs/namei.c
+index 2bae29ea52ffa..567ee547492bc 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -643,6 +643,8 @@ static bool nd_alloc_stack(struct nameidata *nd)
+ 
+ /**
+  * path_connected - Verify that a dentry is below mnt.mnt_root
++ * @mnt: The mountpoint to check.
++ * @dentry: The dentry to check.
+  *
+  * Rename can sometimes move a file or directory outside of a bind
+  * mount, path_connected allows those cases to be detected.
+@@ -1083,6 +1085,7 @@ fs_initcall(init_fs_namei_sysctls);
+ /**
+  * may_follow_link - Check symlink following for unsafe situations
+  * @nd: nameidata pathwalk data
++ * @inode: Used for idmapping.
+  *
+  * In the case of the sysctl_protected_symlinks sysctl being enabled,
+  * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
+diff --git a/fs/open.c b/fs/open.c
+index e6ead0f199649..7c9647a8f219d 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1150,7 +1150,7 @@ EXPORT_SYMBOL_GPL(kernel_file_open);
+  * backing_file_open - open a backing file for kernel internal use
+  * @path:	path of the file to open
+  * @flags:	open flags
+- * @path:	path of the backing file
++ * @real_path:	path of the backing file
+  * @cred:	credentials for open
+  *
+  * Open a backing file for a stackable filesystem (e.g., overlayfs).
+@@ -1546,7 +1546,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
+ }
+ 
+ /**
+- * close_range() - Close all file descriptors in a given range.
++ * sys_close_range() - Close all file descriptors in a given range.
+  *
+  * @fd:     starting file descriptor to close
+  * @max_fd: last file descriptor to close
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index 306e1ecdc96d3..2b703521871ea 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -8,6 +8,7 @@
+ struct ovl_config {
+ 	char *upperdir;
+ 	char *workdir;
++	char **lowerdirs;
+ 	bool default_permissions;
+ 	int redirect_mode;
+ 	bool index;
+@@ -38,17 +39,8 @@ struct ovl_layer {
+ 	int idx;
+ 	/* One fsid per unique underlying sb (upper fsid == 0) */
+ 	int fsid;
+-	char *name;
+ };
+ 
+-/*
+- * ovl_free_fs() relies on @mnt being the first member when unmounting
+- * the private mounts created for each layer. Let's check both the
+- * offset and type.
+- */
+-static_assert(offsetof(struct ovl_layer, mnt) == 0);
+-static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
+-
+ struct ovl_path {
+ 	const struct ovl_layer *layer;
+ 	struct dentry *dentry;
+diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
+index a63160dbb0f95..644badb13fe01 100644
+--- a/fs/overlayfs/params.c
++++ b/fs/overlayfs/params.c
+@@ -120,6 +120,34 @@ const struct fs_parameter_spec ovl_parameter_spec[] = {
+ 	{}
+ };
+ 
++static char *ovl_next_opt(char **s)
++{
++	char *sbegin = *s;
++	char *p;
++
++	if (sbegin == NULL)
++		return NULL;
++
++	for (p = sbegin; *p; p++) {
++		if (*p == '\\') {
++			p++;
++			if (!*p)
++				break;
++		} else if (*p == ',') {
++			*p = '\0';
++			*s = p + 1;
++			return sbegin;
++		}
++	}
++	*s = NULL;
++	return sbegin;
++}
++
++static int ovl_parse_monolithic(struct fs_context *fc, void *data)
++{
++	return vfs_parse_monolithic_sep(fc, data, ovl_next_opt);
++}
++
+ static ssize_t ovl_parse_param_split_lowerdirs(char *str)
+ {
+ 	ssize_t nr_layers = 1, nr_colons = 0;
+@@ -127,7 +155,8 @@ static ssize_t ovl_parse_param_split_lowerdirs(char *str)
+ 
+ 	for (s = d = str;; s++, d++) {
+ 		if (*s == '\\') {
+-			s++;
++			/* keep esc chars in split lowerdir */
++			*d++ = *s++;
+ 		} else if (*s == ':') {
+ 			bool next_colon = (*(s + 1) == ':');
+ 
+@@ -202,7 +231,7 @@ static void ovl_unescape(char *s)
+ 	}
+ }
+ 
+-static int ovl_mount_dir(const char *name, struct path *path)
++static int ovl_mount_dir(const char *name, struct path *path, bool upper)
+ {
+ 	int err = -ENOMEM;
+ 	char *tmp = kstrdup(name, GFP_KERNEL);
+@@ -211,7 +240,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
+ 		ovl_unescape(tmp);
+ 		err = ovl_mount_dir_noesc(tmp, path);
+ 
+-		if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
++		if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
+ 			pr_err("filesystem on '%s' not supported as upperdir\n",
+ 			       tmp);
+ 			path_put_init(path);
+@@ -232,7 +261,7 @@ static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
+ 	struct path path;
+ 	char *dup;
+ 
+-	err = ovl_mount_dir(name, &path);
++	err = ovl_mount_dir(name, &path, true);
+ 	if (err)
+ 		return err;
+ 
+@@ -284,12 +313,6 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
+  *     Set "/lower1", "/lower2", and "/lower3" as lower layers and
+  *     "/data1" and "/data2" as data lower layers. Any existing lower
+  *     layers are replaced.
+- * (2) lowerdir=:/lower4
+- *     Append "/lower4" to current stack of lower layers. This requires
+- *     that there already is at least one lower layer configured.
+- * (3) lowerdir=::/lower5
+- *     Append data "/lower5" as data lower layer. This requires that
+- *     there's at least one regular lower layer present.
+  */
+ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ {
+@@ -311,49 +334,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ 		return 0;
+ 	}
+ 
+-	if (strncmp(name, "::", 2) == 0) {
+-		/*
+-		 * This is a data layer.
+-		 * There must be at least one regular lower layer
+-		 * specified.
+-		 */
+-		if (ctx->nr == 0) {
+-			pr_err("data lower layers without regular lower layers not allowed");
+-			return -EINVAL;
+-		}
+-
+-		/* Skip the leading "::". */
+-		name += 2;
+-		data_layer = true;
+-		/*
+-		 * A data layer is automatically an append as there
+-		 * must've been at least one regular lower layer.
+-		 */
+-		append = true;
+-	} else if (*name == ':') {
+-		/*
+-		 * This is a regular lower layer.
+-		 * If users want to append a layer enforce that they
+-		 * have already specified a first layer before. It's
+-		 * better to be strict.
+-		 */
+-		if (ctx->nr == 0) {
+-			pr_err("cannot append layer if no previous layer has been specified");
+-			return -EINVAL;
+-		}
+-
+-		/*
+-		 * Once a sequence of data layers has started regular
+-		 * lower layers are forbidden.
+-		 */
+-		if (ctx->nr_data > 0) {
+-			pr_err("regular lower layers cannot follow data lower layers");
+-			return -EINVAL;
+-		}
+-
+-		/* Skip the leading ":". */
+-		name++;
+-		append = true;
++	if (*name == ':') {
++		pr_err("cannot append lower layer");
++		return -EINVAL;
+ 	}
+ 
+ 	dup = kstrdup(name, GFP_KERNEL);
+@@ -435,7 +418,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ 		l = &ctx->lower[nr];
+ 		memset(l, 0, sizeof(*l));
+ 
+-		err = ovl_mount_dir_noesc(dup_iter, &l->path);
++		err = ovl_mount_dir(dup_iter, &l->path, false);
+ 		if (err)
+ 			goto out_put;
+ 
+@@ -642,6 +625,7 @@ static int ovl_reconfigure(struct fs_context *fc)
+ }
+ 
+ static const struct fs_context_operations ovl_context_ops = {
++	.parse_monolithic = ovl_parse_monolithic,
+ 	.parse_param = ovl_parse_param,
+ 	.get_tree    = ovl_get_tree,
+ 	.reconfigure = ovl_reconfigure,
+@@ -712,12 +696,12 @@ void ovl_free_fs(struct ovl_fs *ofs)
+ 	if (ofs->upperdir_locked)
+ 		ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
+ 
+-	/* Hack!  Reuse ofs->layers as a vfsmount array before freeing it */
+-	mounts = (struct vfsmount **) ofs->layers;
++	/* Reuse ofs->config.lowerdirs as a vfsmount array before freeing it */
++	mounts = (struct vfsmount **) ofs->config.lowerdirs;
+ 	for (i = 0; i < ofs->numlayer; i++) {
+ 		iput(ofs->layers[i].trap);
++		kfree(ofs->config.lowerdirs[i]);
+ 		mounts[i] = ofs->layers[i].mnt;
+-		kfree(ofs->layers[i].name);
+ 	}
+ 	kern_unmount_array(mounts, ofs->numlayer);
+ 	kfree(ofs->layers);
+@@ -725,6 +709,7 @@ void ovl_free_fs(struct ovl_fs *ofs)
+ 		free_anon_bdev(ofs->fs[i].pseudo_dev);
+ 	kfree(ofs->fs);
+ 
++	kfree(ofs->config.lowerdirs);
+ 	kfree(ofs->config.upperdir);
+ 	kfree(ofs->config.workdir);
+ 	if (ofs->creator_cred)
+@@ -874,16 +859,23 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+ 	struct super_block *sb = dentry->d_sb;
+ 	struct ovl_fs *ofs = sb->s_fs_info;
+ 	size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
+-	const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower];
+-
+-	/* ofs->layers[0] is the upper layer */
+-	seq_printf(m, ",lowerdir=%s", ofs->layers[1].name);
+-	/* dump regular lower layers */
+-	for (nr = 2; nr < nr_merged_lower; nr++)
+-		seq_printf(m, ":%s", ofs->layers[nr].name);
+-	/* dump data lower layers */
+-	for (nr = 0; nr < ofs->numdatalayer; nr++)
+-		seq_printf(m, "::%s", data_layers[nr].name);
++
++	/*
++	 * lowerdirs[] starts from offset 1, then
++	 * >= 0 regular lower layers prefixed with : and
++	 * >= 0 data-only lower layers prefixed with ::
++	 *
++	 * we need to escape comma and space like seq_show_option() does and
++	 * we also need to escape the colon separator from lowerdir paths.
++	 */
++	seq_puts(m, ",lowerdir=");
++	for (nr = 1; nr < ofs->numlayer; nr++) {
++		if (nr > 1)
++			seq_putc(m, ':');
++		if (nr >= nr_merged_lower)
++			seq_putc(m, ':');
++		seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
++	}
+ 	if (ofs->config.upperdir) {
+ 		seq_show_option(m, "upperdir", ofs->config.upperdir);
+ 		seq_show_option(m, "workdir", ofs->config.workdir);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 1090c68e5b051..80a70eaa30d90 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -565,11 +565,6 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
+ 	upper_layer->idx = 0;
+ 	upper_layer->fsid = 0;
+ 
+-	err = -ENOMEM;
+-	upper_layer->name = kstrdup(ofs->config.upperdir, GFP_KERNEL);
+-	if (!upper_layer->name)
+-		goto out;
+-
+ 	/*
+ 	 * Inherit SB_NOSEC flag from upperdir.
+ 	 *
+@@ -1113,7 +1108,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
+ 		layers[ofs->numlayer].idx = ofs->numlayer;
+ 		layers[ofs->numlayer].fsid = fsid;
+ 		layers[ofs->numlayer].fs = &ofs->fs[fsid];
+-		layers[ofs->numlayer].name = l->name;
++		/* Store for printing lowerdir=... in ovl_show_options() */
++		ofs->config.lowerdirs[ofs->numlayer] = l->name;
+ 		l->name = NULL;
+ 		ofs->numlayer++;
+ 		ofs->fs[fsid].is_lower = true;
+@@ -1358,8 +1354,16 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	if (!layers)
+ 		goto out_err;
+ 
++	ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
++	if (!ofs->config.lowerdirs) {
++		kfree(layers);
++		goto out_err;
++	}
+ 	ofs->layers = layers;
+-	/* Layer 0 is reserved for upper even if there's no upper */
++	/*
++	 * Layer 0 is reserved for upper even if there's no upper.
++	 * For consistency, config.lowerdirs[0] is NULL.
++	 */
+ 	ofs->numlayer = 1;
+ 
+ 	sb->s_stack_depth = 0;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index c7afe433d991a..bfa423ae16e3d 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -233,19 +233,18 @@ static void put_quota_format(struct quota_format_type *fmt)
+  * All dquots are placed to the end of inuse_list when first created, and this
+  * list is used for invalidate operation, which must look at every dquot.
+  *
+- * When the last reference of a dquot will be dropped, the dquot will be
+- * added to releasing_dquots. We'd then queue work item which would call
++ * When the last reference of a dquot is dropped, the dquot is added to
++ * releasing_dquots. We'll then queue a work item which will call
+  * synchronize_srcu() and after that perform the final cleanup of all the
+- * dquots on the list. Both releasing_dquots and free_dquots use the
+- * dq_free list_head in the dquot struct. When a dquot is removed from
+- * releasing_dquots, a reference count is always subtracted, and if
+- * dq_count == 0 at that point, the dquot will be added to the free_dquots.
++ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
++ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
++ * struct.
+  *
+- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+- * and this list is searched whenever we need an available dquot.  Dquots are
+- * removed from the list as soon as they are used again, and
+- * dqstats.free_dquots gives the number of dquots on the list. When
+- * dquot is invalidated it's completely released from memory.
++ * Unused and cleaned up dquots are in the free_dquots list and this list is
++ * searched whenever we need an available dquot. Dquots are removed from the
++ * list as soon as they are used again and dqstats.free_dquots gives the number
++ * of dquots on the list. When a dquot is invalidated it's completely released
++ * from memory.
+  *
+  * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
+  * dirtied, and this list is searched when writing dirty dquots back to
+@@ -321,6 +320,7 @@ static inline void put_dquot_last(struct dquot *dquot)
+ static inline void put_releasing_dquots(struct dquot *dquot)
+ {
+ 	list_add_tail(&dquot->dq_free, &releasing_dquots);
++	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
+ }
+ 
+ static inline void remove_free_dquot(struct dquot *dquot)
+@@ -328,8 +328,10 @@ static inline void remove_free_dquot(struct dquot *dquot)
+ 	if (list_empty(&dquot->dq_free))
+ 		return;
+ 	list_del_init(&dquot->dq_free);
+-	if (!atomic_read(&dquot->dq_count))
++	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
+ 		dqstats_dec(DQST_FREE_DQUOTS);
++	else
++		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
+ }
+ 
+ static inline void put_inuse(struct dquot *dquot)
+@@ -581,12 +583,6 @@ restart:
+ 			continue;
+ 		/* Wait for dquot users */
+ 		if (atomic_read(&dquot->dq_count)) {
+-			/* dquot in releasing_dquots, flush and retry */
+-			if (!list_empty(&dquot->dq_free)) {
+-				spin_unlock(&dq_list_lock);
+-				goto restart;
+-			}
+-
+ 			atomic_inc(&dquot->dq_count);
+ 			spin_unlock(&dq_list_lock);
+ 			/*
+@@ -605,6 +601,15 @@ restart:
+ 			 * restart. */
+ 			goto restart;
+ 		}
++		/*
++		 * The last user already dropped its reference but the dquot
++		 * hasn't been fully cleaned up yet. Restart the scan, which
++		 * flushes the work that cleans up released dquots.
++		 */
++		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
++			spin_unlock(&dq_list_lock);
++			goto restart;
++		}
+ 		/*
+ 		 * Quota now has no users and it has been written on last
+ 		 * dqput()
+@@ -696,6 +701,13 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 						 dq_dirty);
+ 
+ 			WARN_ON(!dquot_active(dquot));
++			/* If the dquot is being released we should not touch it */
++			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
++				spin_unlock(&dq_list_lock);
++				flush_delayed_work(&quota_release_work);
++				spin_lock(&dq_list_lock);
++				continue;
++			}
+ 
+ 			/* Now we have active dquot from which someone is
+  			 * holding reference so we can safely just increase
+@@ -809,18 +821,18 @@ static void quota_release_workfn(struct work_struct *work)
+ 	/* Exchange the list head to avoid livelock. */
+ 	list_replace_init(&releasing_dquots, &rls_head);
+ 	spin_unlock(&dq_list_lock);
++	synchronize_srcu(&dquot_srcu);
+ 
+ restart:
+-	synchronize_srcu(&dquot_srcu);
+ 	spin_lock(&dq_list_lock);
+ 	while (!list_empty(&rls_head)) {
+ 		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+-		/* Dquot got used again? */
+-		if (atomic_read(&dquot->dq_count) > 1) {
+-			remove_free_dquot(dquot);
+-			atomic_dec(&dquot->dq_count);
+-			continue;
+-		}
++		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
++		/*
++		 * Note that DQ_RELEASING_B protects us from racing with
++		 * invalidate_dquots() calls so we are safe to work with the
++		 * dquot even after we drop dq_list_lock.
++		 */
+ 		if (dquot_dirty(dquot)) {
+ 			spin_unlock(&dq_list_lock);
+ 			/* Commit dquot before releasing */
+@@ -834,7 +846,6 @@ restart:
+ 		}
+ 		/* Dquot is inactive and clean, now move it to free list */
+ 		remove_free_dquot(dquot);
+-		atomic_dec(&dquot->dq_count);
+ 		put_dquot_last(dquot);
+ 	}
+ 	spin_unlock(&dq_list_lock);
+@@ -875,6 +886,7 @@ void dqput(struct dquot *dquot)
+ 	BUG_ON(!list_empty(&dquot->dq_free));
+ #endif
+ 	put_releasing_dquots(dquot);
++	atomic_dec(&dquot->dq_count);
+ 	spin_unlock(&dq_list_lock);
+ 	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+ }
+@@ -963,7 +975,7 @@ we_slept:
+ 		dqstats_inc(DQST_LOOKUPS);
+ 	}
+ 	/* Wait for dq_lock - after this we know that either dquot_release() is
+-	 * already finished or it will be canceled due to dq_count > 1 test */
++	 * already finished or it will be canceled due to dq_count > 0 test */
+ 	wait_on_dquot(dquot);
+ 	/* Read the dquot / allocate space in quota file */
+ 	if (!dquot_active(dquot)) {
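The new DQ_RELEASING_B flag makes "queued on releasing_dquots" an explicit
state instead of something inferred from dq_count, so list scanners can tell
a reusable dquot from one whose deferred cleanup is still pending and flush
the worker before retrying. The shape of the protocol, with hypothetical
names mirroring the dquot code:

	/* producer (last put): */
	set_bit(OBJ_RELEASING, &obj->flags);	/* mark: cleanup pending */
	list_add_tail(&obj->free, &releasing);
	atomic_dec(&obj->count);		/* count reaches 0 */

	/* deferred worker, after the SRCU grace period: */
	clear_bit(OBJ_RELEASING, &obj->flags);	/* cleanup done */
	list_move_tail(&obj->free, &free_list);	/* now reusable */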
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index f41f8d6108ce9..85bd6213e98c7 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -106,7 +106,7 @@ int ksmbd_query_inode_status(struct inode *inode)
+ 	ci = __ksmbd_inode_lookup(inode);
+ 	if (ci) {
+ 		ret = KSMBD_INODE_STATUS_OK;
+-		if (ci->m_flags & S_DEL_PENDING)
++		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
+ 			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
+ 		atomic_dec(&ci->m_count);
+ 	}
+@@ -116,7 +116,7 @@ int ksmbd_query_inode_status(struct inode *inode)
+ 
+ bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
+ {
+-	return (fp->f_ci->m_flags & S_DEL_PENDING);
++	return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
+ }
+ 
+ void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 641dc48439873..42c22a6ff48d0 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1476,6 +1476,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
+ }
+ #endif
+ 
++#ifdef CONFIG_ACPI_PROCESSOR_IDLE
++#ifndef arch_get_idle_state_flags
++static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
++{
++	return 0;
++}
++#endif
++#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
++
+ #ifdef CONFIG_ACPI_PPTT
+ int acpi_pptt_cpu_is_thread(unsigned int cpu);
+ int find_acpi_cpu_topology(unsigned int cpu, int level);
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index 0d678e9a7b248..ebe78bd3d121d 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
+ 	fence->error = error;
+ }
+ 
++/**
++ * dma_fence_timestamp - helper to get the completion timestamp of a fence
++ * @fence: fence to get the timestamp from.
++ *
++ * After a fence is signaled the timestamp is updated with the signaling time,
++ * but setting the timestamp can race with tasks waiting for the signaling. This
++ * helper busy waits for the correct timestamp to appear.
++ */
++static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
++{
++	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
++		return ktime_get();
++
++	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
++		cpu_relax();
++
++	return fence->timestamp;
++}
++
+ signed long dma_fence_wait_timeout(struct dma_fence *,
+ 				   bool intr, signed long timeout);
+ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
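Reading fence->timestamp directly after testing DMA_FENCE_FLAG_SIGNALED_BIT
can race with the signaling path, which sets the timestamp and the
TIMESTAMP_BIT slightly later; dma_fence_timestamp() hides that short window
behind a busy-wait. A hedged usage sketch:

	#include <linux/dma-fence.h>
	#include <linux/ktime.h>

	static ktime_t job_runtime(struct dma_fence *done, ktime_t submitted)
	{
		/* only valid once the fence is known to be signaled */
		return ktime_sub(dma_fence_timestamp(done), submitted);
	}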
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index ff6341e09925b..ae556dc8e18fe 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -135,6 +135,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
+ extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
+ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ 			       const char *value, size_t v_size);
++int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
++			     char *(*sep)(char **));
+ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 19ddc6c804008..747de50b68585 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -192,6 +192,7 @@ enum {
+ 	ATA_PFLAG_UNLOADING	= (1 << 9), /* driver is being unloaded */
+ 	ATA_PFLAG_UNLOADED	= (1 << 10), /* driver is unloaded */
+ 
++	ATA_PFLAG_RESUMING	= (1 << 16),  /* port is being resumed */
+ 	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
+ 	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
+ 	ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
+@@ -318,9 +319,10 @@ enum {
+ 	ATA_EH_ENABLE_LINK	= (1 << 3),
+ 	ATA_EH_PARK		= (1 << 5), /* unload heads and stop I/O */
+ 	ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
++	ATA_EH_SET_ACTIVE	= (1 << 7), /* Set a device to active power mode */
+ 
+ 	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_PARK |
+-				  ATA_EH_GET_SUCCESS_SENSE,
++				  ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
+ 	ATA_EH_ALL_ACTIONS	= ATA_EH_REVALIDATE | ATA_EH_RESET |
+ 				  ATA_EH_ENABLE_LINK,
+ 
+@@ -358,7 +360,7 @@ enum {
+ 	/* This should match the actual table size of
+ 	 * ata_eh_cmd_timeout_table in libata-eh.c.
+ 	 */
+-	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
++	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
+ 
+ 	/* Horkage types. May be set by libata or controller on drives
+ 	   (some horkage may be drive/controller pair dependent */
+diff --git a/include/linux/mcb.h b/include/linux/mcb.h
+index 1e5893138afe0..0b971b24a804b 100644
+--- a/include/linux/mcb.h
++++ b/include/linux/mcb.h
+@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
+ struct mcb_device {
+ 	struct device dev;
+ 	struct mcb_bus *bus;
+-	bool is_added;
+ 	struct mcb_driver *driver;
+ 	u16 id;
+ 	int inst;
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index fd692b4a41d5f..07071e64abf3d 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
+ #define DQ_FAKE_B	3	/* no limits only usage */
+ #define DQ_READ_B	4	/* dquot was read into memory */
+ #define DQ_ACTIVE_B	5	/* dquot is active (dquot_release not called) */
+-#define DQ_LASTSET_B	6	/* Following 6 bits (see QIF_) are reserved\
++#define DQ_RELEASING_B	6	/* dquot is in releasing_dquots list waiting
++				 * to be cleaned up */
++#define DQ_LASTSET_B	7	/* Following 6 bits (see QIF_) are reserved\
+ 				 * for the mask of entries set via SETQUOTA\
+ 				 * quotactl. They are set under dq_data_lock\
+ 				 * and the quota format handling dquot can\
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index 11a4becff3a98..4fa4ef0a173a3 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
+ {
+ 	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ 		return true;
+-	if (atomic_read(&dquot->dq_count) > 1)
++	if (atomic_read(&dquot->dq_count) > 0)
+ 		return true;
+ 	return false;
+ }
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 441ed8fd4b5f6..41c1884a3e419 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -258,6 +258,7 @@ struct macsec_context {
+ 	struct macsec_secy *secy;
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct {
++		bool update_pn;
+ 		unsigned char assoc_num;
+ 		u8 key[MACSEC_MAX_KEY_LEN];
+ 		union {
+diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
+index 4d0ad22f83b56..9efc42382fdb9 100644
+--- a/include/uapi/linux/if_packet.h
++++ b/include/uapi/linux/if_packet.h
+@@ -18,11 +18,7 @@ struct sockaddr_ll {
+ 	unsigned short	sll_hatype;
+ 	unsigned char	sll_pkttype;
+ 	unsigned char	sll_halen;
+-	union {
+-		unsigned char	sll_addr[8];
+-		/* Actual length is in sll_halen. */
+-		__DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
+-	};
++	unsigned char	sll_addr[8];
+ };
+ 
+ /* Packet types */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 93fd32f2957b7..104681258d24f 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14255,7 +14255,7 @@ static int check_return_code(struct bpf_verifier_env *env)
+ 	struct tnum enforce_attach_type_range = tnum_unknown;
+ 	const struct bpf_prog *prog = env->prog;
+ 	struct bpf_reg_state *reg;
+-	struct tnum range = tnum_range(0, 1);
++	struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
+ 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+ 	int err;
+ 	struct bpf_func_state *frame = env->cur_state->frame[0];
+@@ -14303,8 +14303,8 @@ static int check_return_code(struct bpf_verifier_env *env)
+ 			return -EINVAL;
+ 		}
+ 
+-		if (!tnum_in(tnum_const(0), reg->var_off)) {
+-			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
++		if (!tnum_in(const_0, reg->var_off)) {
++			verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
+ 			return -EINVAL;
+ 		}
+ 		return 0;
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 83044312bc413..0a10aba49aa44 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -360,10 +360,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
+ 	}
+ 	css_task_iter_end(&it);
+ 	length = n;
+-	/* now sort & (if procs) strip out duplicates */
++	/* now sort & strip out duplicates (tgids or recycled thread PIDs) */
+ 	sort(array, length, sizeof(pid_t), cmppid, NULL);
+-	if (type == CGROUP_FILE_PROCS)
+-		length = pidlist_uniq(array, length);
++	length = pidlist_uniq(array, length);
+ 
+ 	l = cgroup_pidlist_find_create(cgrp, type);
+ 	if (!l) {
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index e51ab3d4765eb..e4a37d7a6752d 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5741,9 +5741,13 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
+ 	list_for_each_entry(wq, &workqueues, list) {
+ 		if (!(wq->flags & WQ_UNBOUND))
+ 			continue;
++
+ 		/* creating multiple pwqs breaks ordering guarantee */
+-		if (wq->flags & __WQ_ORDERED)
+-			continue;
++		if (!list_empty(&wq->pwqs)) {
++			if (wq->flags & __WQ_ORDERED_EXPLICIT)
++				continue;
++			wq->flags &= ~__WQ_ORDERED;
++		}
+ 
+ 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+ 		if (!ctx) {
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index f02b5d3e47335..d1c6f206f4295 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -948,21 +948,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
+ 		return -EADDRNOTAVAIL;
+ 
+-wait_free_buffer:
+-	/* we do not support multiple buffers - for now */
+-	if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
+-		return -EAGAIN;
++	while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
++		/* we do not support multiple buffers - for now */
++		if (msg->msg_flags & MSG_DONTWAIT)
++			return -EAGAIN;
+ 
+-	/* wait for complete transmission of current pdu */
+-	err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+-	if (err)
+-		goto err_event_drop;
+-
+-	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+ 		if (so->tx.state == ISOTP_SHUTDOWN)
+ 			return -EADDRNOTAVAIL;
+ 
+-		goto wait_free_buffer;
++		/* wait for complete transmission of current pdu */
++		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		if (err)
++			goto err_event_drop;
+ 	}
+ 
+ 	/* PDU size > default => try max_pdu_size */
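The rewritten loop folds the wait into the cmpxchg() that claims the
transmit state, so the MSG_DONTWAIT and shutdown checks are re-evaluated on
every contended attempt instead of only once up front. The shape of the
idiom, with hypothetical names:

	while (cmpxchg(&st->state, ST_IDLE, ST_BUSY) != ST_IDLE) {
		if (nonblocking)
			return -EAGAIN;
		err = wait_event_interruptible(st->wq, st->state == ST_IDLE);
		if (err)
			return err;	/* interrupted by a signal */
	}
	/* we own the ST_BUSY state from here on */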
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 5eb4898cccd4c..04a0f35f9407f 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -459,8 +459,8 @@ int ceph_tcp_connect(struct ceph_connection *con)
+ 	set_sock_callbacks(sock, con);
+ 
+ 	con_sock_state_connecting(con);
+-	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+-				 O_NONBLOCK);
++	ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
++			     O_NONBLOCK);
+ 	if (ret == -EINPROGRESS) {
+ 		dout("connect %s EINPROGRESS sk_state = %u\n",
+ 		     ceph_pr_addr(&con->peer_addr),
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 69a3e544676c4..968be1c20ca1f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3285,15 +3285,19 @@ int skb_checksum_help(struct sk_buff *skb)
+ 
+ 	offset = skb_checksum_start_offset(skb);
+ 	ret = -EINVAL;
+-	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
++	if (unlikely(offset >= skb_headlen(skb))) {
+ 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
++		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
++			  offset, skb_headlen(skb));
+ 		goto out;
+ 	}
+ 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ 
+ 	offset += skb->csum_offset;
+-	if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
++	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
+ 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
++		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
++			  offset + sizeof(__sum16), skb_headlen(skb));
+ 		goto out;
+ 	}
+ 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
+diff --git a/net/devlink/health.c b/net/devlink/health.c
+index 194340a8bb863..8c6a2e5140d4d 100644
+--- a/net/devlink/health.c
++++ b/net/devlink/health.c
+@@ -58,7 +58,6 @@ struct devlink_health_reporter {
+ 	struct devlink *devlink;
+ 	struct devlink_port *devlink_port;
+ 	struct devlink_fmsg *dump_fmsg;
+-	struct mutex dump_lock; /* lock parallel read/write from dump buffers */
+ 	u64 graceful_period;
+ 	bool auto_recover;
+ 	bool auto_dump;
+@@ -125,7 +124,6 @@ __devlink_health_reporter_create(struct devlink *devlink,
+ 	reporter->graceful_period = graceful_period;
+ 	reporter->auto_recover = !!ops->recover;
+ 	reporter->auto_dump = !!ops->dump;
+-	mutex_init(&reporter->dump_lock);
+ 	return reporter;
+ }
+ 
+@@ -226,7 +224,6 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
+ static void
+ devlink_health_reporter_free(struct devlink_health_reporter *reporter)
+ {
+-	mutex_destroy(&reporter->dump_lock);
+ 	if (reporter->dump_fmsg)
+ 		devlink_fmsg_free(reporter->dump_fmsg);
+ 	kfree(reporter);
+@@ -609,10 +606,10 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
+ 	}
+ 
+ 	if (reporter->auto_dump) {
+-		mutex_lock(&reporter->dump_lock);
++		devl_lock(devlink);
+ 		/* store current dump of current error, for later analysis */
+ 		devlink_health_do_dump(reporter, priv_ctx, NULL);
+-		mutex_unlock(&reporter->dump_lock);
++		devl_unlock(devlink);
+ 	}
+ 
+ 	if (!reporter->auto_recover)
+@@ -1246,7 +1243,7 @@ out:
+ }
+ 
+ static struct devlink_health_reporter *
+-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
++devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
+ {
+ 	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ 	struct devlink_health_reporter *reporter;
+@@ -1256,10 +1253,12 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+ 	devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
+ 	if (IS_ERR(devlink))
+ 		return NULL;
+-	devl_unlock(devlink);
+ 
+ 	reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
+-	devlink_put(devlink);
++	if (!reporter) {
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
+ 	return reporter;
+ }
+ 
+@@ -1268,16 +1267,20 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ {
+ 	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ 	struct devlink_health_reporter *reporter;
++	struct devlink *devlink;
+ 	int err;
+ 
+-	reporter = devlink_health_reporter_get_from_cb(cb);
++	reporter = devlink_health_reporter_get_from_cb_lock(cb);
+ 	if (!reporter)
+ 		return -EINVAL;
+ 
+-	if (!reporter->ops->dump)
++	devlink = reporter->devlink;
++	if (!reporter->ops->dump) {
++		devl_unlock(devlink);
++		devlink_put(devlink);
+ 		return -EOPNOTSUPP;
++	}
+ 
+-	mutex_lock(&reporter->dump_lock);
+ 	if (!state->idx) {
+ 		err = devlink_health_do_dump(reporter, NULL, cb->extack);
+ 		if (err)
+@@ -1293,7 +1296,8 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ 	err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
+ 				  DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
+ unlock:
+-	mutex_unlock(&reporter->dump_lock);
++	devl_unlock(devlink);
++	devlink_put(devlink);
+ 	return err;
+ }
+ 
+@@ -1310,9 +1314,7 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ 	if (!reporter->ops->dump)
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&reporter->dump_lock);
+ 	devlink_health_dump_clear(reporter);
+-	mutex_unlock(&reporter->dump_lock);
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 37fd9537423f1..a8f58f5e99a77 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2441,6 +2441,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ 
+ 	/* build the payload, and be prepared to abort if this fails. */
+ 	if (tcp_clone_payload(sk, nskb, probe_size)) {
++		tcp_skb_tsorted_anchor_cleanup(nskb);
+ 		consume_skb(nskb);
+ 		return -1;
+ 	}
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index ab62fe447038a..7a47a58aa54b4 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ {
+ 	struct mctp_route *tmp, *rt = NULL;
+ 
++	rcu_read_lock();
++
+ 	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+ 		/* TODO: add metrics */
+ 		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
+@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ 		}
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	return rt;
+ }
+ 
+ static struct mctp_route *mctp_route_lookup_null(struct net *net,
+ 						 struct net_device *dev)
+ {
+-	struct mctp_route *rt;
++	struct mctp_route *tmp, *rt = NULL;
+ 
+-	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
+-		if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
+-		    refcount_inc_not_zero(&rt->refs))
+-			return rt;
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
++		if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
++		    refcount_inc_not_zero(&tmp->refs)) {
++			rt = tmp;
++			break;
++		}
+ 	}
+ 
+-	return NULL;
++	rcu_read_unlock();
++
++	return rt;
+ }
+ 
+ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
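Both lookups now follow the standard RCU recipe: list_for_each_entry_rcu()
must run inside rcu_read_lock()/rcu_read_unlock(), and an entry may only be
used after the critical section if a reference was taken while still inside
it. In outline:

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &head, list) {
		if (match(tmp) && refcount_inc_not_zero(&tmp->refs)) {
			found = tmp;	/* the ref pins it past the unlock */
			break;
		}
	}
	rcu_read_unlock();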
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index f6684c052e3ba..c8a686eee7c21 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1441,7 +1441,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
+ 	sin.sin_addr.s_addr  = addr;
+ 	sin.sin_port         = 0;
+ 
+-	return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
++	return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ }
+ 
+ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+@@ -1548,7 +1548,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ 	sock->sk->sk_bound_dev_if = dev->ifindex;
+-	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
++	result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ 	if (result < 0) {
+ 		pr_err("Error binding to the multicast addr\n");
+ 		goto error;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 6705bb895e239..1dac28136e6a3 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+ 
+ 		if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
+ 			llcp_sock = tmp_sock;
++			sock_hold(&llcp_sock->sk);
+ 			break;
+ 		}
+ 	}
+ 
+ 	read_unlock(&local->sockets.lock);
+ 
+-	if (llcp_sock == NULL)
+-		return NULL;
+-
+-	sock_hold(&llcp_sock->sk);
+-
+ 	return llcp_sock;
+ }
+ 
+@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
+ 
+ static
+ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+-					    const u8 *sn, size_t sn_len)
++					    const u8 *sn, size_t sn_len,
++					    bool needref)
+ {
+ 	struct sock *sk;
+ 	struct nfc_llcp_sock *llcp_sock, *tmp_sock;
+@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+ 
+ 		if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
+ 			llcp_sock = tmp_sock;
++			if (needref)
++				sock_hold(&llcp_sock->sk);
+ 			break;
+ 		}
+ 	}
+@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ 		 * to this service name.
+ 		 */
+ 		if (nfc_llcp_sock_from_sn(local, sock->service_name,
+-					  sock->service_name_len) != NULL) {
++					  sock->service_name_len,
++					  false) != NULL) {
+ 			mutex_unlock(&local->sdp_lock);
+ 
+ 			return LLCP_SAP_MAX;
+@@ -824,16 +824,7 @@ out:
+ static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
+ 						  const u8 *sn, size_t sn_len)
+ {
+-	struct nfc_llcp_sock *llcp_sock;
+-
+-	llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
+-
+-	if (llcp_sock == NULL)
+-		return NULL;
+-
+-	sock_hold(&llcp_sock->sk);
+-
+-	return llcp_sock;
++	return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
+ }
+ 
+ static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
+@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 			}
+ 
+ 			llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
+-							  service_name_len);
++							  service_name_len,
++							  true);
+ 			if (!llcp_sock) {
+ 				sap = 0;
+ 				goto add_snl;
+@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 
+ 				if (sap == LLCP_SAP_MAX) {
+ 					sap = 0;
++					nfc_llcp_sock_put(llcp_sock);
+ 					goto add_snl;
+ 				}
+ 
+@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 
+ 			pr_debug("%p %d\n", llcp_sock, sap);
+ 
++			nfc_llcp_sock_put(llcp_sock);
+ add_snl:
+ 			sdp = nfc_llcp_build_sdres_tlv(tid, sap);
+ 			if (sdp == NULL)
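
For context on the llcp_core.c hunks above: the lookup helpers now take
sock_hold() while the read_lock() on the socket list is still held (and
nfc_llcp_sock_from_sn() grows a "needref" flag so callers can opt in),
closing the window in which the socket could be freed between the unlock
and the hold. A minimal stand-alone C sketch of the fixed ordering; the
locks are stubbed so it compiles, and the types are simplified stand-ins,
not the kernel's:

	#include <stdio.h>

	struct obj { int ref; int key; };

	static void lock(void)   { /* stands in for read_lock(&list_lock) */ }
	static void unlock(void) { /* stands in for read_unlock(&list_lock) */ }

	static struct obj table[2] = { { 1, 7 }, { 1, 9 } };

	/* Take the reference BEFORE dropping the lock. Taking it after the
	 * unlock leaves a window where the last reference can be dropped and
	 * the object freed, turning the later hold into a use-after-free. */
	static struct obj *lookup_get(int key)
	{
		struct obj *found = NULL;
		unsigned int i;

		lock();
		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if (table[i].key == key) {
				found = &table[i];
				found->ref++;	/* hold under the lock */
				break;
			}
		}
		unlock();
		return found;	/* caller drops the reference when done */
	}

	int main(void)
	{
		struct obj *o = lookup_get(7);

		printf("found key=%d ref=%d\n", o ? o->key : -1, o ? o->ref : 0);
		return 0;
	}
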
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index fff755dde30d6..6c9592d051206 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -909,6 +909,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (protocol >= NFC_PROTO_MAX) {
++		pr_err("the requested nfc protocol is invalid\n");
++		return -EINVAL;
++	}
++
+ 	if (!(nci_target->supported_protocols & (1 << protocol))) {
+ 		pr_err("target does not support the requested protocol 0x%x\n",
+ 		       protocol);
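
The nci_activate_target() hunk above rejects protocol values of
NFC_PROTO_MAX or higher before they reach the (1 << protocol) bit test.
The guard has to come first: shifting a 32-bit mask by 32 or more is
undefined behaviour, so without it the supported_protocols check tells
you nothing. Stand-alone illustration (the NFC_PROTO_MAX value here is
an assumption for the demo, not the kernel's definition):

	#include <stdio.h>

	#define NFC_PROTO_MAX 7u	/* illustrative only */

	static int check_protocol(unsigned int supported, unsigned int protocol)
	{
		if (protocol >= NFC_PROTO_MAX)		/* the added guard */
			return -1;
		if (!(supported & (1u << protocol)))	/* shift now well-defined */
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_protocol(0x6, 2));		/* 0: supported */
		printf("%d\n", check_protocol(0x6, 40));	/* -1: rejected, no UB */
		return 0;
	}
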
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a2935bd18ed98..a39b2a0dd5425 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3605,7 +3605,12 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
+ 	if (dev) {
+ 		sll->sll_hatype = dev->type;
+ 		sll->sll_halen = dev->addr_len;
+-		memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
++
++		/* Let __fortify_memcpy_chk() know the actual buffer size. */
++		memcpy(((struct sockaddr_storage *)sll)->__data +
++		       offsetof(struct sockaddr_ll, sll_addr) -
++		       offsetofend(struct sockaddr_ll, sll_family),
++		       dev->dev_addr, dev->addr_len);
+ 	} else {
+ 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
+ 		sll->sll_halen = 0;
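
The packet_getname() change above copies the hardware address through the
enclosing sockaddr_storage's byte array instead of into the fixed 8-byte
sll_addr member, so __fortify_memcpy_chk() sees the space actually
available past sll_addr rather than flagging addresses longer than the
member. A stand-alone check of the offset arithmetic (the struct layout
mirrors linux/if_packet.h):

	#include <stdio.h>
	#include <stddef.h>

	struct sockaddr_ll {
		unsigned short	sll_family;
		unsigned short	sll_protocol;
		int		sll_ifindex;
		unsigned short	sll_hatype;
		unsigned char	sll_pkttype;
		unsigned char	sll_halen;
		unsigned char	sll_addr[8];
	};

	/* offsetofend() as in the kernel: one past the end of a member */
	#define offsetofend(T, m) (offsetof(T, m) + sizeof(((T *)0)->m))

	int main(void)
	{
		/* sockaddr_storage's __data area begins right after the family
		 * field, so this is where sll_addr falls inside __data: */
		size_t off = offsetof(struct sockaddr_ll, sll_addr) -
			     offsetofend(struct sockaddr_ll, sll_family);

		printf("sll_addr sits %zu bytes into the data area\n", off);
		return 0;
	}
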
+diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
+index d788c6d28986f..a0046e99d6df7 100644
+--- a/net/rds/tcp_connect.c
++++ b/net/rds/tcp_connect.c
+@@ -145,7 +145,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
+ 		addrlen = sizeof(sin);
+ 	}
+ 
+-	ret = sock->ops->bind(sock, addr, addrlen);
++	ret = kernel_bind(sock, addr, addrlen);
+ 	if (ret) {
+ 		rdsdebug("bind failed with %d at address %pI6c\n",
+ 			 ret, &conn->c_laddr);
+diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
+index 014fa24418c12..53b3535a1e4a8 100644
+--- a/net/rds/tcp_listen.c
++++ b/net/rds/tcp_listen.c
+@@ -306,7 +306,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
+ 		addr_len = sizeof(*sin);
+ 	}
+ 
+-	ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
++	ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len);
+ 	if (ret < 0) {
+ 		rdsdebug("could not bind %s listener socket: %d\n",
+ 			 isv6 ? "IPv6" : "IPv4", ret);
+diff --git a/net/smc/Kconfig b/net/smc/Kconfig
+index 1ab3c5a2c5ada..746be39967683 100644
+--- a/net/smc/Kconfig
++++ b/net/smc/Kconfig
+@@ -2,6 +2,7 @@
+ config SMC
+ 	tristate "SMC socket protocol family"
+ 	depends on INET && INFINIBAND
++	depends on m || ISM != m
+ 	help
+ 	  SMC-R provides a "sockets over RDMA" solution making use of
+ 	  RDMA over Converged Ethernet (RoCE) technology to upgrade
+diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
+index aa8928975cc63..9d32058db2b5d 100644
+--- a/net/smc/smc_stats.h
++++ b/net/smc/smc_stats.h
+@@ -92,13 +92,14 @@ do { \
+ 	typeof(_smc_stats) stats = (_smc_stats); \
+ 	typeof(_tech) t = (_tech); \
+ 	typeof(_len) l = (_len); \
+-	int _pos = fls64((l) >> 13); \
++	int _pos; \
+ 	typeof(_rc) r = (_rc); \
+ 	int m = SMC_BUF_MAX - 1; \
+ 	this_cpu_inc((*stats).smc[t].key ## _cnt); \
+-	if (r <= 0) \
++	if (r <= 0 || l <= 0) \
+ 		break; \
+-	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
++	_pos = fls64((l - 1) >> 13); \
++	_pos = (_pos <= m) ? _pos : m; \
+ 	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
+ 	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
+ } \
+@@ -138,9 +139,12 @@ while (0)
+ do { \
+ 	typeof(_len) _l = (_len); \
+ 	typeof(_tech) t = (_tech); \
+-	int _pos = fls((_l) >> 13); \
++	int _pos; \
+ 	int m = SMC_BUF_MAX - 1; \
+-	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
++	if (_l <= 0) \
++		break; \
++	_pos = fls((_l - 1) >> 13); \
++	_pos = (_pos <= m) ? _pos : m; \
+ 	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
+ } \
+ while (0)
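
The reworked smc_stats.h bucketing above replaces fls64(l >> 13) plus a
power-of-two special case with fls64((l - 1) >> 13), and skips
non-positive lengths up front. With 8 KiB granularity that maps (0, 8K]
to bucket 0, (8K, 16K] to bucket 1, and so on, clamped at the last
bucket. A stand-alone check of the arithmetic (the SMC_BUF_MAX value is
assumed for the demo, and fls64() is emulated with a GCC builtin):

	#include <stdio.h>

	#define SMC_BUF_MAX 6	/* illustrative bucket count */

	/* kernel semantics: fls64(0) == 0, fls64(1) == 1, ... */
	static int fls64(unsigned long long x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	static int smc_bucket(long long len)
	{
		int pos, max = SMC_BUF_MAX - 1;

		if (len <= 0)
			return -1;	/* the added guard */
		pos = fls64((unsigned long long)(len - 1) >> 13);
		return pos <= max ? pos : max;
	}

	int main(void)
	{
		long long samples[] = { 1, 8192, 8193, 16384, 16385, 1 << 20 };
		int i;

		/* prints buckets 0, 0, 1, 1, 2, 5 (clamped) */
		for (i = 0; i < 6; i++)
			printf("len=%-8lld -> bucket %d\n",
			       samples[i], smc_bucket(samples[i]));
		return 0;
	}
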
+diff --git a/net/socket.c b/net/socket.c
+index 95942c1786e50..2751c8e9674fe 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3467,7 +3467,11 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
+ 
+ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+ {
+-	return sock->ops->bind(sock, addr, addrlen);
++	struct sockaddr_storage address;
++
++	memcpy(&address, addr, addrlen);
++
++	return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen);
+ }
+ EXPORT_SYMBOL(kernel_bind);
+ 
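
kernel_bind() above now copies the caller's address into a local
sockaddr_storage before handing it to the protocol's bind. The callee
(including any attached BPF hook) may legitimately rewrite the sockaddr
it is given, and in-kernel callers should not have their own buffers
mutated behind their backs. A simplified stand-alone sketch of that
defensive-copy shape; the types are toys, not the kernel's, and addrlen
is assumed already validated against sizeof(struct sockaddr_storage):

	#include <stdio.h>
	#include <string.h>

	struct sockaddr { unsigned short sa_family; char sa_data[14]; };
	struct sockaddr_storage { unsigned short ss_family; char __data[126]; };

	/* A callee that rewrites its argument, as a bind hook may. */
	static int proto_bind(struct sockaddr *addr, int addrlen)
	{
		(void)addrlen;
		memset(addr->sa_data, 0, sizeof(addr->sa_data));
		return 0;
	}

	static int safe_bind(struct sockaddr *addr, int addrlen)
	{
		struct sockaddr_storage address;

		memcpy(&address, addr, addrlen);	/* callee gets a private copy */
		return proto_bind((struct sockaddr *)&address, addrlen);
	}

	int main(void)
	{
		struct sockaddr a = { .sa_family = 2, .sa_data = "example" };

		safe_bind(&a, sizeof(a));
		printf("caller's buffer intact: %s\n", a.sa_data);
		return 0;
	}
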
+diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
+index f8905400ee07a..d2c2640300171 100644
+--- a/net/xdp/xsk_queue.c
++++ b/net/xdp/xsk_queue.c
+@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
+ 	q->ring_mask = nentries - 1;
+ 
+ 	size = xskq_get_ring_size(q, umem_queue);
++
++	/* size which is overflowing or close to SIZE_MAX will become 0 in
++	 * PAGE_ALIGN(), checking SIZE_MAX is enough due to the previous
++	 * is_power_of_2(), the rest will be handled by vmalloc_user()
++	 */
++	if (unlikely(size == SIZE_MAX)) {
++		kfree(q);
++		return NULL;
++	}
++
+ 	size = PAGE_ALIGN(size);
+ 
+ 	q->ring = vmalloc_user(size);
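
The xskq_create() check above works because xskq_get_ring_size() is built
on struct_size(), which saturates to SIZE_MAX on multiplication overflow.
Left unchecked, PAGE_ALIGN() would wrap that saturated value to 0 and the
ring allocation would quietly be the wrong size. Stand-alone
demonstration of the wrap (PAGE_SIZE assumed to be 4 KiB):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		size_t size = SIZE_MAX;	/* struct_size()'s saturated result */

		/* unsigned wrap-around: SIZE_MAX + 4095 == 4094, masked to 0 */
		printf("PAGE_ALIGN(SIZE_MAX) = %zu\n", (size_t)PAGE_ALIGN(size));
		return 0;
	}
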
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index c6fc50d67214c..85fb5c22529a7 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -44,13 +44,12 @@ static const struct trusted_key_source trusted_key_sources[] = {
+ #endif
+ };
+ 
+-DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init);
+ DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
+ DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
+ 			*trusted_key_sources[0].ops->unseal);
+ DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
+ 			*trusted_key_sources[0].ops->get_random);
+-DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit);
++static void (*trusted_key_exit)(void);
+ static unsigned char migratable;
+ 
+ enum {
+@@ -359,19 +358,16 @@ static int __init init_trusted(void)
+ 		if (!get_random)
+ 			get_random = kernel_get_random;
+ 
+-		static_call_update(trusted_key_init,
+-				   trusted_key_sources[i].ops->init);
+ 		static_call_update(trusted_key_seal,
+ 				   trusted_key_sources[i].ops->seal);
+ 		static_call_update(trusted_key_unseal,
+ 				   trusted_key_sources[i].ops->unseal);
+ 		static_call_update(trusted_key_get_random,
+ 				   get_random);
+-		static_call_update(trusted_key_exit,
+-				   trusted_key_sources[i].ops->exit);
++		trusted_key_exit = trusted_key_sources[i].ops->exit;
+ 		migratable = trusted_key_sources[i].ops->migratable;
+ 
+-		ret = static_call(trusted_key_init)();
++		ret = trusted_key_sources[i].ops->init();
+ 		if (!ret)
+ 			break;
+ 	}
+@@ -388,7 +384,8 @@ static int __init init_trusted(void)
+ 
+ static void __exit cleanup_trusted(void)
+ {
+-	static_call_cond(trusted_key_exit)();
++	if (trusted_key_exit)
++		(*trusted_key_exit)();
+ }
+ 
+ late_initcall(init_trusted);
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index ce5faa6205170..9ba77e685126a 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -178,10 +178,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
+ 					    cs35l41->speaker_id, "wmfw");
+ 	if (!ret) {
+ 		/* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+-		return cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+-						     CS35L41_FIRMWARE_ROOT,
+-						     cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+-						     cs35l41->speaker_id, "bin");
++		ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
++						    CS35L41_FIRMWARE_ROOT,
++						    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
++						    cs35l41->speaker_id, "bin");
++		if (ret)
++			goto coeff_err;
++
++		return 0;
+ 	}
+ 
+ 	/* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
+@@ -190,10 +194,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
+ 					    cs35l41->amp_name, -1, "wmfw");
+ 	if (!ret) {
+ 		/* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+-		return cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+-						     CS35L41_FIRMWARE_ROOT,
+-						     cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+-						     cs35l41->speaker_id, "bin");
++		ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
++						    CS35L41_FIRMWARE_ROOT,
++						    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
++						    cs35l41->speaker_id, "bin");
++		if (ret)
++			goto coeff_err;
++
++		return 0;
+ 	}
+ 
+ 	/* try cirrus/part-dspN-fwtype-sub<-spkidN>.wmfw */
+@@ -208,10 +216,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
+ 						    cs35l41->amp_name, cs35l41->speaker_id, "bin");
+ 		if (ret)
+ 			/* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
+-			return cs35l41_request_firmware_file(cs35l41, coeff_firmware,
+-							     coeff_filename, CS35L41_FIRMWARE_ROOT,
+-							     cs35l41->acpi_subsystem_id, NULL,
+-							     cs35l41->speaker_id, "bin");
++			ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware,
++							    coeff_filename, CS35L41_FIRMWARE_ROOT,
++							    cs35l41->acpi_subsystem_id, NULL,
++							    cs35l41->speaker_id, "bin");
++		if (ret)
++			goto coeff_err;
++
++		return 0;
+ 	}
+ 
+ 	/* try cirrus/part-dspN-fwtype-sub.wmfw */
+@@ -226,12 +238,50 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
+ 						    cs35l41->speaker_id, "bin");
+ 		if (ret)
+ 			/* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
+-			return cs35l41_request_firmware_file(cs35l41, coeff_firmware,
+-							     coeff_filename, CS35L41_FIRMWARE_ROOT,
+-							     cs35l41->acpi_subsystem_id, NULL,
+-							     cs35l41->speaker_id, "bin");
++			ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware,
++							    coeff_filename, CS35L41_FIRMWARE_ROOT,
++							    cs35l41->acpi_subsystem_id, NULL,
++							    cs35l41->speaker_id, "bin");
++		if (ret)
++			goto coeff_err;
++	}
++
++	return ret;
++coeff_err:
++	release_firmware(*wmfw_firmware);
++	kfree(*wmfw_filename);
++	return ret;
++}
++
++static int cs35l41_fallback_firmware_file(struct cs35l41_hda *cs35l41,
++					  const struct firmware **wmfw_firmware,
++					  char **wmfw_filename,
++					  const struct firmware **coeff_firmware,
++					  char **coeff_filename)
++{
++	int ret;
++
++	/* Handle fallback */
++	dev_warn(cs35l41->dev, "Falling back to default firmware.\n");
++
++	/* fallback try cirrus/part-dspN-fwtype.wmfw */
++	ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
++					    CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
++	if (ret)
++		goto err;
++
++	/* fallback try cirrus/part-dspN-fwtype.bin */
++	ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
++					    CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
++	if (ret) {
++		release_firmware(*wmfw_firmware);
++		kfree(*wmfw_filename);
++		goto err;
+ 	}
++	return 0;
+ 
++err:
++	dev_warn(cs35l41->dev, "Unable to find firmware and tuning\n");
+ 	return ret;
+ }
+ 
+@@ -247,7 +297,6 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
+ 		ret = cs35l41_request_firmware_files_spkid(cs35l41, wmfw_firmware, wmfw_filename,
+ 							   coeff_firmware, coeff_filename);
+ 		goto out;
+-
+ 	}
+ 
+ 	/* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
+@@ -260,6 +309,9 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
+ 						    CS35L41_FIRMWARE_ROOT,
+ 						    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+ 						    -1, "bin");
++		if (ret)
++			goto coeff_err;
++
+ 		goto out;
+ 	}
+ 
+@@ -279,32 +331,23 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
+ 							    CS35L41_FIRMWARE_ROOT,
+ 							    cs35l41->acpi_subsystem_id, NULL, -1,
+ 							    "bin");
++		if (ret)
++			goto coeff_err;
+ 	}
+ 
+ out:
+-	if (!ret)
+-		return 0;
++	if (ret)
++		/* if all attempts at finding firmware fail, try fallback */
++		goto fallback;
+ 
+-	/* Handle fallback */
+-	dev_warn(cs35l41->dev, "Falling back to default firmware.\n");
++	return 0;
+ 
++coeff_err:
+ 	release_firmware(*wmfw_firmware);
+ 	kfree(*wmfw_filename);
+-
+-	/* fallback try cirrus/part-dspN-fwtype.wmfw */
+-	ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+-					    CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
+-	if (!ret)
+-		/* fallback try cirrus/part-dspN-fwtype.bin */
+-		ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+-						    CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
+-
+-	if (ret) {
+-		release_firmware(*wmfw_firmware);
+-		kfree(*wmfw_filename);
+-		dev_warn(cs35l41->dev, "Unable to find firmware and tuning\n");
+-	}
+-	return ret;
++fallback:
++	return cs35l41_fallback_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
++					      coeff_firmware, coeff_filename);
+ }
+ 
+ #if IS_ENABLED(CONFIG_EFI)
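
The cs35l41 rework above routes every failed coefficient (.bin) load
through a single cleanup label that releases the wmfw firmware it pairs
with, instead of returning early and leaking it. The same goto-cleanup
shape in a compact stand-alone sketch, with hypothetical helpers standing
in for request_firmware()/release_firmware():

	#include <stdio.h>
	#include <stdlib.h>

	static int request_fw(const char *name, void **fw)
	{
		(void)name;
		*fw = malloc(16);	/* pretend firmware load */
		return *fw ? 0 : -1;
	}

	static int load_fw_pair(void **wmfw, void **coeff)
	{
		int ret = request_fw("part-dsp1.wmfw", wmfw);

		if (ret)
			return ret;

		ret = request_fw("part-dsp1.bin", coeff);
		if (ret)
			goto coeff_err;	/* one exit path owns the cleanup */
		return 0;

	coeff_err:
		free(*wmfw);		/* pairs with the successful wmfw load */
		*wmfw = NULL;
		return ret;
	}

	int main(void)
	{
		void *wmfw = NULL, *coeff = NULL;

		printf("load_fw_pair: %d\n", load_fw_pair(&wmfw, &coeff));
		free(coeff);
		free(wmfw);
		return 0;
	}
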
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4a13747b2b0f3..10703a3df7ea4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4639,6 +4639,22 @@ static void alc236_fixup_hp_mute_led_coefbit2(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
++					  const struct hda_fixup *fix,
++					  int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->mute_led_polarity = 0;
++		spec->mute_led_coef.idx = 0x0b;
++		spec->mute_led_coef.mask = 3 << 2;
++		spec->mute_led_coef.on = 2 << 2;
++		spec->mute_led_coef.off = 1 << 2;
++		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
++	}
++}
++
+ /* turn on/off mic-mute LED per capture hook by coef bit */
+ static int coef_micmute_led_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
+@@ -6972,6 +6988,29 @@ static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* Forcibly assign NID 0x03 to HP while NID 0x02 to SPK */
++static void alc287_fixup_bind_dacs(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
++	static const hda_nid_t preferred_pairs[] = {
++		0x17, 0x02, 0x21, 0x03, 0
++	};
++
++	if (action != HDA_FIXUP_ACT_PRE_PROBE)
++		return;
++
++	snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++	spec->gen.preferred_dacs = preferred_pairs;
++	spec->gen.auto_mute_via_amp = 1;
++	if (spec->gen.autocfg.speaker_pins[0] != 0x14) {
++		snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
++					0x0); /* Make sure 0x14 was disable */
++	}
++}
++
++
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+@@ -7231,6 +7270,10 @@ enum {
+ 	ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+ 	ALC236_FIXUP_DELL_DUAL_CODECS,
+ 	ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
++	ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
++	ALC245_FIXUP_HP_X360_MUTE_LEDS,
++	ALC287_FIXUP_THINKPAD_I2S_SPK,
++	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -9309,6 +9352,26 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ 	},
++	[ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_mute_led_coefbit,
++	},
++	[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_mute_led_coefbit,
++		.chained = true,
++		.chain_id = ALC245_FIXUP_HP_GPIO_LED
++	},
++	[ALC287_FIXUP_THINKPAD_I2S_SPK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc287_fixup_bind_dacs,
++	},
++	[ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc287_fixup_bind_dacs,
++		.chained = true,
++		.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9551,6 +9614,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+@@ -9582,6 +9646,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9720,7 +9785,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+-	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
++	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -9854,14 +9919,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+-	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -9943,7 +10008,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+-	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK),
++	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+@@ -10425,6 +10490,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x17, 0x90170111},
+ 		{0x19, 0x03a11030},
+ 		{0x21, 0x03211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK,
++		{0x17, 0x90170110},
++		{0x19, 0x03a11030},
++		{0x21, 0x03211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60130},
+ 		{0x17, 0x90170110},
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 5cc774b3da05c..6153f91926132 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -234,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YM"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index d21f69f053422..d661bc9255f92 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -531,7 +531,10 @@ static int hdmi_codec_fill_codec_params(struct snd_soc_dai *dai,
+ 	hp->sample_rate = sample_rate;
+ 	hp->channels = channels;
+ 
+-	hcp->chmap_idx = idx;
++	if (pcm_audio)
++		hcp->chmap_idx = ca_id;
++	else
++		hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index f7676d30c82fd..0497bebae0eca 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -710,10 +710,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ {
+ 	unsigned int ofs = sai->soc_data->reg_offset;
+ 	bool tx = dir == TX;
+-	u32 xcsr, count = 100;
++	u32 xcsr, count = 100, mask;
++
++	if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output)
++		mask = FSL_SAI_CSR_TERE;
++	else
++		mask = FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE;
+ 
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+-			   FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
++			   mask, 0);
+ 
+ 	/* TERE will remain set till the end of current frame */
+ 	do {
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 3019626b05927..a6fe350c3373a 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -310,7 +310,8 @@ int asoc_simple_startup(struct snd_pcm_substream *substream)
+ 		if (fixed_sysclk % props->mclk_fs) {
+ 			dev_err(rtd->dev, "fixed sysclk %u not divisible by mclk_fs %u\n",
+ 				fixed_sysclk, props->mclk_fs);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto codec_err;
+ 		}
+ 		ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE,
+ 			fixed_rate, fixed_rate);
+diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
+index d6c38d8ea2ffb..ee57bfbca7845 100644
+--- a/sound/soc/intel/boards/sof_es8336.c
++++ b/sound/soc/intel/boards/sof_es8336.c
+@@ -798,6 +798,16 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
+ 					SOF_ES8336_JD_INVERTED),
+ 	},
++	{
++		.name = "mtl_es83x6_c1_h02",
++		.driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) |
++					SOF_NO_OF_HDMI_CAPTURE_SSP(2) |
++					SOF_HDMI_CAPTURE_1_SSP(0) |
++					SOF_HDMI_CAPTURE_2_SSP(2) |
++					SOF_SSP_HDMI_CAPTURE_PRESENT |
++					SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
++					SOF_ES8336_JD_INVERTED),
++	},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(platform, board_ids);
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index c86f8f9a61003..93544eac23ad8 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -385,6 +385,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		/* No Jack */
+ 		.driver_data = (void *)SOF_SDW_TGL_HDMI,
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B14"),
++		},
++		/* No Jack */
++		.driver_data = (void *)SOF_SDW_TGL_HDMI,
++	},
++
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index bcd66e0094b4b..c4b57cca6b228 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -648,18 +648,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ 	},
+-	{
+-		.link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
+-		.links = adl_sdw_rt1316_link1_rt714_link0,
+-		.drv_name = "sof_sdw",
+-		.sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
+-	},
+ 	{
+ 		.link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ 		.links = adl_sdw_rt1316_link12_rt714_link0,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+ 	},
++	{
++		.link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
++		.links = adl_sdw_rt1316_link1_rt714_link0,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
++	},
+ 	{
+ 		.link_mask = 0x5, /* 2 active links required */
+ 		.links = adl_sdw_rt1316_link2_rt714_link0,
+diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+index ed9821adc1d9d..d0bfefa7c9ecf 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+@@ -30,6 +30,16 @@ static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = {
+ 	.codecs = {"10EC5682", "RTL5682"},
+ };
+ 
++static const struct snd_soc_acpi_codecs mtl_lt6911_hdmi = {
++	.num_codecs = 1,
++	.codecs = {"INTC10B0"}
++};
++
++static const struct snd_soc_acpi_codecs mtl_essx_83x6 = {
++	.num_codecs = 3,
++	.codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
++};
++
+ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 	{
+ 		.comp_ids = &mtl_rt5682_rt5682s_hp,
+@@ -45,6 +55,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 		.quirk_data = &mtl_max98360a_amp,
+ 		.sof_tplg_filename = "sof-mtl-max98360a-rt5682.tplg",
+ 	},
++	{
++		.comp_ids = &mtl_essx_83x6,
++		.drv_name = "mtl_es83x6_c1_h02",
++		.machine_quirk = snd_soc_acpi_codec_list,
++		.quirk_data = &mtl_lt6911_hdmi,
++		.sof_tplg_filename = "sof-mtl-es83x6-ssp1-hdmi-ssp02.tplg",
++	},
+ 	{
+ 		.comp_ids = &mtl_rt5682_rt5682s_hp,
+ 		.drv_name = "mtl_rt1019_rt5682",
+@@ -52,6 +69,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 		.quirk_data = &mtl_rt1019p_amp,
+ 		.sof_tplg_filename = "sof-mtl-rt1019-rt5682.tplg",
+ 	},
++	{
++		.comp_ids = &mtl_essx_83x6,
++		.drv_name = "sof-essx8336",
++		.sof_tplg_filename = "sof-mtl-es8336", /* the tplg suffix is added at run time */
++		.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
++					SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
++					SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
++	},
+ 	{},
+ };
+ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines);
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index 58b3092425f1a..874230ffcbcc6 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -34,7 +34,6 @@ static const struct sof_amd_acp_desc rembrandt_chip_info = {
+ 	.dsp_intr_base	= ACP6X_DSP_SW_INTR_BASE,
+ 	.sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
+ 	.hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+-	.acp_clkmux_sel = ACP6X_CLKMUX_SEL,
+ 	.fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
+ };
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9105ec623120a..783a2493707ea 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1204,6 +1204,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 			cval->res = 16;
+ 		}
+ 		break;
++	case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			usb_audio_info(chip,
++				"set resolution quirk: cval->res = 16\n");
++			cval->res = 16;
++		}
++		break;
+ 	}
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 598659d761cc9..4e64842245e19 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1994,7 +1994,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
+ 		/* mic works only when ep packet size is set to wMaxPacketSize */
+ 		fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
+ 		break;
+-
++	case USB_ID(0x3511, 0x2b1e): /* Opencomm2 UC USB Bluetooth dongle */
++		/* mic works only when ep pitch control is not set */
++		if (stream == SNDRV_PCM_STREAM_CAPTURE)
++			fp->attributes &= ~UAC_EP_CS_ATTR_PITCH_CONTROL;
++		break;
+ 	}
+ }
+ 
+@@ -2173,6 +2177,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
++	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 
+ 	/* Vendor matches */
+ 	VENDOR_FLG(0x045e, /* MS Lifecam */


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-18 20:01 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-18 20:01 UTC
  To: gentoo-commits

commit:     10a9a6bf7b7d62357104ce0079711f0ccdbde3e2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 18 19:54:56 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 18 19:54:56 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=10a9a6bf

gcc-plugins: Rename last_stmt() for GCC 14+

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                               |  4 ++++
 2940_handle-gcc-14-last-stmt-rename.patch | 31 +++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/0000_README b/0000_README
index 665fa5a2..86ea0adc 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  2930_tar_override.patch
 From:   https://lore.kernel.org/lkml/20230412082743.350699-1-mgorny@gentoo.org/#t
 Desc:   kheaders: make it possible to override TAR
 
+Patch:  2940_handle-gcc-14-last-stmt-rename.patch
+From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
+Desc:   gcc-plugins: Rename last_stmt() for GCC 14+
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2940_handle-gcc-14-last-stmt-rename.patch b/2940_handle-gcc-14-last-stmt-rename.patch
new file mode 100644
index 00000000..b04ce8da
--- /dev/null
+++ b/2940_handle-gcc-14-last-stmt-rename.patch
@@ -0,0 +1,31 @@
+From: Kees Cook <keescook@chromium.org>
+To: linux-hardening@vger.kernel.org
+Cc: Kees Cook <keescook@chromium.org>, linux-kernel@vger.kernel.org
+Subject: [PATCH] gcc-plugins: Rename last_stmt() for GCC 14+
+Date: Thu, 10 Aug 2023 23:05:49 -0700	[thread overview]
+Message-ID: <20230811060545.never.564-kees@kernel.org> (raw)
+
+In GCC 14, last_stmt() was renamed to last_nondebug_stmt(). Add a helper
+macro to handle the renaming.
+
+Cc: linux-hardening@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+---
+ scripts/gcc-plugins/gcc-common.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 84c730da36dd..1ae39b9f4a95 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -440,4 +440,8 @@ static inline void debug_gimple_stmt(const_gimple s)
+ #define SET_DECL_MODE(decl, mode)	DECL_MODE(decl) = (mode)
+ #endif
+ 
++#if BUILDING_GCC_VERSION >= 14000
++#define last_stmt(x)			last_nondebug_stmt(x)
++#endif
++
+ #endif
+-- 
+2.34.1
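
The quoted patch is a one-macro compatibility shim: on GCC 14 and later
the plugin code keeps calling last_stmt() and the preprocessor maps it
onto the renamed last_nondebug_stmt(), while older compilers keep the
original symbol. The same version-gated rename pattern in a stand-alone
sketch (the version macro value and function bodies are invented for the
demo):

	#include <stdio.h>

	#define BUILDING_GCC_VERSION 14000	/* pretend we are on GCC 14 */

	#if BUILDING_GCC_VERSION >= 14000
	static int last_nondebug_stmt(int bb) { return bb + 1; }	/* new name */
	#define last_stmt(x) last_nondebug_stmt(x)
	#else
	static int last_stmt(int bb) { return bb + 1; }			/* old name */
	#endif

	int main(void)
	{
		printf("%d\n", last_stmt(41));	/* callers keep the old spelling */
		return 0;
	}
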


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-17 22:58 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-17 22:58 UTC
  To: gentoo-commits

commit:     94bc701a20b843aee37fe6700f5a4e2c9c0d15a2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 17 22:53:48 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 17 22:53:48 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94bc701a

kheaders: make it possible to override TAR

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |  4 +++
 2930_tar_override.patch | 65 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/0000_README b/0000_README
index 7be69feb..665fa5a2 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2930_tar_override.patch
+From:   https://lore.kernel.org/lkml/20230412082743.350699-1-mgorny@gentoo.org/#t
+Desc:   kheaders: make it possible to override TAR
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_tar_override.patch b/2930_tar_override.patch
new file mode 100644
index 00000000..001ff800
--- /dev/null
+++ b/2930_tar_override.patch
@@ -0,0 +1,65 @@
+From: "Michał Górny" <mgorny@gentoo.org>
+To: Dmitry Goldin <dgoldin+lkml@protonmail.ch>
+Cc: "Masahiro Yamada" <yamada.masahiro@socionext.com>,
+	linux-kernel@vger.kernel.org, "Michał Górny" <mgorny@gentoo.org>,
+	"Sam James" <sam@gentoo.org>,
+	"Masahiro Yamada" <masahiroy@kernel.org>
+Subject: [PATCH v2] kheaders: make it possible to override TAR
+Date: Wed, 12 Apr 2023 10:27:43 +0200	[thread overview]
+Message-ID: <20230412082743.350699-1-mgorny@gentoo.org> (raw)
+In-Reply-To: <CAK7LNATfrxu7BK0ZRq+qSjObiz6GpS3U5L=12vDys5_yy=Mdow@mail.gmail.com>
+
+Commit 86cdd2fdc4e39c388d39c7ba2396d1a9dfd66226 ("kheaders: make headers
+archive reproducible") introduced a number of options specific to GNU
+tar to the `tar` invocation in `gen_kheaders.sh` script.  This causes
+the script to fail to work on systems where `tar` is not GNU tar.  This
+can occur e.g. on recent Gentoo Linux installations that support using
+bsdtar from libarchive instead.
+
+Add a `TAR` make variable to make it possible to override the tar
+executable used, e.g. by specifying:
+
+  make TAR=gtar
+
+Link: https://bugs.gentoo.org/884061
+Reported-by: Sam James <sam@gentoo.org>
+Tested-by: Sam James <sam@gentoo.org>
+Co-developed-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Michał Górny <mgorny@gentoo.org>
+---
+ Makefile               | 3 ++-
+ kernel/gen_kheaders.sh | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 5aeea3d98..50045059c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -520,6 +520,7 @@ LZMA		= lzma
+ LZ4		= lz4c
+ XZ		= xz
+ ZSTD		= zstd
++TAR		= tar
+ 
+ PAHOLE_FLAGS	= $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
+ 
+@@ -599,7 +600,7 @@ export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
+ export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
+ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
+ export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+-export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
++export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD TAR
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+ export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
+ 
+--- a/kernel/gen_kheaders.sh	2023-10-17 18:43:00.365033993 -0400
++++ b/kernel/gen_kheaders.sh	2023-10-17 18:43:36.301750445 -0400
+@@ -83,7 +83,7 @@ find $cpio_dir -type f -print0 |
+ 	xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
+ 
+ # Create archive and try to normalize metadata for reproducibility.
+-tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
++${TAR:-tar} "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+     --owner=0 --group=0 --sort=name --numeric-owner \
+     -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+ 


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-10 22:53 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-10 22:53 UTC
  To: gentoo-commits

commit:     97b54929d630ced854153d13442c059d8ebd7d33
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 10 22:53:38 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 10 22:53:38 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=97b54929

Linux patch 6.5.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-6.5.7.patch | 7467 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7471 insertions(+)

diff --git a/0000_README b/0000_README
index ffd65d42..7be69feb 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-6.5.6.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.6
 
+Patch:  1006_linux-6.5.7.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-6.5.7.patch b/1006_linux-6.5.7.patch
new file mode 100644
index 00000000..030a6b41
--- /dev/null
+++ b/1006_linux-6.5.7.patch
@@ -0,0 +1,7467 @@
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 0ac452333eb4f..daa1e2ccc0d79 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -63,6 +63,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #1902691        | ARM64_ERRATUM_1902691       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A520     | #2966298        | ARM64_ERRATUM_2966298       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 4a010a7cde7f8..5590f2f5201c8 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -2287,6 +2287,14 @@ accept_ra_min_hop_limit - INTEGER
+ 
+ 	Default: 1
+ 
++accept_ra_min_lft - INTEGER
++	Minimum acceptable lifetime value in Router Advertisement.
++
++	RA sections with a lifetime less than this value shall be
++	ignored. Zero lifetimes stay unaffected.
++
++	Default: 0
++
+ accept_ra_pinfo - BOOLEAN
+ 	Learn Prefix Information in Router Advertisement.
+ 
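
The accept_ra_min_lft documentation added above describes a per-interface
IPv6 conf knob: RA sections advertising a lifetime below the threshold
are ignored, while zero lifetimes still pass through. A stand-alone C
sketch that sets it by writing the procfs entry, equivalent to
`sysctl -w net.ipv6.conf.all.accept_ra_min_lft=900`; the path follows the
usual ipv6 per-interface conf layout, and 900 is an arbitrary example
value:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv6/conf/all/accept_ra_min_lft", "w");

		if (!f) {
			perror("fopen");	/* needs a kernel carrying this patch */
			return 1;
		}
		fprintf(f, "900\n");		/* ignore RA lifetimes under 15 minutes */
		fclose(f);
		return 0;
	}
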
+diff --git a/Makefile b/Makefile
+index 81f14b15592f0..f9d5970f34413 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index a2511b30d0f67..73085b30b3092 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1038,6 +1038,19 @@ config ARM64_ERRATUM_2645198
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_2966298
++	bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
++	default y
++	help
++	  This option adds the workaround for ARM Cortex-A520 erratum 2966298.
++
++	  On an affected Cortex-A520 core, a speculatively executed unprivileged
++	  load might leak data from a privileged level via a cache side channel.
++
++	  Work around this problem by executing a TLBI before returning to EL0.
++
++	  If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ 	bool "Cavium erratum 22375, 24313"
+ 	default y
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 96e50227f940e..5bba393760557 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -663,7 +663,7 @@ static inline bool supports_clearbhb(int scope)
+ 		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+ 
+ 	return cpuid_feature_extract_unsigned_field(isar2,
+-						    ID_AA64ISAR2_EL1_BC_SHIFT);
++						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
+ }
+ 
+ const struct cpumask *system_32bit_el0_cpumask(void);
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 5f6f84837a490..74d00feb62f03 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -79,6 +79,7 @@
+ #define ARM_CPU_PART_CORTEX_A78AE	0xD42
+ #define ARM_CPU_PART_CORTEX_X1		0xD44
+ #define ARM_CPU_PART_CORTEX_A510	0xD46
++#define ARM_CPU_PART_CORTEX_A520	0xD80
+ #define ARM_CPU_PART_CORTEX_A710	0xD47
+ #define ARM_CPU_PART_CORTEX_A715	0xD4D
+ #define ARM_CPU_PART_CORTEX_X2		0xD48
+@@ -148,6 +149,7 @@
+ #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+ #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+ #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
+index 692b1ec663b27..521267478d187 100644
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -138,6 +138,7 @@
+ #define KERNEL_HWCAP_SME_B16B16		__khwcap2_feature(SME_B16B16)
+ #define KERNEL_HWCAP_SME_F16F16		__khwcap2_feature(SME_F16F16)
+ #define KERNEL_HWCAP_MOPS		__khwcap2_feature(MOPS)
++#define KERNEL_HWCAP_HBC		__khwcap2_feature(HBC)
+ 
+ /*
+  * This yields a mask that user programs can use to figure out what
+diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
+index a2cac4305b1e0..53026f45a5092 100644
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -103,5 +103,6 @@
+ #define HWCAP2_SME_B16B16	(1UL << 41)
+ #define HWCAP2_SME_F16F16	(1UL << 42)
+ #define HWCAP2_MOPS		(1UL << 43)
++#define HWCAP2_HBC		(1UL << 44)
+ 
+ #endif /* _UAPI__ASM_HWCAP_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index be66e94a21bda..5706e74c55786 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -730,6 +730,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
+ 	},
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_2966298
++	{
++		.desc = "ARM erratum 2966298",
++		.capability = ARM64_WORKAROUND_2966298,
++		/* Cortex-A520 r0p0 - r0p1 */
++		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++	},
++#endif
+ #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+ 	{
+ 		.desc = "AmpereOne erratum AC03_CPU_38",
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index f9d456fe132d8..2c0b8444fea67 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -222,7 +222,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
++	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ 		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
+@@ -2844,6 +2845,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+ 	HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
+ 	HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
++	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
+ #ifdef CONFIG_ARM64_SME
+ 	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
+ 	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index 58622dc859177..98fda85005353 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -126,6 +126,7 @@ static const char *const hwcap_str[] = {
+ 	[KERNEL_HWCAP_SME_B16B16]	= "smeb16b16",
+ 	[KERNEL_HWCAP_SME_F16F16]	= "smef16f16",
+ 	[KERNEL_HWCAP_MOPS]		= "mops",
++	[KERNEL_HWCAP_HBC]		= "hbc",
+ };
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 6ad61de03d0a0..a6030913cd58c 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -428,6 +428,10 @@ alternative_else_nop_endif
+ 	ldp	x28, x29, [sp, #16 * 14]
+ 
+ 	.if	\el == 0
++alternative_if ARM64_WORKAROUND_2966298
++	tlbi	vale1, xzr
++	dsb	nsh
++alternative_else_nop_endif
+ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ 	ldr	lr, [sp, #S_LR]
+ 	add	sp, sp, #PT_REGS_SIZE		// restore sp
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index c80ed4f3cbcee..c28a9b995f3c6 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -83,6 +83,7 @@ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2645198
+ WORKAROUND_2658417
++WORKAROUND_2966298
+ WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
+index 65866bf819c33..ffc81afa6caca 100644
+--- a/arch/arm64/tools/sysreg
++++ b/arch/arm64/tools/sysreg
+@@ -1347,7 +1347,11 @@ UnsignedEnum	51:48	RPRFM
+ 	0b0000	NI
+ 	0b0001	IMP
+ EndEnum
+-Res0	47:28
++Res0	47:32
++UnsignedEnum	31:28	CLRBHB
++	0b0000	NI
++	0b0001	IMP
++EndEnum
+ UnsignedEnum	27:24	PAC_frac
+ 	0b0000	NI
+ 	0b0001	IMP
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 6d28b5514699a..ee9e071859b2f 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -2,39 +2,42 @@
+ #ifndef __PARISC_LDCW_H
+ #define __PARISC_LDCW_H
+ 
+-#ifndef CONFIG_PA20
+ /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+    and GCC only guarantees 8-byte alignment for stack locals, we can't
+    be assured of 16-byte alignment for atomic lock data even if we
+    specify "__attribute ((aligned(16)))" in the type declaration.  So,
+    we use a struct containing an array of four ints for the atomic lock
+    type and dynamically select the 16-byte aligned int from the array
+-   for the semaphore.  */
++   for the semaphore. */
++
++/* From: "Jim Hull" <jim.hull of hp.com>
++   I've attached a summary of the change, but basically, for PA 2.0, as
++   long as the ",CO" (coherent operation) completer is implemented, then the
++   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
++   they only require "natural" alignment (4-byte for ldcw, 8-byte for
++   ldcd).
++
++   Although the cache control hint is accepted by all PA 2.0 processors,
++   it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
++   require 16-byte alignment. If the address is unaligned, the operation
++   of the instruction is undefined. The ldcw instruction does not generate
++   unaligned data reference traps so misaligned accesses are not detected.
++   This hid the problem for years. So, restore the 16-byte alignment dropped
++   by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
+ 
+ #define __PA_LDCW_ALIGNMENT	16
+-#define __PA_LDCW_ALIGN_ORDER	4
+ #define __ldcw_align(a) ({					\
+ 	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
+ 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
+ 		& ~(__PA_LDCW_ALIGNMENT - 1);			\
+ 	(volatile unsigned int *) __ret;			\
+ })
+-#define __LDCW	"ldcw"
+ 
+-#else /*CONFIG_PA20*/
+-/* From: "Jim Hull" <jim.hull of hp.com>
+-   I've attached a summary of the change, but basically, for PA 2.0, as
+-   long as the ",CO" (coherent operation) completer is specified, then the
+-   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+-   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+-   ldcd). */
+-
+-#define __PA_LDCW_ALIGNMENT	4
+-#define __PA_LDCW_ALIGN_ORDER	2
+-#define __ldcw_align(a) (&(a)->slock)
++#ifdef CONFIG_PA20
+ #define __LDCW	"ldcw,co"
+-
+-#endif /*!CONFIG_PA20*/
++#else
++#define __LDCW	"ldcw"
++#endif
+ 
+ /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+    We don't explicitly expose that "*a" may be written as reload
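
The restored __ldcw_align() above guarantees ldcw a 16-byte-aligned word
by carving it out of a four-int (16-byte) array: add alignment minus one,
then mask down. Because the array itself spans 16 bytes, the rounded-up
address always lands inside it. A stand-alone check of the arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	#define LDCW_ALIGNMENT 16UL

	static volatile unsigned int *ldcw_align(volatile unsigned int lock[4])
	{
		uintptr_t p = (uintptr_t)&lock[0];

		p = (p + LDCW_ALIGNMENT - 1) & ~(LDCW_ALIGNMENT - 1);
		return (volatile unsigned int *)p;	/* within lock[0..3] */
	}

	int main(void)
	{
		volatile unsigned int lock[4] = { 1, 1, 1, 1 };
		volatile unsigned int *w = ldcw_align(lock);

		printf("base=%p aligned=%p offset=%ld\n",
		       (void *)(uintptr_t)lock, (void *)(uintptr_t)w,
		       (long)((uintptr_t)w - (uintptr_t)lock));
		return 0;
	}
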
+diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
+index efd06a897c6a3..7b986b09dba84 100644
+--- a/arch/parisc/include/asm/spinlock_types.h
++++ b/arch/parisc/include/asm/spinlock_types.h
+@@ -9,15 +9,10 @@
+ #ifndef __ASSEMBLY__
+ 
+ typedef struct {
+-#ifdef CONFIG_PA20
+-	volatile unsigned int slock;
+-# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
+-#else
+ 	volatile unsigned int lock[4];
+ # define __ARCH_SPIN_LOCK_UNLOCKED	\
+ 	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+ 	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
+-#endif
+ } arch_spinlock_t;
+ 
+ 
+diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
+index 4098f9a0964b9..2019c1f04bd03 100644
+--- a/arch/parisc/kernel/smp.c
++++ b/arch/parisc/kernel/smp.c
+@@ -440,7 +440,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ 	if (cpu_online(cpu))
+ 		return 0;
+ 
+-	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
++	if (num_online_cpus() < nr_cpu_ids &&
++		num_online_cpus() < setup_max_cpus &&
++		smp_boot_one_cpu(cpu, tidle))
+ 		return -EIO;
+ 
+ 	return cpu_online(cpu) ? 0 : -EIO;
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index de2fb12120d2e..2861e3360affc 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -2513,7 +2513,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ 			return -E2BIG;
+ 	}
+ 
+-	return ret;
++	return tjit.common.prg;
+ }
+ 
+ bool bpf_jit_supports_subprog_tailcalls(void)
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index abadd5f234254..e24976593a298 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -534,8 +534,12 @@ static void amd_pmu_cpu_reset(int cpu)
+ 	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
+ 	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+ 
+-	/* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
+-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
++	/*
++	 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
++	 * and PerfCntrGLobalStatus.PerfCntrOvfl
++	 */
++	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
++	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
+ }
+ 
+ static int amd_pmu_cpu_prepare(int cpu)
+@@ -570,6 +574,7 @@ static void amd_pmu_cpu_starting(int cpu)
+ 	int i, nb_id;
+ 
+ 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
++	amd_pmu_cpu_reset(cpu);
+ 
+ 	if (!x86_pmu.amd_nb_constraints)
+ 		return;
+@@ -591,8 +596,6 @@ static void amd_pmu_cpu_starting(int cpu)
+ 
+ 	cpuc->amd_nb->nb_id = nb_id;
+ 	cpuc->amd_nb->refcnt++;
+-
+-	amd_pmu_cpu_reset(cpu);
+ }
+ 
+ static void amd_pmu_cpu_dead(int cpu)
+@@ -601,6 +604,7 @@ static void amd_pmu_cpu_dead(int cpu)
+ 
+ 	kfree(cpuhw->lbr_sel);
+ 	cpuhw->lbr_sel = NULL;
++	amd_pmu_cpu_reset(cpu);
+ 
+ 	if (!x86_pmu.amd_nb_constraints)
+ 		return;
+@@ -613,8 +617,6 @@ static void amd_pmu_cpu_dead(int cpu)
+ 
+ 		cpuhw->amd_nb = NULL;
+ 	}
+-
+-	amd_pmu_cpu_reset(cpu);
+ }
+ 
+ static inline void amd_pmu_set_global_ctl(u64 ctl)
+@@ -884,7 +886,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 	struct hw_perf_event *hwc;
+ 	struct perf_event *event;
+ 	int handled = 0, idx;
+-	u64 status, mask;
++	u64 reserved, status, mask;
+ 	bool pmu_enabled;
+ 
+ 	/*
+@@ -909,6 +911,14 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
+ 	}
+ 
++	reserved = status & ~amd_pmu_global_cntr_mask;
++	if (reserved)
++		pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
++			     reserved);
++
++	/* Clear any reserved bits set by buggy microcode */
++	status &= amd_pmu_global_cntr_mask;
++
+ 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ 		if (!test_bit(idx, cpuc->active_mask))
+ 			continue;
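
The amd_pmu_v2_handle_irq() hunk above defends against buggy microcode
leaving bits outside the architected counter mask set in
PerfCntrGlobalStatus: warn once, strip them, then handle only genuine
counter overflows. The masking step in a stand-alone sketch (the mask
value is assumed, standing in for amd_pmu_global_cntr_mask with six
counters):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long cntr_mask = 0x3fULL;	/* assumed: 6 counters */
		unsigned long long status = 0x4000000000000021ULL; /* stray high bit */
		unsigned long long reserved = status & ~cntr_mask;

		if (reserved)
			printf("reserved bits set: 0x%llx\n", reserved);

		status &= cntr_mask;	/* act only on real overflow bits */
		printf("status to handle: 0x%llx\n", status);
		return 0;
	}
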
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index 2eabccde94fb3..dcf325b7b0229 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -256,7 +256,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
+ 	return 0;
+ }
+ 
+-static int sev_cpuid_hv(struct cpuid_leaf *leaf)
++static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
+ {
+ 	int ret;
+ 
+@@ -279,6 +279,45 @@ static int sev_cpuid_hv(struct cpuid_leaf *leaf)
+ 	return ret;
+ }
+ 
++static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++	u32 cr4 = native_read_cr4();
++	int ret;
++
++	ghcb_set_rax(ghcb, leaf->fn);
++	ghcb_set_rcx(ghcb, leaf->subfn);
++
++	if (cr4 & X86_CR4_OSXSAVE)
++		/* Safe to read xcr0 */
++		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
++	else
++		/* xgetbv will cause #UD - use reset value for xcr0 */
++		ghcb_set_xcr0(ghcb, 1);
++
++	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
++	if (ret != ES_OK)
++		return ret;
++
++	if (!(ghcb_rax_is_valid(ghcb) &&
++	      ghcb_rbx_is_valid(ghcb) &&
++	      ghcb_rcx_is_valid(ghcb) &&
++	      ghcb_rdx_is_valid(ghcb)))
++		return ES_VMM_ERROR;
++
++	leaf->eax = ghcb->save.rax;
++	leaf->ebx = ghcb->save.rbx;
++	leaf->ecx = ghcb->save.rcx;
++	leaf->edx = ghcb->save.rdx;
++
++	return ES_OK;
++}
++
++static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++	return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
++		    : __sev_cpuid_hv_msr(leaf);
++}
++
+ /*
+  * This may be called early while still running on the initial identity
+  * mapping. Use RIP-relative addressing to obtain the correct address
+@@ -388,19 +427,20 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
+ 	return false;
+ }
+ 
+-static void snp_cpuid_hv(struct cpuid_leaf *leaf)
++static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+-	if (sev_cpuid_hv(leaf))
++	if (sev_cpuid_hv(ghcb, ctxt, leaf))
+ 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
+ }
+ 
+-static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
++static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
++				 struct cpuid_leaf *leaf)
+ {
+ 	struct cpuid_leaf leaf_hv = *leaf;
+ 
+ 	switch (leaf->fn) {
+ 	case 0x1:
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* initial APIC ID */
+ 		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
+@@ -419,7 +459,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+ 		break;
+ 	case 0xB:
+ 		leaf_hv.subfn = 0;
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* extended APIC ID */
+ 		leaf->edx = leaf_hv.edx;
+@@ -467,7 +507,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+ 		}
+ 		break;
+ 	case 0x8000001E:
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* extended APIC ID */
+ 		leaf->eax = leaf_hv.eax;
+@@ -488,7 +528,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+  * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+  * should be treated as fatal by caller.
+  */
+-static int snp_cpuid(struct cpuid_leaf *leaf)
++static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+ 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+ 
+@@ -522,7 +562,7 @@ static int snp_cpuid(struct cpuid_leaf *leaf)
+ 			return 0;
+ 	}
+ 
+-	return snp_cpuid_postprocess(leaf);
++	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
+ }
+ 
+ /*
+@@ -544,14 +584,14 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	leaf.fn = fn;
+ 	leaf.subfn = subfn;
+ 
+-	ret = snp_cpuid(&leaf);
++	ret = snp_cpuid(NULL, NULL, &leaf);
+ 	if (!ret)
+ 		goto cpuid_done;
+ 
+ 	if (ret != -EOPNOTSUPP)
+ 		goto fail;
+ 
+-	if (sev_cpuid_hv(&leaf))
++	if (__sev_cpuid_hv_msr(&leaf))
+ 		goto fail;
+ 
+ cpuid_done:
+@@ -848,14 +888,15 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ 	return ret;
+ }
+ 
+-static int vc_handle_cpuid_snp(struct pt_regs *regs)
++static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ {
++	struct pt_regs *regs = ctxt->regs;
+ 	struct cpuid_leaf leaf;
+ 	int ret;
+ 
+ 	leaf.fn = regs->ax;
+ 	leaf.subfn = regs->cx;
+-	ret = snp_cpuid(&leaf);
++	ret = snp_cpuid(ghcb, ctxt, &leaf);
+ 	if (!ret) {
+ 		regs->ax = leaf.eax;
+ 		regs->bx = leaf.ebx;
+@@ -874,7 +915,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ 	enum es_result ret;
+ 	int snp_cpuid_ret;
+ 
+-	snp_cpuid_ret = vc_handle_cpuid_snp(regs);
++	snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
+ 	if (!snp_cpuid_ret)
+ 		return ES_OK;
+ 	if (snp_cpuid_ret != -EOPNOTSUPP)
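
[Editor's note: the sev-shared.c refactor above is a transport-selection
pattern: one entry point, two backends, chosen by whether a shared GHCB page
is mapped yet (do_vc_no_ghcb() passes NULL and keeps using the MSR protocol).
A hedged standalone sketch of that dispatch shape; the backends here only
simulate the two protocols.]

#include <stdio.h>

struct cpuid_leaf { unsigned fn, subfn, eax, ebx, ecx, edx; };
struct ghcb;	/* opaque; NULL means "not mapped yet" */

/* Slow path: stands in for the one-register-per-exit MSR protocol. */
static int cpuid_via_msr(struct cpuid_leaf *l)
{
	l->eax = l->ebx = l->ecx = l->edx = 0;
	return 0;
}

/* Fast path: stands in for the all-registers-in-one-exit GHCB protocol. */
static int cpuid_via_ghcb(struct ghcb *g, struct cpuid_leaf *l)
{
	(void)g;
	l->eax = l->ebx = l->ecx = l->edx = 0;
	return 0;
}

static int cpuid_hv(struct ghcb *g, struct cpuid_leaf *l)
{
	return g ? cpuid_via_ghcb(g, l) : cpuid_via_msr(l);
}

int main(void)
{
	struct cpuid_leaf leaf = { .fn = 1 };

	/* Early boot: no GHCB mapped yet, fall back to the MSR protocol. */
	return cpuid_hv(NULL, &leaf);
}
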
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 1ee7bed453ded..2b0342a03c1ba 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -868,8 +868,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
+ 
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+-	unsigned long vaddr;
+-	unsigned int npages;
++	unsigned long vaddr, npages;
+ 
+ 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ 		return;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 76bf185a73c65..6ae9cff6b50c5 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5245,11 +5245,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+ 
+ static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
+ {
++	/*
++	 * We are about to suspend the port, so we do not care about
++	 * scsi_rescan_device() calls scheduled by previous resume operations.
++	 * The next resume will schedule the rescan again. So cancel any rescan
++	 * that is not done yet.
++	 */
++	cancel_delayed_work_sync(&ap->scsi_rescan_task);
++
+ 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
+ }
+ 
+ static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
+ {
++	/*
++	 * We are about to suspend the port, so we do not care about
++	 * scsi_rescan_device() calls scheduled by previous resume operations.
++	 * The next resume will schedule the rescan again. So cancel any rescan
++	 * that is not done yet.
++	 */
++	cancel_delayed_work_sync(&ap->scsi_rescan_task);
++
+ 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
+ }
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 702812285d8f0..ed3146c460910 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4900,7 +4900,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 	struct ata_link *link;
+ 	struct ata_device *dev;
+ 	unsigned long flags;
+-	bool delay_rescan = false;
++	int ret = 0;
+ 
+ 	mutex_lock(&ap->scsi_scan_mutex);
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -4909,37 +4909,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 		ata_for_each_dev(dev, link, ENABLED) {
+ 			struct scsi_device *sdev = dev->sdev;
+ 
++			/*
++			 * If the port was suspended before this was scheduled,
++			 * bail out.
++			 */
++			if (ap->pflags & ATA_PFLAG_SUSPENDED)
++				goto unlock;
++
+ 			if (!sdev)
+ 				continue;
+ 			if (scsi_device_get(sdev))
+ 				continue;
+ 
+-			/*
+-			 * If the rescan work was scheduled because of a resume
+-			 * event, the port is already fully resumed, but the
+-			 * SCSI device may not yet be fully resumed. In such
+-			 * case, executing scsi_rescan_device() may cause a
+-			 * deadlock with the PM code on device_lock(). Prevent
+-			 * this by giving up and retrying rescan after a short
+-			 * delay.
+-			 */
+-			delay_rescan = sdev->sdev_gendev.power.is_suspended;
+-			if (delay_rescan) {
+-				scsi_device_put(sdev);
+-				break;
+-			}
+-
+ 			spin_unlock_irqrestore(ap->lock, flags);
+-			scsi_rescan_device(&(sdev->sdev_gendev));
++			ret = scsi_rescan_device(sdev);
+ 			scsi_device_put(sdev);
+ 			spin_lock_irqsave(ap->lock, flags);
++
++			if (ret)
++				goto unlock;
+ 		}
+ 	}
+ 
++unlock:
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 	mutex_unlock(&ap->scsi_scan_mutex);
+ 
+-	if (delay_rescan)
++	/* Reschedule with a delay if scsi_rescan_device() returned an error */
++	if (ret)
+ 		schedule_delayed_work(&ap->scsi_rescan_task,
+ 				      msecs_to_jiffies(5));
+ }
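
[Editor's note: the libata changes above have two halves: suspend cancels any
pending rescan work, and the rescan worker bails out if the port suspended
under it, re-queueing itself with a short delay on error instead of spinning
under the lock. A toy sketch of that retry-with-delay shape, with the sleep
and work queue modeled by a plain loop.]

#include <stdio.h>

/* Simulated device scan: fails until the device is "resumed". */
static int rescan_device(int resumed)
{
	return resumed ? 0 : -1;
}

int main(void)
{
	int resumed = 0;

	/* Worker body: on error, give up and re-queue; here the re-queue
	 * is modeled as the next loop iteration.
	 */
	for (int attempt = 0; attempt < 3; attempt++) {
		if (rescan_device(resumed) == 0) {
			printf("rescan done on attempt %d\n", attempt + 1);
			return 0;
		}
		printf("attempt %d failed, rescheduling\n", attempt + 1);
		resumed = 1;	/* device finishes resuming meanwhile */
	}
	return 1;
}
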
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 06788965aa293..31d7bc682910c 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ 		if (!rbnode)
+ 			return -ENOMEM;
+ 		regcache_rbtree_set_register(map, rbnode,
+-					     reg - rbnode->base_reg, value);
++					     (reg - rbnode->base_reg) / map->reg_stride,
++					     value);
+ 		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
+ 		rbtree_ctx->cached_rbnode = rbnode;
+ 	}
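
[Editor's note: the regcache one-liner above fixes a unit mismatch: rbnode
slots are indexed in register units, while reg - base_reg is a register
*address* delta, and the two differ by reg_stride. The conversion in
isolation, with a hypothetical stride-4 register map.]

#include <stdio.h>
#include <assert.h>

int main(void)
{
	unsigned int reg_stride = 4;	/* registers live at 0x00, 0x04, ... */
	unsigned int base_reg = 0x10;
	unsigned int reg = 0x18;

	/* Address delta vs. index into the per-block value array. */
	unsigned int addr_delta = reg - base_reg;	/* 8 */
	unsigned int index = addr_delta / reg_stride;	/* 2 */

	assert(index == 2);
	printf("reg 0x%x is slot %u of the block at 0x%x\n",
	       reg, index, base_reg);
	return 0;
}
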
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index da33bbbdacb95..58f107194fdaf 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ 	else if (param == PIN_CONFIG_BIAS_DISABLE ||
+ 			param == PIN_CONFIG_BIAS_PULL_DOWN ||
+ 			param == PIN_CONFIG_DRIVE_STRENGTH)
+-		return pinctrl_gpio_set_config(offset, config);
++		return pinctrl_gpio_set_config(chip->base + offset, config);
+ 	else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
+ 			param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
+ 		/* Return -ENOTSUPP to trigger emulation, as per datasheet */
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index a1630ed4b7416..d92650aecb06a 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -238,6 +238,7 @@ static bool pxa_gpio_has_pinctrl(void)
+ 	switch (gpio_type) {
+ 	case PXA3XX_GPIO:
+ 	case MMP2_GPIO:
++	case MMP_GPIO:
+ 		return false;
+ 
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2168dc92c6704..8940ee73f2dfe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2308,7 +2308,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ 		adev->flags |= AMD_IS_PX;
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+-		parent = pci_upstream_bridge(adev->pdev);
++		parent = pcie_find_root_port(adev->pdev);
+ 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index f0800c0c5168c..9119b0df2419f 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2081,36 +2081,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ 	return ret;
+ }
+ 
++#define MAX(a, b)	((a) > (b) ? (a) : (b))
++
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 					 uint32_t pcie_gen_cap,
+ 					 uint32_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+-	u32 smu_pcie_arg;
++	uint8_t *table_member1, *table_member2;
++	uint32_t min_gen_speed, max_gen_speed;
++	uint32_t min_lane_width, max_lane_width;
++	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+-	/* PCIE gen speed and lane width override */
+-	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+-		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+-			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
++	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
++	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+ 
+-		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+-			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
++	min_gen_speed = MAX(0, table_member1[0]);
++	max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
++	min_gen_speed = min_gen_speed > max_gen_speed ?
++			max_gen_speed : min_gen_speed;
++	min_lane_width = MAX(1, table_member2[0]);
++	max_lane_width = MIN(pcie_width_cap, table_member2[1]);
++	min_lane_width = min_lane_width > max_lane_width ?
++			 max_lane_width : min_lane_width;
+ 
+-		/* Force all levels to use the same settings */
+-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-			pcie_table->pcie_gen[i] = pcie_gen_cap;
+-			pcie_table->pcie_lane[i] = pcie_width_cap;
+-		}
++	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++		pcie_table->pcie_gen[0] = max_gen_speed;
++		pcie_table->pcie_lane[0] = max_lane_width;
+ 	} else {
+-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+-				pcie_table->pcie_gen[i] = pcie_gen_cap;
+-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
+-				pcie_table->pcie_lane[i] = pcie_width_cap;
+-		}
++		pcie_table->pcie_gen[0] = min_gen_speed;
++		pcie_table->pcie_lane[0] = min_lane_width;
+ 	}
++	pcie_table->pcie_gen[1] = max_gen_speed;
++	pcie_table->pcie_lane[1] = max_lane_width;
+ 
+ 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ 		smu_pcie_arg = (i << 16 |
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+index 2702ad4c26c88..60e0a317ed445 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+@@ -271,8 +271,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+ 			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+ 
++		/*
++		 * L3 fabric flush is needed for AUX CCS invalidation
++		 * which happens as part of pipe-control so we can
++		 * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
++		 * deals with Protected Memory which is not needed for
++		 * AUX CCS invalidation and leads to unwanted side effects.
++		 */
++		if (mode & EMIT_FLUSH)
++			bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
++
+ 		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+-		bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+ 		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ 		/* Wa_1409600907:tgl,adl-p */
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index e11c1c8036769..dc456c86e9569 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -792,6 +792,7 @@ config HID_NVIDIA_SHIELD
+ 	tristate "NVIDIA SHIELD devices"
+ 	depends on USB_HID
+ 	depends on BT_HIDP
++	depends on LEDS_CLASS
+ 	help
+ 	  Support for NVIDIA SHIELD accessories.
+ 
+diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
+index 9c44974135079..1ce9e42f57c71 100644
+--- a/drivers/hid/hid-nvidia-shield.c
++++ b/drivers/hid/hid-nvidia-shield.c
+@@ -482,7 +482,7 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
+ 
+ 	led->name = "thunderstrike:blue:led";
+ 	led->max_brightness = 1;
+-	led->flags = LED_CORE_SUSPENDRESUME;
++	led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
+ 	led->brightness_get = &thunderstrike_led_get_brightness;
+ 	led->brightness_set = &thunderstrike_led_set_brightness;
+ 
+@@ -694,6 +694,7 @@ err_stop:
+ err_haptics:
+ 	if (ts->haptics_dev)
+ 		input_unregister_device(ts->haptics_dev);
++	led_classdev_unregister(&ts->led_dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index dd942061fd775..ebc0aa4e4345f 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -2155,6 +2155,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	return ret;
+ 
+ err:
++	usb_free_urb(sc->ghl_urb);
++
+ 	hid_hw_stop(hdev);
+ 	return ret;
+ }
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 55cb25038e632..710fda5f19e1c 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev)
+ 	}
+ 	wakeup = &adev->wakeup;
+ 
++	/*
++	 * Call acpi_disable_gpe(), so that reference count
++	 * gpe_event_info->runtime_count doesn't overflow.
++	 * When gpe_event_info->runtime_count = 0, the call
++	 * to acpi_disable_gpe() simply return.
++	 */
++	acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
++
+ 	acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ 	if (ACPI_FAILURE(acpi_sts)) {
+ 		dev_err(dev, "enable ose_gpe failed\n");
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 9891c7dc2af56..1b301e8253bb7 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4946,7 +4946,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	int err = 0;
+ 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ 	struct net_device *ndev = NULL;
+-	struct ib_sa_multicast ib;
++	struct ib_sa_multicast ib = {};
+ 	enum ib_gid_type gid_type;
+ 	bool send_only;
+ 
+diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
+index 7b68b3ea979f7..f2fb2d8a65970 100644
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+ 		return -ENOMEM;
+ 
+ 	for (i = 0; i < ports_num; i++) {
+-		char port_str[10];
++		char port_str[11];
+ 
+ 		ports[i].port_num = i + 1;
+ 		snprintf(port_str, sizeof(port_str), "%u", i + 1);
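
[Editor's note: this fix, like the mlx4 buff[12] hunk further down, is the
same class of bug: a snprintf buffer sized for the common case rather than
the worst case of the format. "%u" of a 32-bit value needs up to 10 digits
plus the NUL, so char[10] can truncate. A self-contained check:]

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Worst case for "%u": 4294967295 = 10 digits + NUL = 11 bytes. */
	char port_str[11];
	int n = snprintf(port_str, sizeof(port_str), "%u", UINT_MAX);

	/* n is the length that would have been written; it must fit. */
	printf("wrote %d chars: %s\n", n, port_str);
	return n >= (int)sizeof(port_str);	/* nonzero means truncation */
}
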
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index d5d3e4f0de779..6d1dbc9787590 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
+ 	},
+ 	[RDMA_NLDEV_CMD_SYS_SET] = {
+ 		.doit = nldev_set_sys_set_doit,
++		.flags = RDMA_NL_ADMIN_PERM,
+ 	},
+ 	[RDMA_NLDEV_CMD_STAT_SET] = {
+ 		.doit = nldev_stat_set_doit,
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 7c9c79c139411..508d6712e14dd 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
+ 	if (hdr->in_words * 4 != count)
+ 		return -EINVAL;
+ 
+-	if (count < method_elm->req_size + sizeof(hdr)) {
++	if (count < method_elm->req_size + sizeof(*hdr)) {
+ 		/*
+ 		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
+ 		 * with a 16 byte write instead of 24. Old kernels didn't
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index bc3aea4592b9d..27da7f9f4ebdc 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -664,7 +664,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ 		blocked = cookie & RCFW_CMD_IS_BLOCKING;
+ 		cookie &= RCFW_MAX_COOKIE_VALUE;
+ 		crsqe = &rcfw->crsqe_tbl[cookie];
+-		crsqe->is_in_used = false;
+ 
+ 		if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
+ 				       &rcfw->cmdq.flags),
+@@ -680,8 +679,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ 			atomic_dec(&rcfw->timeout_send);
+ 
+ 		if (crsqe->is_waiter_alive) {
+-			if (crsqe->resp)
++			if (crsqe->resp) {
+ 				memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
++				/* Insert write memory barrier to ensure that
++				 * response data is copied before clearing the
++				 * flags
++				 */
++				smp_wmb();
++			}
+ 			if (!blocked)
+ 				wait_cmds++;
+ 		}
+@@ -693,6 +698,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ 		if (!is_waiter_alive)
+ 			crsqe->resp = NULL;
+ 
++		crsqe->is_in_used = false;
++
+ 		hwq->cons += req_size;
+ 
+ 		/* This is a case to handle below scenario -
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 24ee79aa2122e..88f534cf690e9 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+ static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
+ {
+ 	int i;
+-	char buff[11];
++	char buff[12];
+ 	struct mlx4_ib_iov_port *port = NULL;
+ 	int ret = 0 ;
+ 	struct ib_port_attr attr;
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 1e419e080b535..520034acf73aa 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -2470,8 +2470,8 @@ destroy_res:
+ 	mlx5_steering_anchor_destroy_res(ft_prio);
+ put_flow_table:
+ 	put_flow_table(dev, ft_prio, true);
+-	mutex_unlock(&dev->flow_db->lock);
+ free_obj:
++	mutex_unlock(&dev->flow_db->lock);
+ 	kfree(obj);
+ 
+ 	return err;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index f0b394ed7452a..666e737371b76 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2070,7 +2070,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+ 	case MLX5_IB_MMAP_DEVICE_MEM:
+ 		return "Device Memory";
+ 	default:
+-		return NULL;
++		return "Unknown";
+ 	}
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 2017ede100a62..2f81e69f7b300 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+ 
+ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+ {
+-	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
++	set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
++				      ent->dev->umrc.pd);
+ 	MLX5_SET(mkc, mkc, free, 1);
+ 	MLX5_SET(mkc, mkc, umr_en, 1);
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
+@@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
+ 	if (!dev->cache.wq)
+ 		return;
+ 
+-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
+ 	mutex_lock(&dev->cache.rb_lock);
+ 	for (node = rb_first(root); node; node = rb_next(node)) {
+ 		ent = rb_entry(node, struct mlx5_cache_ent, node);
+ 		xa_lock_irq(&ent->mkeys);
+ 		ent->disabled = true;
+ 		xa_unlock_irq(&ent->mkeys);
+-		cancel_delayed_work_sync(&ent->dwork);
+ 	}
++	mutex_unlock(&dev->cache.rb_lock);
++
++	/*
++	 * After all entries are disabled and will not reschedule on WQ,
++	 * flush it and all async commands.
++	 */
++	flush_workqueue(dev->cache.wq);
+ 
+ 	mlx5_mkey_cache_debugfs_cleanup(dev);
+ 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
+ 
++	/* At this point all entries are disabled and have no concurrent work. */
++	mutex_lock(&dev->cache.rb_lock);
+ 	node = rb_first(root);
+ 	while (node) {
+ 		ent = rb_entry(node, struct mlx5_cache_ent, node);
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index a2605178f4eda..43e776073f49f 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
+ 			siw_cep_put(cep);
+ 			new_cep->listen_cep = NULL;
+ 			if (rv) {
++				siw_cancel_mpatimer(new_cep);
+ 				siw_cep_set_free(new_cep);
+ 				goto error;
+ 			}
+@@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w)
+ 				/*
+ 				 * Socket close before MPA request received.
+ 				 */
+-				siw_dbg_cep(cep, "no mpareq: drop listener\n");
+-				siw_cep_put(cep->listen_cep);
+-				cep->listen_cep = NULL;
++				if (cep->listen_cep) {
++					siw_dbg_cep(cep,
++						"no mpareq: drop listener\n");
++					siw_cep_put(cep->listen_cep);
++					cep->listen_cep = NULL;
++				}
+ 			}
+ 		}
+ 		release_cep = 1;
+@@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
+ 	if (!cep)
+ 		goto out;
+ 
+-	siw_dbg_cep(cep, "state: %d\n", cep->state);
++	siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
++		    cep->state, sk->sk_state);
++
++	if (sk->sk_state != TCP_ESTABLISHED)
++		goto out;
+ 
+ 	switch (cep->state) {
+ 	case SIW_EPSTATE_RDMA_MODE:
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 1574218764e0a..2916e77f589b8 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 	u32 tag;
+ 	u16 ch_idx;
+ 	struct srp_rdma_ch *ch;
+-	int ret;
+ 
+ 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+ 
+@@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 	shost_printk(KERN_ERR, target->scsi_host,
+ 		     "Sending SRP abort for tag %#x\n", tag);
+ 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+-			      SRP_TSK_ABORT_TASK, NULL) == 0)
+-		ret = SUCCESS;
+-	else if (target->rport->state == SRP_RPORT_LOST)
+-		ret = FAST_IO_FAIL;
+-	else
+-		ret = FAILED;
+-	if (ret == SUCCESS) {
++			      SRP_TSK_ABORT_TASK, NULL) == 0) {
+ 		srp_free_req(ch, req, scmnd, 0);
+-		scmnd->result = DID_ABORT << 16;
+-		scsi_done(scmnd);
++		return SUCCESS;
+ 	}
++	if (target->rport->state == SRP_RPORT_LOST)
++		return FAST_IO_FAIL;
+ 
+-	return ret;
++	return FAILED;
+ }
+ 
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
+diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
+index 8af64b57f0483..f4d8804082545 100644
+--- a/drivers/iommu/apple-dart.c
++++ b/drivers/iommu/apple-dart.c
+@@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
+ 		return ret;
+ 
+ 	switch (domain->type) {
+-	case IOMMU_DOMAIN_DMA:
+-	case IOMMU_DOMAIN_UNMANAGED:
++	default:
+ 		ret = apple_dart_domain_add_streams(dart_domain, cfg);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 9b0dc35056019..6ccbae9b93a14 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+ 		/* Get the leaf page size */
+ 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+ 
++		num_pages = size >> tg;
++
+ 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ 		cmd->tlbi.tg = (tg - 10) / 2;
+ 
+ 		/*
+-		 * Determine what level the granule is at. For non-leaf, io-pgtable
+-		 * assumes .tlb_flush_walk can invalidate multiple levels at once,
+-		 * so ignore the nominal last-level granule and leave TTL=0.
++		 * Determine what level the granule is at. For non-leaf, both
++		 * io-pgtable and SVA pass a nominal last-level granule because
++		 * they don't know what level(s) actually apply, so ignore that
++		 * and leave TTL=0. However for various errata reasons we still
++		 * want to use a range command, so avoid the SVA corner case
++		 * where both scale and num could be 0 as well.
+ 		 */
+ 		if (cmd->tlbi.leaf)
+ 			cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+-
+-		num_pages = size >> tg;
++		else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
++			num_pages++;
+ 	}
+ 
+ 	cmds.num = 0;
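
[Editor's note: the SMMUv3 hunk above avoids a range-TLBI encoding corner
case: for a non-leaf invalidation of exactly one granule, both the scale and
num fields of the range command could be zero, so the count is bumped to keep
a well-formed range encoding. A much-simplified model of the num_pages
computation; the field width and sizes are assumptions, not the hardware
spec.]

#include <stdio.h>

#define RANGE_NUM_MAX 31	/* low count field of the range command (assumed) */

int main(void)
{
	unsigned long size = 1UL << 12;		/* one 4K page to invalidate */
	unsigned int tg = 12;			/* log2 of the leaf granule */
	unsigned long num_pages = size >> tg;	/* = 1 */
	int leaf = 0;

	/* Round a non-leaf count of exactly 1 up, as the patch does. */
	if (!leaf && (num_pages & RANGE_NUM_MAX) == 1)
		num_pages++;

	printf("invalidating %lu page(s)\n", num_pages);
	return 0;
}
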
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5c8c5cdc36cf5..4a9d9e82847d5 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3004,13 +3004,6 @@ static int iommu_suspend(void)
+ 	struct intel_iommu *iommu = NULL;
+ 	unsigned long flag;
+ 
+-	for_each_active_iommu(iommu, drhd) {
+-		iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
+-					     GFP_KERNEL);
+-		if (!iommu->iommu_state)
+-			goto nomem;
+-	}
+-
+ 	iommu_flush_all();
+ 
+ 	for_each_active_iommu(iommu, drhd) {
+@@ -3030,12 +3023,6 @@ static int iommu_suspend(void)
+ 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ 	}
+ 	return 0;
+-
+-nomem:
+-	for_each_active_iommu(iommu, drhd)
+-		kfree(iommu->iommu_state);
+-
+-	return -ENOMEM;
+ }
+ 
+ static void iommu_resume(void)
+@@ -3067,9 +3054,6 @@ static void iommu_resume(void)
+ 
+ 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ 	}
+-
+-	for_each_active_iommu(iommu, drhd)
+-		kfree(iommu->iommu_state);
+ }
+ 
+ static struct syscore_ops iommu_syscore_ops = {
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index 1c5e1d88862ba..c74f15161d2a5 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -680,7 +680,7 @@ struct intel_iommu {
+ 	struct iopf_queue *iopf_queue;
+ 	unsigned char iopfq_name[16];
+ 	struct q_inval  *qi;            /* Queued invalidation info */
+-	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
++	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
+ 
+ #ifdef CONFIG_IRQ_REMAP
+ 	struct ir_table *ir_table;	/* Interrupt remapping info */
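
[Editor's note: embedding the save area in struct intel_iommu trades a little
static memory for removing an allocation that could fail on the suspend path,
along with both cleanup loops and the nomem error path. A sketch of the
resulting shape; MAX_SR_REGS and the register values are stand-ins.]

#include <stdint.h>
#include <stdio.h>

#define MAX_SR_REGS 4	/* stand-in for MAX_SR_DMAR_REGS */

struct dev_state {
	/* Fixed-size member: no allocation, no failure, no kfree. */
	uint32_t saved_regs[MAX_SR_REGS];
};

static void dev_suspend(struct dev_state *s)
{
	for (int i = 0; i < MAX_SR_REGS; i++)
		s->saved_regs[i] = 0xdead0000u | i;	/* pretend HW read */
}

static void dev_resume(const struct dev_state *s)
{
	for (int i = 0; i < MAX_SR_REGS; i++)
		printf("restore reg %d = 0x%x\n", i, s->saved_regs[i]);
}

int main(void)
{
	struct dev_state s;

	dev_suspend(&s);
	dev_resume(&s);
	return 0;
}
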
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index c2764891a779c..ef27f9f1e17ef 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -258,7 +258,7 @@ struct mtk_iommu_data {
+ 	struct device			*smicomm_dev;
+ 
+ 	struct mtk_iommu_bank_data	*bank;
+-	struct mtk_iommu_domain		*share_dom; /* For 2 HWs share pgtable */
++	struct mtk_iommu_domain		*share_dom;
+ 
+ 	struct regmap			*pericfg;
+ 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */
+@@ -625,8 +625,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	struct mtk_iommu_domain	*share_dom = data->share_dom;
+ 	const struct mtk_iommu_iova_region *region;
+ 
+-	/* Always use share domain in sharing pgtable case */
+-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
++	/* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */
++	if (share_dom) {
+ 		dom->iop = share_dom->iop;
+ 		dom->cfg = share_dom->cfg;
+ 		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+@@ -659,8 +659,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	/* Update our support page sizes bitmap */
+ 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ 
+-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
+-		data->share_dom = dom;
++	data->share_dom = dom;
+ 
+ update_iova_region:
+ 	/* Update the iova region for this domain */
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index 04f9ea675f2ce..214ed81eb0e92 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -479,10 +479,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ 
+ 	led_parse_fwnode_props(dev, fwnode, &props);
+ 
+-	/* We want to label LEDs that can produce full range of colors
+-	 * as RGB, not multicolor */
+-	BUG_ON(props.color == LED_COLOR_ID_MULTI);
+-
+ 	if (props.label) {
+ 		/*
+ 		 * If init_data.devicename is NULL, then it indicates that
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index ad8e670a2f9be..b487f7acc860f 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -748,17 +748,16 @@ err:
+ /*
+  * Cleanup zoned device information.
+  */
+-static void dmz_put_zoned_device(struct dm_target *ti)
++static void dmz_put_zoned_devices(struct dm_target *ti)
+ {
+ 	struct dmz_target *dmz = ti->private;
+ 	int i;
+ 
+-	for (i = 0; i < dmz->nr_ddevs; i++) {
+-		if (dmz->ddev[i]) {
++	for (i = 0; i < dmz->nr_ddevs; i++)
++		if (dmz->ddev[i])
+ 			dm_put_device(ti, dmz->ddev[i]);
+-			dmz->ddev[i] = NULL;
+-		}
+-	}
++
++	kfree(dmz->ddev);
+ }
+ 
+ static int dmz_fixup_devices(struct dm_target *ti)
+@@ -948,7 +947,7 @@ err_bio:
+ err_meta:
+ 	dmz_dtr_metadata(dmz->metadata);
+ err_dev:
+-	dmz_put_zoned_device(ti);
++	dmz_put_zoned_devices(ti);
+ err:
+ 	kfree(dmz->dev);
+ 	kfree(dmz);
+@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
+ 
+ 	bioset_exit(&dmz->bio_set);
+ 
+-	dmz_put_zoned_device(ti);
++	dmz_put_zoned_devices(ti);
+ 
+ 	mutex_destroy(&dmz->chunk_lock);
+ 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 85b3004594e03..e6ac4d0d94d86 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ 
+ 		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ 		r5l_wake_reclaim(conf->log, 0);
++
++		/* release batch_last before wait to avoid risk of deadlock */
++		if (ctx && ctx->batch_last) {
++			raid5_release_stripe(ctx->batch_last);
++			ctx->batch_last = NULL;
++		}
++
+ 		wait_event_lock_irq(conf->wait_for_stripe,
+ 				    is_inactive_blocked(conf, hash),
+ 				    *(conf->hash_locks + hash));
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 8b91a55ec0d28..8ee51e49fced5 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -894,6 +894,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 		return -EINVAL;
+ 	}
+ 
++	/* UBI cannot work on flashes with zero erasesize. */
++	if (!mtd->erasesize) {
++		pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
++			mtd->index);
++		return -EINVAL;
++	}
++
+ 	if (ubi_num == UBI_DEV_NUM_AUTO) {
+ 		/* Search for an empty slot in the @ubi_devices array */
+ 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
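
[Editor's note: validating erasesize up front matters because attach-time
sizing divides by it; a zero from a degenerate MTD device would otherwise
surface later as a divide-by-zero. A trivial sketch of the guard, with a
made-up mtd_info and geometry.]

#include <stdio.h>

struct mtd_info { int index; unsigned int size, erasesize; };

static int attach(const struct mtd_info *mtd)
{
	/* Reject the degenerate geometry before any size / erasesize math. */
	if (!mtd->erasesize) {
		fprintf(stderr, "mtd%d: zero erasesize not supported\n",
			mtd->index);
		return -1;
	}
	printf("mtd%d: %u erase blocks\n", mtd->index,
	       mtd->size / mtd->erasesize);
	return 0;
}

int main(void)
{
	struct mtd_info bad = { .index = 0, .size = 1 << 20, .erasesize = 0 };
	struct mtd_info ok  = { .index = 1, .size = 1 << 20,
				.erasesize = 1 << 16 };

	attach(&bad);
	return attach(&ok);
}
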
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 7af2f08a62f14..0d4b236d1e344 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3040,14 +3040,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		 * from the wrong location resulting in the switch booting
+ 		 * to wrong mode and inoperable.
+ 		 */
+-		mv88e6xxx_g1_wait_eeprom_done(chip);
++		if (chip->info->ops->get_eeprom)
++			mv88e6xxx_g2_eeprom_wait(chip);
+ 
+ 		gpiod_set_value_cansleep(gpiod, 1);
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
+ 
+-		mv88e6xxx_g1_wait_eeprom_done(chip);
++		if (chip->info->ops->get_eeprom)
++			mv88e6xxx_g2_eeprom_wait(chip);
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 2fa55a6435910..174c773b38c2b 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
+-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+-{
+-	const unsigned long timeout = jiffies + 1 * HZ;
+-	u16 val;
+-	int err;
+-
+-	/* Wait up to 1 second for the switch to finish reading the
+-	 * EEPROM.
+-	 */
+-	while (time_before(jiffies, timeout)) {
+-		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+-		if (err) {
+-			dev_err(chip->dev, "Error reading status");
+-			return;
+-		}
+-
+-		/* If the switch is still resetting, it may not
+-		 * respond on the bus, and so MDIO read returns
+-		 * 0xffff. Differentiate between that, and waiting for
+-		 * the EEPROM to be done by bit 0 being set.
+-		 */
+-		if (val != 0xffff &&
+-		    val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
+-			return;
+-
+-		usleep_range(1000, 2000);
+-	}
+-
+-	dev_err(chip->dev, "Timeout waiting for EEPROM done");
+-}
+-
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index c99ddd117fe6e..1095261f5b490 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
+index 937a01f2ba75e..b2b5f6ba438f4 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.c
++++ b/drivers/net/dsa/mv88e6xxx/global2.c
+@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
+  * Offset 0x15: EEPROM Addr (for 8-bit data access)
+  */
+ 
+-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+ {
+ 	int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
+ 	int err;
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
+index 7e091965582b7..d9434f7cae538 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.h
++++ b/drivers/net/dsa/mv88e6xxx/global2.h
+@@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ 				      int port);
++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
+ 
+ extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
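
[Editor's note: the mv88e6xxx change above swaps the Global 1 status poll for
the existing Global 2 EEPROM busy-bit wait, gated on whether the chip has an
EEPROM at all. The underlying shape is the classic poll-with-timeout loop; a
generic sketch follows, with the register read simulated and the bit position
assumed.]

#include <stdio.h>
#include <stdint.h>

#define BUSY_BIT (1u << 15)

/* Simulated register read: busy for the first few polls. */
static uint16_t read_status(void)
{
	static int polls;
	return ++polls < 3 ? BUSY_BIT : 0;
}

static int wait_not_busy(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		uint16_t val = read_status();

		if (!(val & BUSY_BIT))
			return 0;	/* done */
		/* A real driver would usleep_range() between polls. */
	}
	return -1;			/* timed out */
}

int main(void)
{
	return wait_not_busy(16) ? 1 : 0;
}
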
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 832a2ae019509..a8d79ee350f8d 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+ 	 * the user space for finding a flow. During this process, OVS computes
+ 	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
+ 	 *
+-	 * So, re-compute TCP pseudo header checksum when configured for
+-	 * trunk mode.
++	 * So, re-compute TCP pseudo header checksum.
+ 	 */
++
+ 	if (iph_proto == IPPROTO_TCP) {
+ 		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
++
+ 		if (tcph->check == 0x0000) {
+ 			/* Recompute TCP pseudo header checksum  */
+-			if (adapter->is_active_trunk) {
+-				tcphdrlen = skb->len - iphlen;
+-				if (skb_proto == ETH_P_IP)
+-					tcph->check =
+-					 ~csum_tcpudp_magic(iph->saddr,
+-					iph->daddr, tcphdrlen, iph_proto, 0);
+-				else if (skb_proto == ETH_P_IPV6)
+-					tcph->check =
+-					 ~csum_ipv6_magic(&iph6->saddr,
+-					&iph6->daddr, tcphdrlen, iph_proto, 0);
+-			}
++			tcphdrlen = skb->len - iphlen;
++			if (skb_proto == ETH_P_IP)
++				tcph->check =
++				 ~csum_tcpudp_magic(iph->saddr,
++				iph->daddr, tcphdrlen, iph_proto, 0);
++			else if (skb_proto == ETH_P_IPV6)
++				tcph->check =
++				 ~csum_ipv6_magic(&iph6->saddr,
++				&iph6->daddr, tcphdrlen, iph_proto, 0);
+ 			/* Setup SKB fields for checksum offload */
+ 			skb_partial_csum_set(skb, iphlen,
+ 					     offsetof(struct tcphdr, check));
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index dcf628b1fccd9..33ac6c4a8928f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -2615,12 +2615,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
+ 		goto err;
+ 	}
+ 
+-	/* Read flexiflag registers to determine whether the
+-	 * corresponding RXDID is configured and supported or not.
+-	 * Since Legacy 16byte descriptor format is not supported,
+-	 * start from Legacy 32byte descriptor.
++	/* RXDIDs supported by DDP package can be read from the register
++	 * to get the supported RXDID bitmap. But the legacy 32byte RXDID
++	 * is not listed in the DDP package, so add it to the bitmap manually.
++	 * Legacy 16byte descriptor is not supported.
+ 	 */
+-	for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
++	rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
++
++	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ 		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 7f0807672071f..fa7fd42a9f024 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3036,8 +3036,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ 
+ 	eth->rx_events++;
+ 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-		__napi_schedule(&eth->rx_napi);
+ 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++		__napi_schedule(&eth->rx_napi);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -3049,8 +3049,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+ 
+ 	eth->tx_events++;
+ 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+-		__napi_schedule(&eth->tx_napi);
+ 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
++		__napi_schedule(&eth->tx_napi);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
+index 329e374b9539c..43ba71e82260c 100644
+--- a/drivers/net/ethernet/microchip/Kconfig
++++ b/drivers/net/ethernet/microchip/Kconfig
+@@ -46,6 +46,7 @@ config LAN743X
+ 	tristate "LAN743x support"
+ 	depends on PCI
+ 	depends on PTP_1588_CLOCK_OPTIONAL
++	select PHYLIB
+ 	select FIXED_PHY
+ 	select CRC16
+ 	select CRC32
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index c2ad0921e893c..3cfbebda7d58b 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -89,63 +89,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
++			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
++{
++	ash->dma_handle[sg_i] = da;
++	ash->size[sg_i] = sge_len;
++
++	tp->wqe_req.sgl[sg_i].address = da;
++	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
++	tp->wqe_req.sgl[sg_i].size = sge_len;
++}
++
+ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
+-			struct mana_tx_package *tp)
++			struct mana_tx_package *tp, int gso_hs)
+ {
+ 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
++	int hsg = 1; /* num of SGEs of linear part */
+ 	struct gdma_dev *gd = apc->ac->gdma_dev;
++	int skb_hlen = skb_headlen(skb);
++	int sge0_len, sge1_len = 0;
+ 	struct gdma_context *gc;
+ 	struct device *dev;
+ 	skb_frag_t *frag;
+ 	dma_addr_t da;
++	int sg_i;
+ 	int i;
+ 
+ 	gc = gd->gdma_context;
+ 	dev = gc->dev;
+-	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ 
++	if (gso_hs && gso_hs < skb_hlen) {
++		sge0_len = gso_hs;
++		sge1_len = skb_hlen - gso_hs;
++	} else {
++		sge0_len = skb_hlen;
++	}
++
++	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(dev, da))
+ 		return -ENOMEM;
+ 
+-	ash->dma_handle[0] = da;
+-	ash->size[0] = skb_headlen(skb);
++	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
+ 
+-	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
+-	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
+-	tp->wqe_req.sgl[0].size = ash->size[0];
++	if (sge1_len) {
++		sg_i = 1;
++		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
++				    DMA_TO_DEVICE);
++		if (dma_mapping_error(dev, da))
++			goto frag_err;
++
++		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
++		hsg = 2;
++	}
+ 
+ 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++		sg_i = hsg + i;
++
+ 		frag = &skb_shinfo(skb)->frags[i];
+ 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ 				      DMA_TO_DEVICE);
+-
+ 		if (dma_mapping_error(dev, da))
+ 			goto frag_err;
+ 
+-		ash->dma_handle[i + 1] = da;
+-		ash->size[i + 1] = skb_frag_size(frag);
+-
+-		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
+-		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
+-		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
++		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
++			     gd->gpa_mkey);
+ 	}
+ 
+ 	return 0;
+ 
+ frag_err:
+-	for (i = i - 1; i >= 0; i--)
+-		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
++	for (i = sg_i - 1; i >= hsg; i--)
++		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
+ 			       DMA_TO_DEVICE);
+ 
+-	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
++	for (i = hsg - 1; i >= 0; i--)
++		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
++				 DMA_TO_DEVICE);
+ 
+ 	return -ENOMEM;
+ }
+ 
++/* Handle the case when GSO SKB linear length is too large.
++ * The MANA NIC requires GSO packets to put only the packet header in SGE0,
++ * so we need 2 SGEs for an skb linear part that contains more than the
++ * header.
++ * Return a positive value for the number of SGEs, or a negative value
++ * for an error.
++ */
++static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
++			     int gso_hs)
++{
++	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
++	int skb_hlen = skb_headlen(skb);
++
++	if (gso_hs < skb_hlen) {
++		num_sge++;
++	} else if (gso_hs > skb_hlen) {
++		if (net_ratelimit())
++			netdev_err(ndev,
++				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
++				   gso_hs, skb_hlen);
++
++		return -EINVAL;
++	}
++
++	return num_sge;
++}
++
++/* Get the GSO packet's header size */
++static int mana_get_gso_hs(struct sk_buff *skb)
++{
++	int gso_hs;
++
++	if (skb->encapsulation) {
++		gso_hs = skb_inner_tcp_all_headers(skb);
++	} else {
++		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
++			gso_hs = skb_transport_offset(skb) +
++				 sizeof(struct udphdr);
++		} else {
++			gso_hs = skb_tcp_all_headers(skb);
++		}
++	}
++
++	return gso_hs;
++}
++
+ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
+ 	struct mana_port_context *apc = netdev_priv(ndev);
++	int gso_hs = 0; /* zero for non-GSO pkts */
+ 	u16 txq_idx = skb_get_queue_mapping(skb);
+ 	struct gdma_dev *gd = apc->ac->gdma_dev;
+ 	bool ipv4 = false, ipv6 = false;
+@@ -157,7 +231,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	struct mana_txq *txq;
+ 	struct mana_cq *cq;
+ 	int err, len;
+-	u16 ihs;
+ 
+ 	if (unlikely(!apc->port_is_up))
+ 		goto tx_drop;
+@@ -207,19 +280,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	pkg.wqe_req.client_data_unit = 0;
+ 
+ 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
+-	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
+-
+-	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+-		pkg.wqe_req.sgl = pkg.sgl_array;
+-	} else {
+-		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+-					    sizeof(struct gdma_sge),
+-					    GFP_ATOMIC);
+-		if (!pkg.sgl_ptr)
+-			goto tx_drop_count;
+-
+-		pkg.wqe_req.sgl = pkg.sgl_ptr;
+-	}
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		ipv4 = true;
+@@ -227,6 +287,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		ipv6 = true;
+ 
+ 	if (skb_is_gso(skb)) {
++		int num_sge;
++
++		gso_hs = mana_get_gso_hs(skb);
++
++		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
++		if (num_sge > 0)
++			pkg.wqe_req.num_sge = num_sge;
++		else
++			goto tx_drop_count;
++
++		u64_stats_update_begin(&tx_stats->syncp);
++		if (skb->encapsulation) {
++			tx_stats->tso_inner_packets++;
++			tx_stats->tso_inner_bytes += skb->len - gso_hs;
++		} else {
++			tx_stats->tso_packets++;
++			tx_stats->tso_bytes += skb->len - gso_hs;
++		}
++		u64_stats_update_end(&tx_stats->syncp);
++
+ 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+ 
+@@ -250,28 +330,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 						 &ipv6_hdr(skb)->daddr, 0,
+ 						 IPPROTO_TCP, 0);
+ 		}
+-
+-		if (skb->encapsulation) {
+-			ihs = skb_inner_tcp_all_headers(skb);
+-			u64_stats_update_begin(&tx_stats->syncp);
+-			tx_stats->tso_inner_packets++;
+-			tx_stats->tso_inner_bytes += skb->len - ihs;
+-			u64_stats_update_end(&tx_stats->syncp);
+-		} else {
+-			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+-				ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+-			} else {
+-				ihs = skb_tcp_all_headers(skb);
+-				if (ipv6_has_hopopt_jumbo(skb))
+-					ihs -= sizeof(struct hop_jumbo_hdr);
+-			}
+-
+-			u64_stats_update_begin(&tx_stats->syncp);
+-			tx_stats->tso_packets++;
+-			tx_stats->tso_bytes += skb->len - ihs;
+-			u64_stats_update_end(&tx_stats->syncp);
+-		}
+-
+ 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		csum_type = mana_checksum_info(skb);
+ 
+@@ -294,11 +352,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		} else {
+ 			/* Can't do offload of this type of checksum */
+ 			if (skb_checksum_help(skb))
+-				goto free_sgl_ptr;
++				goto tx_drop_count;
+ 		}
+ 	}
+ 
+-	if (mana_map_skb(skb, apc, &pkg)) {
++	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
++
++	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
++		pkg.wqe_req.sgl = pkg.sgl_array;
++	} else {
++		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
++					    sizeof(struct gdma_sge),
++					    GFP_ATOMIC);
++		if (!pkg.sgl_ptr)
++			goto tx_drop_count;
++
++		pkg.wqe_req.sgl = pkg.sgl_ptr;
++	}
++
++	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
+ 		u64_stats_update_begin(&tx_stats->syncp);
+ 		tx_stats->mana_map_err++;
+ 		u64_stats_update_end(&tx_stats->syncp);
+@@ -1256,11 +1328,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+ 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ 	struct device *dev = gc->dev;
+-	int i;
++	int hsg, i;
+ 
+-	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
++	/* Number of SGEs of linear part */
++	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
+ 
+-	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
++	for (i = 0; i < hsg; i++)
++		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
++				 DMA_TO_DEVICE);
++
++	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
+ 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
+ 			       DMA_TO_DEVICE);
+ }
+@@ -1315,19 +1392,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
+ 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
+ 		case CQE_TX_VPORT_DISABLED:
+ 		case CQE_TX_VLAN_TAGGING_VIOLATION:
+-			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
+-				  cqe_oob->cqe_hdr.cqe_type);
++			if (net_ratelimit())
++				netdev_err(ndev, "TX: CQE error %d\n",
++					   cqe_oob->cqe_hdr.cqe_type);
++
+ 			apc->eth_stats.tx_cqe_err++;
+ 			break;
+ 
+ 		default:
+-			/* If the CQE type is unexpected, log an error, assert,
+-			 * and go through the error path.
++			/* If the CQE type is unknown, log an error,
++			 * and still free the SKB, update tail, etc.
+ 			 */
+-			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
+-				  cqe_oob->cqe_hdr.cqe_type);
++			if (net_ratelimit())
++				netdev_err(ndev, "TX: unknown CQE type %d\n",
++					   cqe_oob->cqe_hdr.cqe_type);
++
+ 			apc->eth_stats.tx_cqe_unknown_type++;
+-			return;
++			break;
+ 		}
+ 
+ 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
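
[Editor's note: the core of the mana_en.c change above is splitting the skb
linear area: when the GSO header size is smaller than skb_headlen(), the
header goes in SGE0 and the rest of the linear data in SGE1, so payload never
rides in the header SGE. The arithmetic in isolation, with example lengths:]

#include <stdio.h>

int main(void)
{
	int skb_hlen = 200;	/* linear bytes in the skb (example) */
	int gso_hs = 54;	/* GSO header size (example) */
	int sge0_len, sge1_len = 0;

	if (gso_hs && gso_hs < skb_hlen) {
		sge0_len = gso_hs;		/* SGE0: headers only */
		sge1_len = skb_hlen - gso_hs;	/* SGE1: rest of linear part */
	} else {
		sge0_len = skb_hlen;		/* non-GSO: one linear SGE */
	}

	printf("SGE0=%d bytes, SGE1=%d bytes, linear SGEs=%d\n",
	       sge0_len, sge1_len, sge1_len ? 2 : 1);
	return 0;
}
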
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+index 0bfc375161ed6..a174c6fc626ac 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+@@ -110,9 +110,9 @@ struct qed_ll2_info {
+ 	enum core_tx_dest tx_dest;
+ 	u8 tx_stats_en;
+ 	bool main_func_queue;
++	struct qed_ll2_cbs cbs;
+ 	struct qed_ll2_rx_queue rx_queue;
+ 	struct qed_ll2_tx_queue tx_queue;
+-	struct qed_ll2_cbs cbs;
+ };
+ 
+ extern const struct qed_ll2_ops qed_ll2_ops_pass;
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 449ed1f5624c9..660cbfe344d2c 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -4,6 +4,7 @@
+  * Copyright (C) 2022 Renesas Electronics Corporation
+  */
+ 
++#include <linux/clk.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/etherdevice.h>
+@@ -799,6 +800,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
+ 	struct net_device *ndev = napi->dev;
+ 	struct rswitch_private *priv;
+ 	struct rswitch_device *rdev;
++	unsigned long flags;
+ 	int quota = budget;
+ 
+ 	rdev = netdev_priv(ndev);
+@@ -817,8 +819,10 @@ retry:
+ 	netif_wake_subqueue(ndev, 0);
+ 
+ 	if (napi_complete_done(napi, budget - quota)) {
++		spin_lock_irqsave(&priv->lock, flags);
+ 		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ 		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+ out:
+@@ -835,8 +839,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
+ 
+ 	if (napi_schedule_prep(&rdev->napi)) {
++		spin_lock(&rdev->priv->lock);
+ 		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
+ 		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
++		spin_unlock(&rdev->priv->lock);
+ 		__napi_schedule(&rdev->napi);
+ 	}
+ }
+@@ -1044,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
+ static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
+ {
+ 	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+-		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
++		       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
+ 	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ }
+ 
+@@ -1430,14 +1436,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
+ static int rswitch_open(struct net_device *ndev)
+ {
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
++	unsigned long flags;
+ 
+ 	phy_start(ndev->phydev);
+ 
+ 	napi_enable(&rdev->napi);
+ 	netif_start_queue(ndev);
+ 
++	spin_lock_irqsave(&rdev->priv->lock, flags);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
++	spin_unlock_irqrestore(&rdev->priv->lock, flags);
+ 
+ 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+@@ -1451,6 +1460,7 @@ static int rswitch_stop(struct net_device *ndev)
+ {
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
+ 	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
++	unsigned long flags;
+ 
+ 	netif_tx_stop_all_queues(ndev);
+ 	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
+@@ -1466,8 +1476,10 @@ static int rswitch_stop(struct net_device *ndev)
+ 		kfree(ts_info);
+ 	}
+ 
++	spin_lock_irqsave(&rdev->priv->lock, flags);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
++	spin_unlock_irqrestore(&rdev->priv->lock, flags);
+ 
+ 	phy_stop(ndev->phydev);
+ 	napi_disable(&rdev->napi);
+@@ -1670,6 +1682,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
+ 	etha->index = index;
+ 	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
+ 	etha->coma_addr = priv->addr;
++
++	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1.
++	 * The PSMCS value is calculated for an MDC frequency of 2.5 MHz, so
++	 * both the numerator and the denominator are multiplied by 10.
++	 */
++	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
+ }
+ 
+ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
+@@ -1869,6 +1887,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
++	spin_lock_init(&priv->lock);
++
++	priv->clk = devm_clk_get(&pdev->dev, NULL);
++	if (IS_ERR(priv->clk))
++		return PTR_ERR(priv->clk);
+ 
+ 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ 	if (!priv->ptp_priv)
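
[Editor's note: the rswitch change above derives the PSMCS MDC divider from
the actual module clock instead of a hard-coded 0x05. Because the target MDC
frequency of 2.5 MHz is not an integer, both terms are scaled by 10:
clk / 100000 / (25 * 2) - 1. Checking the arithmetic standalone, with an
example clock rate:]

#include <stdio.h>

int main(void)
{
	unsigned long clk_hz = 320000000;	/* example module clock */

	/*
	 * PSMCS = clk[MHz] / (2.5 MHz * 2) - 1, computed with numerator
	 * and denominator both scaled by 10 to stay in integers.
	 */
	unsigned long psmcs = clk_hz / 100000 / (25 * 2) - 1;

	printf("PSMCS = %lu\n", psmcs);	/* 320 MHz -> 63 */
	return 0;
}
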
+diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
+index bb9ed971a97ca..13a401cebd8b7 100644
+--- a/drivers/net/ethernet/renesas/rswitch.h
++++ b/drivers/net/ethernet/renesas/rswitch.h
+@@ -915,6 +915,7 @@ struct rswitch_etha {
+ 	bool external_phy;
+ 	struct mii_bus *mii;
+ 	phy_interface_t phy_interface;
++	u32 psmcs;
+ 	u8 mac_addr[MAX_ADDR_LEN];
+ 	int link;
+ 	int speed;
+@@ -1011,6 +1012,9 @@ struct rswitch_private {
+ 	struct rswitch_etha etha[RSWITCH_NUM_PORTS];
+ 	struct rswitch_mfwd mfwd;
+ 
++	spinlock_t lock;	/* lock interrupt registers' control */
++	struct clk *clk;
++
+ 	bool gwca_halt;
+ };
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+index bdb4de59a6727..28c8ca5fba6c5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+@@ -105,6 +105,7 @@ struct stm32_ops {
+ 	int (*parse_data)(struct stm32_dwmac *dwmac,
+ 			  struct device *dev);
+ 	u32 syscfg_eth_mask;
++	bool clk_rx_enable_in_suspend;
+ };
+ 
+ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!dwmac->dev->power.is_suspended) {
++	if (!dwmac->ops->clk_rx_enable_in_suspend ||
++	    !dwmac->dev->power.is_suspended) {
+ 		ret = clk_prepare_enable(dwmac->clk_rx);
+ 		if (ret) {
+ 			clk_disable_unprepare(dwmac->clk_tx);
+@@ -514,7 +516,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
+ 	.suspend = stm32mp1_suspend,
+ 	.resume = stm32mp1_resume,
+ 	.parse_data = stm32mp1_parse_data,
+-	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
++	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
++	.clk_rx_enable_in_suspend = true
+ };
+ 
+ static const struct of_device_id stm32_dwmac_match[] = {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 231152ee5a323..5a3bd30d6c220 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -901,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	int ret;
+ 
+-	ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv);
++	ret = stmmac_pltfr_init(pdev, priv->plat);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index bebcfd5e6b579..a3d952f67ae32 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -1749,6 +1749,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+ 		if (tx_chn->irq <= 0) {
+ 			dev_err(dev, "Failed to get tx dma irq %d\n",
+ 				tx_chn->irq);
++			ret = tx_chn->irq ?: -ENXIO;
+ 			goto err;
+ 		}
+ 
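The added `ret = tx_chn->irq ?: -ENXIO;` line relies on GNU C's conditional with an omitted middle operand. A small standalone sketch of that idiom, with a hypothetical helper name and the errno value defined locally:

#include <stdio.h>

#define ENXIO 6	/* Linux value; defined here for a standalone build */

static int irq_to_errno(int irq)
{
	/* GNU C's "a ?: b" expands to "a ? a : b": a zero IRQ becomes
	 * -ENXIO, an already-negative errno passes through unchanged */
	return irq ?: -ENXIO;
}

int main(void)
{
	printf("%d %d\n", irq_to_errno(0), irq_to_errno(-19));	/* -6 -19 */
	return 0;
}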
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 5d6454fedb3f1..78ad2da3ee29b 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 0, index, &buf, 4);
+-	if (unlikely(ret < 0)) {
++	if (unlikely(ret < 4)) {
++		ret = ret < 0 ? ret : -ENODATA;
++
+ 		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ 			    index, ret);
+ 		return ret;
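USB control transfers can complete "successfully" with fewer bytes than requested, which a plain `ret < 0` check misses. A standalone sketch of the tightened check, with the errno value defined locally:

#include <stdio.h>

#define ENODATA 61	/* Linux value; defined locally for the sketch */

static int check_read(int nread)
{
	if (nread < 4)				/* short transfer or error */
		return nread < 0 ? nread : -ENODATA;
	return 0;
}

int main(void)
{
	/* full read, 2-byte short read, hard error: 0 -61 -71 */
	printf("%d %d %d\n", check_read(4), check_read(2), check_read(-71));
	return 0;
}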
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 47c2ad7a3e429..fd50bb313b924 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -34,6 +34,8 @@
+ #define TDM_PPPOHT_SLIC_MAXIN
+ #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+ 
++static int uhdlc_close(struct net_device *dev);
++
+ static struct ucc_tdm_info utdm_primary_info = {
+ 	.uf_info = {
+ 		.tsa = 0,
+@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
+ 	hdlc_device *hdlc = dev_to_hdlc(dev);
+ 	struct ucc_hdlc_private *priv = hdlc->priv;
+ 	struct ucc_tdm *utdm = priv->utdm;
++	int rc = 0;
+ 
+ 	if (priv->hdlc_busy != 1) {
+ 		if (request_irq(priv->ut_info->uf_info.irq,
+@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
+ 		napi_enable(&priv->napi);
+ 		netdev_reset_queue(dev);
+ 		netif_start_queue(dev);
+-		hdlc_open(dev);
++
++		rc = hdlc_open(dev);
++		if (rc)
++			uhdlc_close(dev);
+ 	}
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
+@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
+ 	netdev_reset_queue(dev);
+ 	priv->hdlc_busy = 0;
+ 
++	hdlc_close(dev);
++
+ 	return 0;
+ }
+ 
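The open path now unwinds through the close path when its final step fails, instead of reporting success unconditionally. A compact standalone sketch of that unwind pattern, with stand-in names:

#include <stdio.h>

static void dev_close(void)
{
	/* stand-in for uhdlc_close(): stop queues, free the irq, ... */
}

static int dev_open(int hdlc_open_rc)
{
	int rc;

	/* ... earlier steps: enable napi, start queues ... */
	rc = hdlc_open_rc;	/* stand-in for hdlc_open(dev) */
	if (rc)
		dev_close();	/* unwind everything already enabled */
	return rc;		/* 0 on success, errno otherwise */
}

int main(void)
{
	printf("%d %d\n", dev_open(0), dev_open(-5));	/* 0 -5 */
	return 0;
}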
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+index bece26741d3a3..611d1a6aabb9e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
+ 				 * fixed parameter portion is assumed, otherwise
+ 				 * ssid in the fixed portion is ignored
+ 				 */
+-	__le16 channel_list[1];	/* list of chanspecs */
++	union {
++		__le16 padding;	/* Reserve space for at least 1 entry for abort
++				 * which uses an on stack brcmf_scan_params_v2_le
++				 */
++		DECLARE_FLEX_ARRAY(__le16, channel_list);	/* chanspecs */
++	};
+ };
+ 
+ struct brcmf_scan_results {
+@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
+ 
+ struct brcmf_chanspec_list {
+ 	__le32	count;		/* # of entries */
+-	__le32	element[1];	/* variable length uint32 list */
++	__le32  element[];	/* variable length uint32 list */
+ };
+ 
+ /*
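Both hunks replace one-element trailing arrays with real flexible array members, which compilers can bounds-check; in the union case a flexible array cannot stand alone, so DECLARE_FLEX_ARRAY() pairs it with a padding member that also reserves room for the on-stack abort request. A standalone sketch of the plain flexible-array form:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* mirrors the second hunk: element[1] becomes a true flexible array */
struct chanspec_list {
	uint32_t count;
	uint32_t element[];	/* variable length tail */
};

int main(void)
{
	size_t n = 3;
	struct chanspec_list *l =
		malloc(sizeof(*l) + n * sizeof(l->element[0]));

	if (!l)
		return 1;
	l->count = (uint32_t)n;
	for (size_t i = 0; i < n; i++)
		l->element[i] = 36 + 4 * i;	/* made-up chanspecs */
	printf("%u entries, first %u\n",
	       (unsigned)l->count, (unsigned)l->element[0]);
	free(l);
	return 0;
}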
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+index f5e08988dc7bf..06d6f7f664308 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
+ struct iwl_fw_ini_error_dump_range {
+ 	__le32 range_data_size;
+ 	union {
+-		__le32 internal_base_addr;
+-		__le64 dram_base_addr;
+-		__le32 page_num;
++		__le32 internal_base_addr __packed;
++		__le64 dram_base_addr __packed;
++		__le32 page_num __packed;
+ 		struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ 		struct iwl_cmd_header fw_pkt_hdr;
+ 	};
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 1f5db65a088d3..1d5ee4330f29f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -802,7 +802,7 @@ out:
+ 		mvm->nvm_data->bands[0].n_channels = 1;
+ 		mvm->nvm_data->bands[0].n_bitrates = 1;
+ 		mvm->nvm_data->bands[0].bitrates =
+-			(void *)((u8 *)mvm->nvm_data->channels + 1);
++			(void *)(mvm->nvm_data->channels + 1);
+ 		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index 8b6c641772ee6..b719843e94576 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
+ 
+ 	mvmvif->associated = vif->cfg.assoc;
+ 
+-	if (!(changes & BSS_CHANGED_ASSOC))
+-		return;
+-
+-	if (vif->cfg.assoc) {
+-		/* clear statistics to get clean beacon counter */
+-		iwl_mvm_request_statistics(mvm, true);
+-		iwl_mvm_sf_update(mvm, vif, false);
+-		iwl_mvm_power_vif_assoc(mvm, vif);
+-
+-		for_each_mvm_vif_valid_link(mvmvif, i) {
+-			memset(&mvmvif->link[i]->beacon_stats, 0,
+-			       sizeof(mvmvif->link[i]->beacon_stats));
++	if (changes & BSS_CHANGED_ASSOC) {
++		if (vif->cfg.assoc) {
++			/* clear statistics to get clean beacon counter */
++			iwl_mvm_request_statistics(mvm, true);
++			iwl_mvm_sf_update(mvm, vif, false);
++			iwl_mvm_power_vif_assoc(mvm, vif);
++
++			for_each_mvm_vif_valid_link(mvmvif, i) {
++				memset(&mvmvif->link[i]->beacon_stats, 0,
++				       sizeof(mvmvif->link[i]->beacon_stats));
++
++				if (vif->p2p) {
++					iwl_mvm_update_smps(mvm, vif,
++							    IWL_MVM_SMPS_REQ_PROT,
++							    IEEE80211_SMPS_DYNAMIC, i);
++				}
++
++				rcu_read_lock();
++				link_conf = rcu_dereference(vif->link_conf[i]);
++				if (link_conf && !link_conf->dtim_period)
++					protect = true;
++				rcu_read_unlock();
++			}
+ 
+-			if (vif->p2p) {
+-				iwl_mvm_update_smps(mvm, vif,
+-						    IWL_MVM_SMPS_REQ_PROT,
+-						    IEEE80211_SMPS_DYNAMIC, i);
++			if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
++			    protect) {
++				/* If we're not restarting and still haven't
++				 * heard a beacon (dtim period unknown) then
++				 * make sure we still have enough minimum time
++				 * remaining in the time event, since the auth
++				 * might actually have taken quite a while
++				 * (especially for SAE) and so the remaining
++				 * time could be small without us having heard
++				 * a beacon yet.
++				 */
++				iwl_mvm_protect_assoc(mvm, vif, 0);
+ 			}
+ 
+-			rcu_read_lock();
+-			link_conf = rcu_dereference(vif->link_conf[i]);
+-			if (link_conf && !link_conf->dtim_period)
+-				protect = true;
+-			rcu_read_unlock();
+-		}
++			iwl_mvm_sf_update(mvm, vif, false);
++
++			/* FIXME: need to decide about misbehaving AP handling */
++			iwl_mvm_power_vif_assoc(mvm, vif);
++		} else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
++			iwl_mvm_mei_host_disassociated(mvm);
+ 
+-		if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+-		    protect) {
+-			/* If we're not restarting and still haven't
+-			 * heard a beacon (dtim period unknown) then
+-			 * make sure we still have enough minimum time
+-			 * remaining in the time event, since the auth
+-			 * might actually have taken quite a while
+-			 * (especially for SAE) and so the remaining
+-			 * time could be small without us having heard
+-			 * a beacon yet.
++			/* If update fails - SF might be running in associated
++			 * mode while disassociated - which is forbidden.
+ 			 */
+-			iwl_mvm_protect_assoc(mvm, vif, 0);
++			ret = iwl_mvm_sf_update(mvm, vif, false);
++			WARN_ONCE(ret &&
++				  !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
++					    &mvm->status),
++				  "Failed to update SF upon disassociation\n");
++
++			/* If we get an assert during the connection (after the
++			 * station has been added, but before the vif is set
++			 * to associated), mac80211 will re-add the station and
++			 * then configure the vif. Since the vif is not
++			 * associated, we would remove the station here and
++			 * this would fail the recovery.
++			 */
++			iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ 		}
+ 
+-		iwl_mvm_sf_update(mvm, vif, false);
+-
+-		/* FIXME: need to decide about misbehaving AP handling */
+-		iwl_mvm_power_vif_assoc(mvm, vif);
+-	} else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+-		iwl_mvm_mei_host_disassociated(mvm);
+-
+-		/* If update fails - SF might be running in associated
+-		 * mode while disassociated - which is forbidden.
+-		 */
+-		ret = iwl_mvm_sf_update(mvm, vif, false);
+-		WARN_ONCE(ret &&
+-			  !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+-				    &mvm->status),
+-			  "Failed to update SF upon disassociation\n");
+-
+-		/* If we get an assert during the connection (after the
+-		 * station has been added, but before the vif is set
+-		 * to associated), mac80211 will re-add the station and
+-		 * then configure the vif. Since the vif is not
+-		 * associated, we would remove the station here and
+-		 * this would fail the recovery.
+-		 */
+-		iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
++		iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ 	}
+ 
+-	iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
++	if (changes & BSS_CHANGED_PS) {
++		ret = iwl_mvm_power_update_mac(mvm);
++		if (ret)
++			IWL_ERR(mvm, "failed to update power mode\n");
++	}
+ }
+ 
+ static void
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index c1d9ce7534688..3cbe2c0b8d6bc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
+ 	if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ 		gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ 
+-	if (version < 12) {
++	if (version < 16) {
+ 		gp->scan_start_mac_or_link_id = scan_vif->id;
+ 	} else {
+ 		struct iwl_mvm_vif_link_info *link_info;
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index 391793a16adca..d1d3632a3ed7b 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -965,8 +965,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 			}
+ 		}
+ 
+-		tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
+-		tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
++		tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
++	tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
+ 		tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
+ 	}
+ }
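The corrected walk advances by the TLV header plus the payload length rather than by the whole containing structure. A standalone sketch of that cursor arithmetic, with a test vector that assumes a little-endian host:

#include <stdint.h>
#include <stddef.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;	/* payload bytes following the header */
};

/* each step consumes header + payload, never the whole containing
 * struct, so variable-sized records no longer over-advance the cursor */
static const uint8_t *next_tlv(const uint8_t *cur, size_t *left)
{
	const struct tlv_hdr *hdr = (const struct tlv_hdr *)cur;
	size_t step = sizeof(*hdr) + hdr->len;

	if (*left < step)
		return NULL;	/* truncated buffer: stop walking */
	*left -= step;
	return cur + step;
}

int main(void)
{
	uint8_t buf[8] = { 0, 0, 2, 0, 0xaa, 0xbb, 0, 0 };	/* len = 2 */
	size_t left = sizeof(buf);
	const uint8_t *next = next_tlv(buf, &left);

	return !(next == buf + 6 && left == 2);	/* 0 = expectations hold */
}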
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+index 65420ad674167..257737137cd70 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
+ 
+-	if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
++	if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
++	    rx_pkt_off > skb->len) {
+ 		mwifiex_dbg(priv->adapter, ERROR,
+ 			    "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
+ 			    skb->len, rx_pkt_off);
+@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 		return -1;
+ 	}
+ 
+-	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+-		     sizeof(bridge_tunnel_header))) ||
+-	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+-		     sizeof(rfc1042_header)) &&
+-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
++	if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
++	    ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
++		      sizeof(bridge_tunnel_header))) ||
++	     (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
++		      sizeof(rfc1042_header)) &&
++	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
++	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
+ 		/*
+ 		 *  Replace the 803 header and rfc1042 header (llc/snap) with an
+ 		 *    EthernetII header, keep the src/dst and snap_type
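The frame is now validated in two stages: the mandatory headers first, then the LLC/SNAP comparison only once the full header provably fits in the buffer. An illustrative standalone sketch with made-up sizes and offsets:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* two-stage check: mandatory headers are validated first, the optional
 * LLC/SNAP comparison only runs if the whole header really fits */
static int classify(const uint8_t *buf, size_t len, size_t off)
{
	static const uint8_t rfc1042[6] = {
		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
	};

	if (off + 14 + sizeof(rfc1042) > len)
		return -1;	/* frame too short even for the headers */

	if (off + 22 <= len &&	/* 14-byte 802.3 + 8-byte SNAP header */
	    !memcmp(buf + off + 14, rfc1042, sizeof(rfc1042)))
		return 1;	/* RFC 1042 encapsulation */
	return 0;		/* leave the raw 802.3 frame alone */
}

int main(void)
{
	uint8_t frame[22] = { [14] = 0xaa, 0xaa, 0x03 };

	printf("%d %d\n", classify(frame, sizeof(frame), 0),
	       classify(frame, 10, 0));	/* 1 -1 */
	return 0;
}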
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 465190ebaf1c4..f539913aadf86 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
+ {
+ 	struct mt76_txwi_cache *t = NULL;
+ 
+-	spin_lock(&dev->wed_lock);
++	spin_lock_bh(&dev->wed_lock);
+ 	if (!list_empty(&dev->rxwi_cache)) {
+ 		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+ 				     list);
+ 		list_del(&t->list);
+ 	}
+-	spin_unlock(&dev->wed_lock);
++	spin_unlock_bh(&dev->wed_lock);
+ 
+ 	return t;
+ }
+@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ 	if (!t)
+ 		return;
+ 
+-	spin_lock(&dev->wed_lock);
++	spin_lock_bh(&dev->wed_lock);
+ 	list_add(&t->list, &dev->rxwi_cache);
+-	spin_unlock(&dev->wed_lock);
++	spin_unlock_bh(&dev->wed_lock);
+ }
+ EXPORT_SYMBOL_GPL(mt76_put_rxwi);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+index 0acabba2d1a50..5d402cf2951cb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ 			s8 *lna_2g, s8 *lna_5g,
+ 			struct ieee80211_channel *chan)
+ {
+-	u16 val;
+ 	u8 lna;
+ 
+-	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+-		*lna_2g = 0;
+-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+-		memset(lna_5g, 0, sizeof(s8) * 3);
+-
+ 	if (chan->band == NL80211_BAND_2GHZ)
+ 		lna = *lna_2g;
+ 	else if (chan->hw_value <= 64)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+index d5809408d1d37..8c01855885ce3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+ 	struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ 	int channel = chan->hw_value;
+ 	s8 lna_5g[3], lna_2g;
+-	u8 lna;
++	bool use_lna;
++	u8 lna = 0;
+ 	u16 val;
+ 
+ 	if (chan->band == NL80211_BAND_2GHZ)
+@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+ 	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+ 	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+ 
+-	lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
++	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
++	if (chan->band == NL80211_BAND_2GHZ)
++		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
++	else
++		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
++
++	if (use_lna)
++		lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
++
+ 	dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
+ }
+ EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+index 3642a2c7f80c9..2434e2480cbe2 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
+ 	u8 vender_id[2];                /* 0x100 */
+ 	u8 product_id[2];               /* 0x102 */
+ 	u8 usb_option;                  /* 0x104 */
++	u8 res5[2];			/* 0x105 */
+ 	u8 mac_addr[ETH_ALEN];          /* 0x107 */
+ };
+ 
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 4999636eaa926..f7bb73cf821e6 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -927,13 +927,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
+ {
+ 	struct of_changeset_entry *ce;
+ 
++	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
++		return -EINVAL;
++
+ 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ 	if (!ce)
+ 		return -ENOMEM;
+ 
+-	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+-		return -EINVAL;
+-
+ 	/* get a reference to the node */
+ 	ce->action = action;
+ 	ce->np = of_node_get(np);
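Moving the validation ahead of the allocation closes a small leak: the old order could return -EINVAL with `ce` still allocated. A standalone sketch of the check-before-allocate ordering, with errno values defined locally:

#include <stdio.h>
#include <stdlib.h>

#define EINVAL 22	/* Linux values, defined locally for the sketch */
#define ENOMEM 12

static int add_entry(unsigned long action, unsigned long num_actions)
{
	void *ce;

	if (action >= num_actions)	/* reject bad input before alloc */
		return -EINVAL;

	ce = calloc(1, 64);
	if (!ce)
		return -ENOMEM;
	/* ... would register ce here; freed to keep the sketch leak-free */
	free(ce);
	return 0;
}

int main(void)
{
	printf("%d %d\n", add_entry(1, 4), add_entry(9, 4));	/* 0 -22 */
	return 0;
}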
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 7a87a47eb7edb..5cbcdd46ed47f 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -43,7 +43,6 @@
+ #define PARF_PHY_REFCLK				0x4c
+ #define PARF_CONFIG_BITS			0x50
+ #define PARF_DBI_BASE_ADDR			0x168
+-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		0x16c /* Register offset specific to IP ver 2.3.3 */
+ #define PARF_MHI_CLOCK_RESET_CTRL		0x174
+ #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
+ #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
+@@ -797,8 +796,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+ 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ 	u32 val;
+ 
+-	writel(SLV_ADDR_SPACE_SZ,
+-		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
++	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+ 
+ 	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~PHY_TEST_PWR_DOWN;
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index ae9baf801681d..41ee3dd8cecb8 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -572,7 +572,19 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+ 
+ static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
+ {
+-	pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
++	int ret;
++
++	ret = pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
++	if (ret) {
++		/*
++		 * The downstream link failed to come up, so mark the
++		 * devices below as disconnected to make sure we don't
++		 * attempt to resume them.
++		 */
++		pci_walk_bus(pci_dev->subordinate, pci_dev_set_disconnected,
++			     NULL);
++		return;
++	}
+ 
+ 	/*
+ 	 * When powering on a bridge from D3cold, the whole hierarchy may be
+diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
+index 1061eb7ec3998..43c864add778f 100644
+--- a/drivers/platform/x86/intel/ifs/runtest.c
++++ b/drivers/platform/x86/intel/ifs/runtest.c
+@@ -331,14 +331,15 @@ int do_core_test(int cpu, struct device *dev)
+ 	switch (test->test_num) {
+ 	case IFS_TYPE_SAF:
+ 		if (!ifsd->loaded)
+-			return -EPERM;
+-		ifs_test_core(cpu, dev);
++			ret = -EPERM;
++		else
++			ifs_test_core(cpu, dev);
+ 		break;
+ 	case IFS_TYPE_ARRAY_BIST:
+ 		ifs_array_test_core(cpu, dev);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
+ 	}
+ out:
+ 	cpus_read_unlock();
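Replacing the early returns with assignments lets every path fall through to the `out:` label, so cpus_read_unlock() always runs. A standalone sketch that models the lock as a counter:

#include <stdio.h>

static int lock_depth;	/* models the cpus_read_lock() nesting count */

static int run_test(int test_num, int loaded)
{
	int ret = 0;

	lock_depth++;			/* cpus_read_lock() */
	switch (test_num) {
	case 0:				/* IFS_TYPE_SAF stand-in */
		if (!loaded)
			ret = -1;	/* -EPERM in the driver */
		break;
	default:
		ret = -22;		/* -EINVAL */
	}
	lock_depth--;			/* cpus_read_unlock() always runs */
	return ret;
}

int main(void)
{
	run_test(0, 0);
	run_test(9, 1);
	printf("balanced: %d\n", lock_depth == 0);	/* 1 */
	return 0;
}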
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 20a974ced8d6c..a7a6947ab4bc5 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -3998,7 +3998,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
+ 	return 0;
+ 
+ out:
+-	ptp_ocp_dev_release(&bp->dev);
+ 	put_device(&bp->dev);
+ 	return err;
+ }
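Once an object's release callback is wired to its final reference drop, calling the callback by hand as well frees it twice; the error path now only drops the reference. A standalone sketch of that rule:

#include <stdlib.h>

struct obj {
	int refs;
	void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);	/* the release hook runs exactly once */
}

static void obj_release(struct obj *o)
{
	free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 1;
	o->release = obj_release;
	obj_put(o);	/* correct teardown: drop the ref, nothing else */
	return 0;
}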
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index d8e1caaf207e1..2820badc7a126 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5542,6 +5542,8 @@ regulator_register(struct device *dev,
+ 		goto rinse;
+ 	}
+ 	device_initialize(&rdev->dev);
++	dev_set_drvdata(&rdev->dev, rdev);
++	rdev->dev.class = &regulator_class;
+ 	spin_lock_init(&rdev->err_lock);
+ 
+ 	/*
+@@ -5603,11 +5605,9 @@ regulator_register(struct device *dev,
+ 		rdev->supply_name = regulator_desc->supply_name;
+ 
+ 	/* register with sysfs */
+-	rdev->dev.class = &regulator_class;
+ 	rdev->dev.parent = config->dev;
+ 	dev_set_name(&rdev->dev, "regulator.%lu",
+ 		    (unsigned long) atomic_inc_return(&regulator_no));
+-	dev_set_drvdata(&rdev->dev, rdev);
+ 
+ 	/* set regulator constraints */
+ 	if (init_data)
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index b9cda2210c330..65fbd95f1dbb0 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -43,7 +43,7 @@ struct mt6358_regulator_info {
+ 	.desc = {	\
+ 		.name = #vreg,	\
+ 		.of_match = of_match_ptr(match),	\
+-		.ops = &mt6358_volt_range_ops,	\
++		.ops = &mt6358_buck_ops,	\
+ 		.type = REGULATOR_VOLTAGE,	\
+ 		.id = MT6358_ID_##vreg,		\
+ 		.owner = THIS_MODULE,		\
+@@ -139,7 +139,7 @@ struct mt6358_regulator_info {
+ 	.desc = {	\
+ 		.name = #vreg,	\
+ 		.of_match = of_match_ptr(match),	\
+-		.ops = &mt6358_volt_range_ops,	\
++		.ops = &mt6358_buck_ops,	\
+ 		.type = REGULATOR_VOLTAGE,	\
+ 		.id = MT6366_ID_##vreg,		\
+ 		.owner = THIS_MODULE,		\
+@@ -450,7 +450,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
+ 	}
+ }
+ 
+-static const struct regulator_ops mt6358_volt_range_ops = {
++static const struct regulator_ops mt6358_buck_ops = {
+ 	.list_voltage = regulator_list_voltage_linear,
+ 	.map_voltage = regulator_map_voltage_linear,
+ 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+@@ -464,6 +464,18 @@ static const struct regulator_ops mt6358_volt_range_ops = {
+ 	.get_mode = mt6358_regulator_get_mode,
+ };
+ 
++static const struct regulator_ops mt6358_volt_range_ops = {
++	.list_voltage = regulator_list_voltage_linear,
++	.map_voltage = regulator_map_voltage_linear,
++	.set_voltage_sel = regulator_set_voltage_sel_regmap,
++	.get_voltage_sel = mt6358_get_buck_voltage_sel,
++	.set_voltage_time_sel = regulator_set_voltage_time_sel,
++	.enable = regulator_enable_regmap,
++	.disable = regulator_disable_regmap,
++	.is_enabled = regulator_is_enabled_regmap,
++	.get_status = mt6358_get_status,
++};
++
+ static const struct regulator_ops mt6358_volt_table_ops = {
+ 	.list_voltage = regulator_list_voltage_table,
+ 	.map_voltage = regulator_map_voltage_iterate,
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index df782646e856f..ab2f35bc294da 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 	if (port) {
+ 		put_device(&port->dev);
+ 		retval = -EEXIST;
+-		goto err_out;
++		goto err_put;
+ 	}
+ 
+ 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
+ 	if (!port)
+-		goto err_out;
++		goto err_put;
+ 
+ 	rwlock_init(&port->unit_list_lock);
+ 	INIT_LIST_HEAD(&port->unit_list);
+@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 
+ 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+ 		kfree(port);
+-		goto err_out;
++		goto err_put;
+ 	}
+ 	retval = -EINVAL;
+ 
+@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 
+ 	return port;
+ 
+-err_out:
++err_put:
+ 	zfcp_ccw_adapter_put(adapter);
++err_out:
+ 	return ERR_PTR(retval);
+ }
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 3f062e4013ab6..013a9a334972e 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1451,7 +1451,7 @@ retry_next:
+ #endif
+ 				break;
+ 			}
+-			scsi_rescan_device(&device->sdev_gendev);
++			scsi_rescan_device(device);
+ 			break;
+ 
+ 		default:
+diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
+index 73aa7059b5569..6cfbac518085d 100644
+--- a/drivers/scsi/mvumi.c
++++ b/drivers/scsi/mvumi.c
+@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
+ 
+ 	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
+ 	if (sdev) {
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 		scsi_device_put(sdev);
+ 	}
+ }
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index ad9afae49544a..ca5eb058d5c7e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2458,7 +2458,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
+ 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
+ 		break;
+ 	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
+ 		break;
+ 	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index f42388ecb0248..65c993c979095 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -138,7 +138,6 @@ extern int scsi_complete_async_scans(void);
+ extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
+ 				   unsigned int, u64, enum scsi_scan_mode);
+ extern void scsi_forget_host(struct Scsi_Host *);
+-extern void scsi_rescan_device(struct device *);
+ 
+ /* scsi_sysctl.c */
+ #ifdef CONFIG_SYSCTL
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 97669657a9976..902655d759476 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1619,12 +1619,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
+ }
+ EXPORT_SYMBOL(scsi_add_device);
+ 
+-void scsi_rescan_device(struct device *dev)
++int scsi_rescan_device(struct scsi_device *sdev)
+ {
+-	struct scsi_device *sdev = to_scsi_device(dev);
++	struct device *dev = &sdev->sdev_gendev;
++	int ret = 0;
+ 
+ 	device_lock(dev);
+ 
++	/*
++	 * Bail out if the device is not running. Otherwise, the rescan may
++	 * block waiting for commands to be executed, with us holding the
++	 * device lock. This can result in a potential deadlock in the power
++	 * management core code when system resume is on-going.
++	 */
++	if (sdev->sdev_state != SDEV_RUNNING) {
++		ret = -EWOULDBLOCK;
++		goto unlock;
++	}
++
+ 	scsi_attach_vpd(sdev);
+ 	scsi_cdl_check(sdev);
+ 
+@@ -1638,7 +1650,11 @@ void scsi_rescan_device(struct device *dev)
+ 			drv->rescan(dev);
+ 		module_put(dev->driver->owner);
+ 	}
++
++unlock:
+ 	device_unlock(dev);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(scsi_rescan_device);
+ 
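scsi_rescan_device() now takes the scsi_device directly and returns an error, bailing out with -EWOULDBLOCK when the device is not in the RUNNING state so a rescan cannot deadlock against an in-progress resume. A kernel-style sketch of a hypothetical caller (not a standalone program):

/* hypothetical caller using the new signature and return value */
static void try_rescan(struct scsi_device *sdev)
{
	int ret = scsi_rescan_device(sdev);

	if (ret == -EWOULDBLOCK)
		pr_debug("device not running, rescan skipped\n");
}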
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 60317676e45f1..24f6eefb68030 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -747,7 +747,7 @@ static ssize_t
+ store_rescan_field (struct device *dev, struct device_attribute *attr,
+ 		    const char *buf, size_t count)
+ {
+-	scsi_rescan_device(dev);
++	scsi_rescan_device(to_scsi_device(dev));
+ 	return count;
+ }
+ static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
+@@ -840,7 +840,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
+ 		 * waiting for pending I/O to finish.
+ 		 */
+ 		blk_mq_run_hw_queues(sdev->request_queue, true);
+-		scsi_rescan_device(dev);
++		scsi_rescan_device(sdev);
+ 	}
+ 
+ 	return ret == 0 ? count : -EINVAL;
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 6aaaa7ebca377..ed694d9399648 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -2257,7 +2257,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ 			device->advertised_queue_depth = device->queue_depth;
+ 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+ 			if (device->rescan) {
+-				scsi_rescan_device(&device->sdev->sdev_gendev);
++				scsi_rescan_device(device->sdev);
+ 				device->rescan = false;
+ 			}
+ 		}
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 047ffaf7d42a9..a80a9e27ff9ee 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -472,7 +472,7 @@ static void storvsc_device_scan(struct work_struct *work)
+ 	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
+ 	if (!sdev)
+ 		goto done;
+-	scsi_rescan_device(&sdev->sdev_gendev);
++	scsi_rescan_device(sdev);
+ 	scsi_device_put(sdev);
+ 
+ done:
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index bd5633667d015..9d1bdcdc13312 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
+ 	/* Handle "Parameters changed", "Mode parameters changed", and
+ 	   "Capacity data has changed".  */
+ 	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 
+ 	scsi_device_put(sdev);
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index b7ac60f4a2194..b6523d4b9259e 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -843,7 +843,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+ EXPORT_SYMBOL(target_to_linux_sector);
+ 
+ struct devices_idr_iter {
+-	struct config_item *prev_item;
+ 	int (*fn)(struct se_device *dev, void *data);
+ 	void *data;
+ };
+@@ -853,11 +852,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
+ {
+ 	struct devices_idr_iter *iter = data;
+ 	struct se_device *dev = p;
++	struct config_item *item;
+ 	int ret;
+ 
+-	config_item_put(iter->prev_item);
+-	iter->prev_item = NULL;
+-
+ 	/*
+ 	 * We add the device early to the idr, so it can be used
+ 	 * by backend modules during configuration. We do not want
+@@ -867,12 +864,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
+ 	if (!target_dev_configured(dev))
+ 		return 0;
+ 
+-	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+-	if (!iter->prev_item)
++	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
++	if (!item)
+ 		return 0;
+ 	mutex_unlock(&device_mutex);
+ 
+ 	ret = iter->fn(dev, iter->data);
++	config_item_put(item);
+ 
+ 	mutex_lock(&device_mutex);
+ 	return ret;
+@@ -895,7 +893,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ 	mutex_lock(&device_mutex);
+ 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
+ 	mutex_unlock(&device_mutex);
+-	config_item_put(iter.prev_item);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 955d938eb6633..7b8fd977f71cc 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ 		done += partlen;
+ 		len -= partlen;
+ 		ptr += partlen;
++		iov->consumed += partlen;
++		iov->iov[iov->i].iov_len -= partlen;
++		iov->iov[iov->i].iov_base += partlen;
+ 
+-		vringh_kiov_advance(iov, partlen);
++		if (!iov->iov[iov->i].iov_len) {
++			/* Fix up old iov element then increment. */
++			iov->iov[iov->i].iov_len = iov->consumed;
++			iov->iov[iov->i].iov_base -= iov->consumed;
++
++			iov->consumed = 0;
++			iov->i++;
++		}
+ 	}
+ 	return done;
+ }
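The replacement code edits the current iovec element in place while copying and restores its original base and length once it is fully consumed, so callers can reuse the array. A standalone sketch of that bookkeeping:

#include <stdio.h>

struct iovec_s {
	char *base;
	size_t len;
};

/* shrink the current element in place; once it is exhausted, restore
 * its original base/len before moving on (matching the fix above) */
static void advance(struct iovec_s *iov, size_t *i, size_t *consumed,
		    size_t part)
{
	*consumed += part;
	iov[*i].len -= part;
	iov[*i].base += part;

	if (iov[*i].len == 0) {
		iov[*i].len = *consumed;	/* undo the in-place edits */
		iov[*i].base -= *consumed;
		*consumed = 0;
		(*i)++;
	}
}

int main(void)
{
	char buf[8];
	struct iovec_s iov[1] = { { buf, sizeof(buf) } };
	size_t i = 0, consumed = 0;

	advance(iov, &i, &consumed, 3);	/* partial: element shrinks */
	advance(iov, &i, &consumed, 5);	/* exhausted: element restored */
	printf("i=%zu len=%zu restored=%d\n", i, iov[0].len,
	       iov[0].base == buf);	/* i=1 len=8 restored=1 */
	return 0;
}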
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 3bdd5b59661de..c803714d0f0d1 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <linux/irqnr.h>
+ #include <linux/pci.h>
++#include <linux/rcupdate.h>
+ #include <linux/spinlock.h>
+ #include <linux/cpuhotplug.h>
+ #include <linux/atomic.h>
+@@ -96,6 +97,7 @@ enum xen_irq_type {
+ struct irq_info {
+ 	struct list_head list;
+ 	struct list_head eoi_list;
++	struct rcu_work rwork;
+ 	short refcnt;
+ 	u8 spurious_cnt;
+ 	u8 is_accounted;
+@@ -146,23 +148,13 @@ const struct evtchn_ops *evtchn_ops;
+  */
+ static DEFINE_MUTEX(irq_mapping_update_lock);
+ 
+-/*
+- * Lock protecting event handling loop against removing event channels.
+- * Adding of event channels is no issue as the associated IRQ becomes active
+- * only after everything is setup (before request_[threaded_]irq() the handler
+- * can't be entered for an event, as the event channel will be unmasked only
+- * then).
+- */
+-static DEFINE_RWLOCK(evtchn_rwlock);
+-
+ /*
+  * Lock hierarchy:
+  *
+  * irq_mapping_update_lock
+- *   evtchn_rwlock
+- *     IRQ-desc lock
+- *       percpu eoi_list_lock
+- *         irq_info->lock
++ *   IRQ-desc lock
++ *     percpu eoi_list_lock
++ *       irq_info->lock
+  */
+ 
+ static LIST_HEAD(xen_irq_list_head);
+@@ -306,6 +298,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
+ 	info->is_accounted = 1;
+ }
+ 
++static void delayed_free_irq(struct work_struct *work)
++{
++	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
++					     rwork);
++	unsigned int irq = info->irq;
++
++	/* Remove the info pointer only now, with no potential users left. */
++	set_info_for_irq(irq, NULL);
++
++	kfree(info);
++
++	/* Legacy IRQ descriptors are managed by the arch. */
++	if (irq >= nr_legacy_irqs())
++		irq_free_desc(irq);
++}
++
+ /* Constructors for packed IRQ information. */
+ static int xen_irq_info_common_setup(struct irq_info *info,
+ 				     unsigned irq,
+@@ -668,33 +676,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
+ 
+ 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
+ 
+-	read_lock_irqsave(&evtchn_rwlock, flags);
++	rcu_read_lock();
+ 
+ 	while (true) {
+-		spin_lock(&eoi->eoi_list_lock);
++		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ 
+ 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ 						eoi_list);
+ 
+-		if (info == NULL || now < info->eoi_time) {
+-			spin_unlock(&eoi->eoi_list_lock);
++		if (info == NULL)
++			break;
++
++		if (now < info->eoi_time) {
++			mod_delayed_work_on(info->eoi_cpu, system_wq,
++					    &eoi->delayed,
++					    info->eoi_time - now);
+ 			break;
+ 		}
+ 
+ 		list_del_init(&info->eoi_list);
+ 
+-		spin_unlock(&eoi->eoi_list_lock);
++		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+ 
+ 		info->eoi_time = 0;
+ 
+ 		xen_irq_lateeoi_locked(info, false);
+ 	}
+ 
+-	if (info)
+-		mod_delayed_work_on(info->eoi_cpu, system_wq,
+-				    &eoi->delayed, info->eoi_time - now);
++	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+ 
+-	read_unlock_irqrestore(&evtchn_rwlock, flags);
++	rcu_read_unlock();
+ }
+ 
+ static void xen_cpu_init_eoi(unsigned int cpu)
+@@ -709,16 +720,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
+ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+ {
+ 	struct irq_info *info;
+-	unsigned long flags;
+ 
+-	read_lock_irqsave(&evtchn_rwlock, flags);
++	rcu_read_lock();
+ 
+ 	info = info_for_irq(irq);
+ 
+ 	if (info)
+ 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
+ 
+-	read_unlock_irqrestore(&evtchn_rwlock, flags);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+ 
+@@ -732,6 +742,7 @@ static void xen_irq_init(unsigned irq)
+ 
+ 	info->type = IRQT_UNBOUND;
+ 	info->refcnt = -1;
++	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+ 
+ 	set_info_for_irq(irq, info);
+ 	/*
+@@ -789,31 +800,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+ static void xen_free_irq(unsigned irq)
+ {
+ 	struct irq_info *info = info_for_irq(irq);
+-	unsigned long flags;
+ 
+ 	if (WARN_ON(!info))
+ 		return;
+ 
+-	write_lock_irqsave(&evtchn_rwlock, flags);
+-
+ 	if (!list_empty(&info->eoi_list))
+ 		lateeoi_list_del(info);
+ 
+ 	list_del(&info->list);
+ 
+-	set_info_for_irq(irq, NULL);
+-
+ 	WARN_ON(info->refcnt > 0);
+ 
+-	write_unlock_irqrestore(&evtchn_rwlock, flags);
+-
+-	kfree(info);
+-
+-	/* Legacy IRQ descriptors are managed by the arch. */
+-	if (irq < nr_legacy_irqs())
+-		return;
+-
+-	irq_free_desc(irq);
++	queue_rcu_work(system_wq, &info->rwork);
+ }
+ 
+ /* Not called for lateeoi events. */
+@@ -1711,7 +1709,14 @@ static int __xen_evtchn_do_upcall(void)
+ 	int cpu = smp_processor_id();
+ 	struct evtchn_loop_ctrl ctrl = { 0 };
+ 
+-	read_lock(&evtchn_rwlock);
++	/*
++	 * When closing an event channel the associated IRQ must not be freed
++	 * until all cpus have left the event handling loop. This is ensured
++	 * by taking the rcu_read_lock() while handling events, as freeing of
++	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
++	 * channel.
++	 */
++	rcu_read_lock();
+ 
+ 	do {
+ 		vcpu_info->evtchn_upcall_pending = 0;
+@@ -1724,7 +1729,7 @@ static int __xen_evtchn_do_upcall(void)
+ 
+ 	} while (vcpu_info->evtchn_upcall_pending);
+ 
+-	read_unlock(&evtchn_rwlock);
++	rcu_read_unlock();
+ 
+ 	/*
+ 	 * Increment irq_epoch only now to defer EOIs only for
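The rwlock is gone: readers now pin irq_info with rcu_read_lock() across the event loop, and teardown frees it through queue_rcu_work(), which waits out a grace period first. A kernel-style sketch of the two sides (not a standalone program):

/* reader side: info pointers stay valid for the whole critical section */
static void event_loop(void)
{
	rcu_read_lock();
	/* ... dispatch pending events; irq_info cannot be freed here ... */
	rcu_read_unlock();
}

/* writer side: unpublish first, then free after a grace period */
static void teardown(struct irq_info *info)
{
	queue_rcu_work(system_wq, &info->rwork);
}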
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index d47a927b3504d..90e60ad9db620 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -501,9 +501,6 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
+ 			     u64 start, u64 end, int *page_started,
+ 			     unsigned long *nr_written, struct writeback_control *wbc);
+ int btrfs_writepage_cow_fixup(struct page *page);
+-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
+-					  struct page *page, u64 start,
+-					  u64 end, bool uptodate);
+ int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
+ 					     int compress_type);
+ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 7cc0ed7532793..d8461c9aa2445 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -497,31 +497,6 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
+ 		btrfs_subpage_end_reader(fs_info, page, start, len);
+ }
+ 
+-/* lots and lots of room for performance fixes in the end_bio funcs */
+-
+-void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+-{
+-	struct btrfs_inode *inode;
+-	const bool uptodate = (err == 0);
+-	int ret = 0;
+-
+-	ASSERT(page && page->mapping);
+-	inode = BTRFS_I(page->mapping->host);
+-	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
+-
+-	if (!uptodate) {
+-		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
+-		u32 len;
+-
+-		ASSERT(end + 1 - start <= U32_MAX);
+-		len = end + 1 - start;
+-
+-		btrfs_page_clear_uptodate(fs_info, page, start, len);
+-		ret = err < 0 ? err : -EIO;
+-		mapping_set_error(page->mapping, ret);
+-	}
+-}
+-
+ /*
+  * after a writepage IO is done, we need to:
+  * clear the uptodate bits on error
+@@ -558,10 +533,8 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio)
+ 				   bvec->bv_offset, bvec->bv_len);
+ 
+ 		btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
+-		if (error) {
+-			btrfs_page_clear_uptodate(fs_info, page, start, len);
++		if (error)
+ 			mapping_set_error(page->mapping, error);
+-		}
+ 		btrfs_page_clear_writeback(fs_info, page, start, len);
+ 	}
+ 
+@@ -1382,6 +1355,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
+ 
+ 	bio_ctrl->end_io_func = end_bio_extent_writepage;
+ 	while (cur <= end) {
++		u32 len = end - cur + 1;
+ 		u64 disk_bytenr;
+ 		u64 em_end;
+ 		u64 dirty_range_start = cur;
+@@ -1389,8 +1363,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
+ 		u32 iosize;
+ 
+ 		if (cur >= i_size) {
+-			btrfs_writepage_endio_finish_ordered(inode, page, cur,
+-							     end, true);
++			btrfs_mark_ordered_io_finished(inode, page, cur, len,
++						       true);
+ 			/*
+ 			 * This range is beyond i_size, thus we don't need to
+ 			 * bother writing back.
+@@ -1399,7 +1373,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
+ 			 * writeback the sectors with subpage dirty bits,
+ 			 * causing writeback without ordered extent.
+ 			 */
+-			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
++			btrfs_page_clear_dirty(fs_info, page, cur, len);
+ 			break;
+ 		}
+ 
+@@ -1410,7 +1384,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
+ 			continue;
+ 		}
+ 
+-		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
++		em = btrfs_get_extent(inode, NULL, 0, cur, len);
+ 		if (IS_ERR(em)) {
+ 			ret = PTR_ERR_OR_ZERO(em);
+ 			goto out_error;
+@@ -1486,7 +1460,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
+ 	struct folio *folio = page_folio(page);
+ 	struct inode *inode = page->mapping->host;
+ 	const u64 page_start = page_offset(page);
+-	const u64 page_end = page_start + PAGE_SIZE - 1;
+ 	int ret;
+ 	int nr = 0;
+ 	size_t pg_offset;
+@@ -1530,8 +1503,11 @@ done:
+ 		set_page_writeback(page);
+ 		end_page_writeback(page);
+ 	}
+-	if (ret)
+-		end_extent_writepage(page, ret, page_start, page_end);
++	if (ret) {
++		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
++					       PAGE_SIZE, !ret);
++		mapping_set_error(page->mapping, ret);
++	}
+ 	unlock_page(page);
+ 	ASSERT(ret <= 0);
+ 	return ret;
+@@ -1696,8 +1672,6 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
+ 		struct page *page = bvec->bv_page;
+ 		u32 len = bvec->bv_len;
+ 
+-		if (!uptodate)
+-			btrfs_page_clear_uptodate(fs_info, page, start, len);
+ 		btrfs_page_clear_writeback(fs_info, page, start, len);
+ 		bio_offset += len;
+ 	}
+@@ -2249,6 +2223,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ 
+ 	while (cur <= end) {
+ 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
++		u32 cur_len = cur_end + 1 - cur;
+ 		struct page *page;
+ 		int nr = 0;
+ 
+@@ -2272,9 +2247,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ 			set_page_writeback(page);
+ 			end_page_writeback(page);
+ 		}
+-		if (ret)
+-			end_extent_writepage(page, ret, cur, cur_end);
+-		btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur);
++		if (ret) {
++			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
++						       cur, cur_len, !ret);
++			mapping_set_error(page->mapping, ret);
++		}
++		btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
+ 		if (ret < 0) {
+ 			found_error = true;
+ 			first_error = ret;
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index f61b7896320a1..e7b293717dc14 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -284,8 +284,6 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
+ 
+ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
+ 
+-void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
+-
+ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ bool find_lock_delalloc_range(struct inode *inode,
+ 			     struct page *locked_page, u64 *start,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d5c112f6091b1..0f4498dfa30c9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -423,11 +423,10 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
+ 
+ 	while (index <= end_index) {
+ 		/*
+-		 * For locked page, we will call end_extent_writepage() on it
+-		 * in run_delalloc_range() for the error handling.  That
+-		 * end_extent_writepage() function will call
+-		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
+-		 * run the ordered extent accounting.
++		 * For locked page, we will call
++		 * btrfs_mark_ordered_io_finished() on it
++		 * in run_delalloc_range() for the error handling, which will
++		 * clear page Ordered and run the ordered extent accounting.
+ 		 *
+ 		 * Here we can't just clear the Ordered bit, or
+ 		 * btrfs_mark_ordered_io_finished() would skip the accounting
+@@ -1157,11 +1156,13 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
+ 		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
+ 		if (locked_page) {
+ 			const u64 page_start = page_offset(locked_page);
+-			const u64 page_end = page_start + PAGE_SIZE - 1;
+ 
+ 			set_page_writeback(locked_page);
+ 			end_page_writeback(locked_page);
+-			end_extent_writepage(locked_page, ret, page_start, page_end);
++			btrfs_mark_ordered_io_finished(inode, locked_page,
++						       page_start, PAGE_SIZE,
++						       !ret);
++			mapping_set_error(locked_page->mapping, ret);
+ 			unlock_page(locked_page);
+ 		}
+ 		return ret;
+@@ -2840,23 +2841,19 @@ struct btrfs_writepage_fixup {
+ 
+ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
+ {
+-	struct btrfs_writepage_fixup *fixup;
++	struct btrfs_writepage_fixup *fixup =
++		container_of(work, struct btrfs_writepage_fixup, work);
+ 	struct btrfs_ordered_extent *ordered;
+ 	struct extent_state *cached_state = NULL;
+ 	struct extent_changeset *data_reserved = NULL;
+-	struct page *page;
+-	struct btrfs_inode *inode;
+-	u64 page_start;
+-	u64 page_end;
++	struct page *page = fixup->page;
++	struct btrfs_inode *inode = fixup->inode;
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	u64 page_start = page_offset(page);
++	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
+ 	int ret = 0;
+ 	bool free_delalloc_space = true;
+ 
+-	fixup = container_of(work, struct btrfs_writepage_fixup, work);
+-	page = fixup->page;
+-	inode = fixup->inode;
+-	page_start = page_offset(page);
+-	page_end = page_offset(page) + PAGE_SIZE - 1;
+-
+ 	/*
+ 	 * This is similar to page_mkwrite, we need to reserve the space before
+ 	 * we take the page lock.
+@@ -2949,10 +2946,11 @@ out_page:
+ 		 * to reflect the errors and clean the page.
+ 		 */
+ 		mapping_set_error(page->mapping, ret);
+-		end_extent_writepage(page, ret, page_start, page_end);
++		btrfs_mark_ordered_io_finished(inode, page, page_start,
++					       PAGE_SIZE, !ret);
+ 		clear_page_dirty_for_io(page);
+ 	}
+-	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
++	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
+ 	unlock_page(page);
+ 	put_page(page);
+ 	kfree(fixup);
+@@ -3391,15 +3389,6 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
+ 	return btrfs_finish_one_ordered(ordered);
+ }
+ 
+-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
+-					  struct page *page, u64 start,
+-					  u64 end, bool uptodate)
+-{
+-	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
+-
+-	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
+-}
+-
+ /*
+  * Verify the checksum for a single sector without any extra action that depend
+  * on the type of I/O.
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 5b1aac3fc8e4a..eea5215280dfe 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -410,6 +410,10 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
+ 	unsigned long flags;
+ 	u64 cur = file_offset;
+ 
++	trace_btrfs_writepage_end_io_hook(inode, file_offset,
++					  file_offset + num_bytes - 1,
++					  uptodate);
++
+ 	spin_lock_irqsave(&tree->lock, flags);
+ 	while (cur < file_offset + num_bytes) {
+ 		u64 entry_end;
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 8e9fa23bd7fed..f800d3f7d4a9a 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -218,8 +218,8 @@ do {								\
+ 			(errno))) {					\
+ 			/* Stack trace printed. */			\
+ 		} else {						\
+-			btrfs_debug((trans)->fs_info,			\
+-				    "Transaction aborted (error %d)", \
++			btrfs_err((trans)->fs_info,			\
++				  "Transaction aborted (error %d)",	\
+ 				  (errno));			\
+ 		}						\
+ 	}							\
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index 73091fbe3ea45..dee10d22ada96 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -217,9 +217,12 @@ again:
+ 			strm->buf.out_size = min_t(u32, outlen,
+ 						   PAGE_SIZE - pageofs);
+ 			outlen -= strm->buf.out_size;
+-			if (!rq->out[no] && rq->fillgaps)	/* deduped */
++			if (!rq->out[no] && rq->fillgaps) {	/* deduped */
+ 				rq->out[no] = erofs_allocpage(pagepool,
+ 						GFP_KERNEL | __GFP_NOFAIL);
++				set_page_private(rq->out[no],
++						 Z_EROFS_SHORTLIVED_PAGE);
++			}
+ 			if (rq->out[no])
+ 				strm->buf.out = kmap(rq->out[no]) + pageofs;
+ 			pageofs = 0;
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 566f68ddfa36e..31a103399412e 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -238,7 +238,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ 		return PTR_ERR(ptr);
+ 	dis = ptr + erofs_blkoff(sb, *pos);
+ 
+-	if (!dif->path) {
++	if (!sbi->devs->flatdev && !dif->path) {
+ 		if (!dis->tag[0]) {
+ 			erofs_err(sb, "empty device tag @ pos %llu", *pos);
+ 			return -EINVAL;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 597ae4535fe33..9a5d911a7edc7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -2714,6 +2714,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 		nfs4_end_drain_session(clp);
+ 		nfs4_clear_state_manager_bit(clp);
+ 
++		if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
++		    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
++				      &clp->cl_state)) {
++			memflags = memalloc_nofs_save();
++			continue;
++		}
++
+ 		if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
+ 			if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
+ 				nfs_client_return_marked_delegations(clp);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index cc8977498c483..1090c68e5b051 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -101,8 +101,8 @@ static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
+ static int ovl_dentry_revalidate_common(struct dentry *dentry,
+ 					unsigned int flags, bool weak)
+ {
+-	struct ovl_entry *oe = OVL_E(dentry);
+-	struct ovl_path *lowerstack = ovl_lowerstack(oe);
++	struct ovl_entry *oe;
++	struct ovl_path *lowerstack;
+ 	struct inode *inode = d_inode_rcu(dentry);
+ 	struct dentry *upper;
+ 	unsigned int i;
+@@ -112,6 +112,8 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
+ 	if (!inode)
+ 		return -ECHILD;
+ 
++	oe = OVL_I_E(inode);
++	lowerstack = ovl_lowerstack(oe);
+ 	upper = ovl_i_dentry_upper(inode);
+ 	if (upper)
+ 		ret = ovl_revalidate_real(upper, flags, weak);
+@@ -164,6 +166,7 @@ static void ovl_free_inode(struct inode *inode)
+ 	struct ovl_inode *oi = OVL_I(inode);
+ 
+ 	kfree(oi->redirect);
++	kfree(oi->oe);
+ 	mutex_destroy(&oi->lock);
+ 	kmem_cache_free(ovl_inode_cachep, oi);
+ }
+@@ -173,7 +176,7 @@ static void ovl_destroy_inode(struct inode *inode)
+ 	struct ovl_inode *oi = OVL_I(inode);
+ 
+ 	dput(oi->__upperdentry);
+-	ovl_free_entry(oi->oe);
++	ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe));
+ 	if (S_ISDIR(inode->i_mode))
+ 		ovl_dir_cache_free(inode);
+ 	else
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 2456b5dd439cd..352e251c41132 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2890,9 +2890,9 @@ bind_socket(struct TCP_Server_Info *server)
+ 	if (server->srcaddr.ss_family != AF_UNSPEC) {
+ 		/* Bind to the specified local IP address */
+ 		struct socket *socket = server->ssocket;
+-		rc = socket->ops->bind(socket,
+-				       (struct sockaddr *) &server->srcaddr,
+-				       sizeof(server->srcaddr));
++		rc = kernel_bind(socket,
++				 (struct sockaddr *) &server->srcaddr,
++				 sizeof(server->srcaddr));
+ 		if (rc < 0) {
+ 			struct sockaddr_in *saddr4;
+ 			struct sockaddr_in6 *saddr6;
+@@ -3041,8 +3041,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		 socket->sk->sk_sndbuf,
+ 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+ 
+-	rc = socket->ops->connect(socket, saddr, slen,
+-				  server->noblockcnt ? O_NONBLOCK : 0);
++	rc = kernel_connect(socket, saddr, slen,
++			    server->noblockcnt ? O_NONBLOCK : 0);
+ 	/*
+ 	 * When mounting SMB root file systems, we do not want to block in
+ 	 * connect. Otherwise bail out and then let cifs_reconnect() perform
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 2a717d158f02e..00a87b4acd5c5 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -84,6 +84,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 	spin_lock_init(&conn->llist_lock);
+ 	INIT_LIST_HEAD(&conn->lock_list);
+ 
++	init_rwsem(&conn->session_lock);
++
+ 	down_write(&conn_list_lock);
+ 	list_add(&conn->conns_list, &conn_list);
+ 	up_write(&conn_list_lock);
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index ad8dfaa48ffb3..335fdd714d595 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -50,6 +50,7 @@ struct ksmbd_conn {
+ 	struct nls_table		*local_nls;
+ 	struct unicode_map		*um;
+ 	struct list_head		conns_list;
++	struct rw_semaphore		session_lock;
+ 	/* smb session 1 per user */
+ 	struct xarray			sessions;
+ 	unsigned long			last_active;
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 8a5dcab05614f..b8be14a96cf66 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -174,7 +174,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 	unsigned long id;
+ 	struct ksmbd_session *sess;
+ 
+-	down_write(&sessions_table_lock);
++	down_write(&conn->session_lock);
+ 	xa_for_each(&conn->sessions, id, sess) {
+ 		if (sess->state != SMB2_SESSION_VALID ||
+ 		    time_after(jiffies,
+@@ -185,7 +185,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 			continue;
+ 		}
+ 	}
+-	up_write(&sessions_table_lock);
++	up_write(&conn->session_lock);
+ }
+ 
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -227,7 +227,9 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 			}
+ 		}
+ 	}
++	up_write(&sessions_table_lock);
+ 
++	down_write(&conn->session_lock);
+ 	xa_for_each(&conn->sessions, id, sess) {
+ 		unsigned long chann_id;
+ 		struct channel *chann;
+@@ -244,7 +246,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 			ksmbd_session_destroy(sess);
+ 		}
+ 	}
+-	up_write(&sessions_table_lock);
++	up_write(&conn->session_lock);
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+@@ -252,9 +254,11 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ {
+ 	struct ksmbd_session *sess;
+ 
++	down_read(&conn->session_lock);
+ 	sess = xa_load(&conn->sessions, id);
+ 	if (sess)
+ 		sess->last_active = jiffies;
++	up_read(&conn->session_lock);
+ 	return sess;
+ }
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 687b750a35bf7..144a4e23efcd8 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7029,10 +7029,6 @@ skip:
+ 
+ 				ksmbd_debug(SMB,
+ 					    "would have to wait for getting lock\n");
+-				spin_lock(&work->conn->llist_lock);
+-				list_add_tail(&smb_lock->clist,
+-					      &work->conn->lock_list);
+-				spin_unlock(&work->conn->llist_lock);
+ 				list_add(&smb_lock->llist, &rollback_list);
+ 
+ 				argv = kmalloc(sizeof(void *), GFP_KERNEL);
+@@ -7063,9 +7059,6 @@ skip:
+ 
+ 				if (work->state != KSMBD_WORK_ACTIVE) {
+ 					list_del(&smb_lock->llist);
+-					spin_lock(&work->conn->llist_lock);
+-					list_del(&smb_lock->clist);
+-					spin_unlock(&work->conn->llist_lock);
+ 					locks_free_lock(flock);
+ 
+ 					if (work->state == KSMBD_WORK_CANCELLED) {
+@@ -7087,19 +7080,16 @@ skip:
+ 				}
+ 
+ 				list_del(&smb_lock->llist);
+-				spin_lock(&work->conn->llist_lock);
+-				list_del(&smb_lock->clist);
+-				spin_unlock(&work->conn->llist_lock);
+ 				release_async_work(work);
+ 				goto retry;
+ 			} else if (!rc) {
++				list_add(&smb_lock->llist, &rollback_list);
+ 				spin_lock(&work->conn->llist_lock);
+ 				list_add_tail(&smb_lock->clist,
+ 					      &work->conn->lock_list);
+ 				list_add_tail(&smb_lock->flist,
+ 					      &fp->lock_list);
+ 				spin_unlock(&work->conn->llist_lock);
+-				list_add(&smb_lock->llist, &rollback_list);
+ 				ksmbd_debug(SMB, "successful in taking lock\n");
+ 			} else {
+ 				goto out;
+@@ -8036,10 +8026,10 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 		goto err_out;
+ 	}
+ 
+-	opinfo_put(opinfo);
+-	ksmbd_fd_put(work, fp);
+ 	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
++	opinfo_put(opinfo);
++	ksmbd_fd_put(work, fp);
+ 
+ 	rsp->StructureSize = cpu_to_le16(24);
+ 	rsp->OplockLevel = rsp_oplevel;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 477d91b926b35..6ba9d3ed8f0b0 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1294,7 +1294,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ 							struct bpf_attach_target_info *tgt_info)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return NULL;
+ }
+ static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+ #define DEFINE_BPF_DISPATCHER(name)
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 644e69354cba6..af8a771a053c5 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -33,6 +33,7 @@ struct ipv6_devconf {
+ 	__s32		accept_ra_defrtr;
+ 	__u32		ra_defrtr_metric;
+ 	__s32		accept_ra_min_hop_limit;
++	__s32		accept_ra_min_lft;
+ 	__s32		accept_ra_pinfo;
+ 	__s32		ignore_routes_with_linkdown;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
+index 295548cca8b36..c6a19ff5fb437 100644
+--- a/include/linux/maple_tree.h
++++ b/include/linux/maple_tree.h
+@@ -420,6 +420,8 @@ struct ma_wr_state {
+ #define MAS_ROOT	((struct maple_enode *)5UL)
+ #define MAS_NONE	((struct maple_enode *)9UL)
+ #define MAS_PAUSE	((struct maple_enode *)17UL)
++#define MAS_OVERFLOW	((struct maple_enode *)33UL)
++#define MAS_UNDERFLOW	((struct maple_enode *)65UL)
+ #define MA_ERROR(err) \
+ 		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+ 
+@@ -503,6 +505,15 @@ static inline bool mas_is_paused(const struct ma_state *mas)
+ 	return mas->node == MAS_PAUSE;
+ }
+ 
++/* Check if the mas is pointing to a node or not */
++static inline bool mas_is_active(struct ma_state *mas)
++{
++	if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
++		return true;
++
++	return false;
++}
++
+ /**
+  * mas_reset() - Reset a Maple Tree operation state.
+  * @mas: Maple Tree operation state.
+diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
+index 625f491b95de8..fb31312825ae5 100644
+--- a/include/linux/netfilter/nf_conntrack_sctp.h
++++ b/include/linux/netfilter/nf_conntrack_sctp.h
+@@ -9,6 +9,7 @@ struct ip_ct_sctp {
+ 	enum sctp_conntrack state;
+ 
+ 	__be32 vtag[IP_CT_DIR_MAX];
++	u8 init[IP_CT_DIR_MAX];
+ 	u8 last_dir;
+ 	u8 flags;
+ };
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index d6fa7c8767ad3..641c6edc9b81d 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5942,6 +5942,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+  * @event_lock: (private) lock for event list
+  * @owner_nlportid: (private) owner socket port ID
+  * @nl_owner_dead: (private) owner socket went away
++ * @cqm_rssi_work: (private) CQM RSSI reporting work
+  * @cqm_config: (private) nl80211 RSSI monitor state
+  * @pmsr_list: (private) peer measurement requests
+  * @pmsr_lock: (private) peer measurements requests/results lock
+@@ -6014,7 +6015,8 @@ struct wireless_dev {
+ 	} wext;
+ #endif
+ 
+-	struct cfg80211_cqm_config *cqm_config;
++	struct wiphy_work cqm_rssi_work;
++	struct cfg80211_cqm_config __rcu *cqm_config;
+ 
+ 	struct list_head pmsr_list;
+ 	spinlock_t pmsr_lock;
+@@ -7232,7 +7234,7 @@ struct cfg80211_rx_assoc_resp {
+ 	int uapsd_queues;
+ 	const u8 *ap_mld_addr;
+ 	struct {
+-		const u8 *addr;
++		u8 addr[ETH_ALEN] __aligned(2);
+ 		struct cfg80211_bss *bss;
+ 		u16 status;
+ 	} links[IEEE80211_MLD_MAX_NUM_LINKS];
+diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
+index 024ad8ddb27e5..571cc011b0ec5 100644
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -101,9 +101,10 @@ struct mana_txq {
+ 
+ /* skb data and frags dma mappings */
+ struct mana_skb_head {
+-	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
++	/* GSO pkts may have 2 SGEs for the linear part */
++	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
+ 
+-	u32 size[MAX_SKB_FRAGS + 1];
++	u32 size[MAX_SKB_FRAGS + 2];
+ };
+ 
+ #define MANA_HEADROOM sizeof(struct mana_skb_head)
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index f6a8ecc6b1fa7..ccc4a0f8b4ad8 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -541,7 +541,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+ 	    READ_ONCE(hh->hh_len))
+ 		return neigh_hh_output(hh, skb);
+ 
+-	return n->output(n, skb);
++	return READ_ONCE(n->output)(n, skb);
+ }
+ 
+ static inline struct neighbour *
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 10fc5c5928f71..b1b1e01c69839 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -350,12 +350,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
+ 				     bool force_schedule);
+ 
+-static inline void tcp_dec_quickack_mode(struct sock *sk,
+-					 const unsigned int pkts)
++static inline void tcp_dec_quickack_mode(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+ 	if (icsk->icsk_ack.quick) {
++		/* How many ACKs S/ACKing new data have we sent? */
++		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
++
+ 		if (pkts >= icsk->icsk_ack.quick) {
+ 			icsk->icsk_ack.quick = 0;
+ 			/* Leaving quickack mode we deflate ATO. */
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index a2b8d30c4c803..4c2dc8150c6d7 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -764,7 +764,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
+ #define scsi_template_proc_dir(sht) NULL
+ #endif
+ extern void scsi_scan_host(struct Scsi_Host *);
+-extern void scsi_rescan_device(struct device *);
++extern int scsi_rescan_device(struct scsi_device *sdev);
+ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+ extern int scsi_host_busy(struct Scsi_Host *shost);
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index ac56605fe9bca..cf592d7b630fe 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -198,6 +198,7 @@ enum {
+ 	DEVCONF_IOAM6_ID_WIDE,
+ 	DEVCONF_NDISC_EVICT_NOCARRIER,
+ 	DEVCONF_ACCEPT_UNTRACKED_NA,
++	DEVCONF_ACCEPT_RA_MIN_LFT,
+ 	DEVCONF_MAX
+ };
+ 
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index a1562f2cf3f3c..9736416136a10 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2678,7 +2678,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
+ {
+ 	struct page **page_array;
+ 	unsigned int nr_pages;
+-	int ret;
++	int ret, i;
+ 
+ 	*npages = 0;
+ 
+@@ -2708,6 +2708,20 @@ err:
+ 	 */
+ 	if (page_array[0] != page_array[ret - 1])
+ 		goto err;
++
++	/*
++	 * Can't support mapping user allocated ring memory on 32-bit archs
++	 * where it could potentially reside in highmem. Just fail those with
++	 * -EINVAL, just like we did on kernels that didn't support this
++	 * feature.
++	 */
++	for (i = 0; i < nr_pages; i++) {
++		if (PageHighMem(page_array[i])) {
++			ret = -EINVAL;
++			goto err;
++		}
++	}
++
+ 	*pages = page_array;
+ 	*npages = nr_pages;
+ 	return page_to_virt(page_array[0]);
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 12769bad5cee0..5cecabf3e5051 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -87,20 +87,33 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ 			bool cancel_all);
+ 
+-#define io_lockdep_assert_cq_locked(ctx)				\
+-	do {								\
+-		lockdep_assert(in_task());				\
+-									\
+-		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
+-			lockdep_assert_held(&ctx->uring_lock);		\
+-		} else if (!ctx->task_complete) {			\
+-			lockdep_assert_held(&ctx->completion_lock);	\
+-		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
+-			lockdep_assert(current_work());			\
+-		} else {						\
+-			lockdep_assert(current == ctx->submitter_task);	\
+-		}							\
+-	} while (0)
++#if defined(CONFIG_PROVE_LOCKING)
++static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
++{
++	lockdep_assert(in_task());
++
++	if (ctx->flags & IORING_SETUP_IOPOLL) {
++		lockdep_assert_held(&ctx->uring_lock);
++	} else if (!ctx->task_complete) {
++		lockdep_assert_held(&ctx->completion_lock);
++	} else if (ctx->submitter_task) {
++		/*
++		 * ->submitter_task may be NULL and we can still post a CQE,
++		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
++		 * Not from an SQE, as those cannot be submitted, but via
++		 * updating tagged resources.
++		 */
++		if (ctx->submitter_task->flags & PF_EXITING)
++			lockdep_assert(current_work());
++		else
++			lockdep_assert(current == ctx->submitter_task);
++	}
++}
++#else
++static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
++{
++}
++#endif
+ 
+ static inline void io_req_task_work_add(struct io_kiocb *req)
+ {
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 2f0181521c98e..7ca6f4f7819ce 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -481,7 +481,7 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
+ {
+ 	struct io_uring_buf_ring *br;
+ 	struct page **pages;
+-	int nr_pages;
++	int i, nr_pages;
+ 
+ 	pages = io_pin_pages(reg->ring_addr,
+ 			     flex_array_size(br, bufs, reg->ring_entries),
+@@ -489,6 +489,17 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
+ 	if (IS_ERR(pages))
+ 		return PTR_ERR(pages);
+ 
++	/*
++	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
++	 * which then need to be mapped. We could support that, but it'd
++	 * complicate the code and slow down the common cases quite a bit.
++	 * So just error out, returning -EINVAL just like we did on kernels
++	 * that didn't support mapped buffer rings.
++	 */
++	for (i = 0; i < nr_pages; i++)
++		if (PageHighMem(pages[i]))
++			goto error_unpin;
++
+ 	br = page_address(pages[0]);
+ #ifdef SHM_COLOUR
+ 	/*
+@@ -500,13 +511,8 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
+ 	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
+ 	 * this transparently.
+ 	 */
+-	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
+-		int i;
+-
+-		for (i = 0; i < nr_pages; i++)
+-			unpin_user_page(pages[i]);
+-		return -EINVAL;
+-	}
++	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
++		goto error_unpin;
+ #endif
+ 	bl->buf_pages = pages;
+ 	bl->buf_nr_pages = nr_pages;
+@@ -514,6 +520,11 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
+ 	bl->is_mapped = 1;
+ 	bl->is_mmap = 0;
+ 	return 0;
++error_unpin:
++	for (i = 0; i < nr_pages; i++)
++		unpin_user_page(pages[i]);
++	kvfree(pages);
++	return -EINVAL;
+ }
+ 
+ static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9cdba4ce23d2b..93fd32f2957b7 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4039,11 +4039,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ 				bitmap_from_u64(mask, bt_reg_mask(bt));
+ 				for_each_set_bit(i, mask, 32) {
+ 					reg = &st->frame[0]->regs[i];
+-					if (reg->type != SCALAR_VALUE) {
+-						bt_clear_reg(bt, i);
+-						continue;
+-					}
+-					reg->precise = true;
++					bt_clear_reg(bt, i);
++					if (reg->type == SCALAR_VALUE)
++						reg->precise = true;
+ 				}
+ 				return 0;
+ 			}
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index f723024e14266..b2f1c90c18fa0 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -255,6 +255,22 @@ bool mas_is_err(struct ma_state *mas)
+ 	return xa_is_err(mas->node);
+ }
+ 
++static __always_inline bool mas_is_overflow(struct ma_state *mas)
++{
++	if (unlikely(mas->node == MAS_OVERFLOW))
++		return true;
++
++	return false;
++}
++
++static __always_inline bool mas_is_underflow(struct ma_state *mas)
++{
++	if (unlikely(mas->node == MAS_UNDERFLOW))
++		return true;
++
++	return false;
++}
++
+ static inline bool mas_searchable(struct ma_state *mas)
+ {
+ 	if (mas_is_none(mas))
+@@ -4560,10 +4576,13 @@ no_entry:
+  *
+  * @mas: The maple state
+  * @max: The minimum starting range
++ * @empty: Can be empty
++ * @set_underflow: Set the @mas->node to underflow state on limit.
+  *
+  * Return: The entry in the previous slot which is possibly NULL
+  */
+-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
++static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
++			   bool set_underflow)
+ {
+ 	void *entry;
+ 	void __rcu **slots;
+@@ -4580,7 +4599,6 @@ retry:
+ 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ 		goto retry;
+ 
+-again:
+ 	if (mas->min <= min) {
+ 		pivot = mas_safe_min(mas, pivots, mas->offset);
+ 
+@@ -4588,9 +4606,10 @@ again:
+ 			goto retry;
+ 
+ 		if (pivot <= min)
+-			return NULL;
++			goto underflow;
+ 	}
+ 
++again:
+ 	if (likely(mas->offset)) {
+ 		mas->offset--;
+ 		mas->last = mas->index - 1;
+@@ -4602,7 +4621,7 @@ again:
+ 		}
+ 
+ 		if (mas_is_none(mas))
+-			return NULL;
++			goto underflow;
+ 
+ 		mas->last = mas->max;
+ 		node = mas_mn(mas);
+@@ -4619,10 +4638,19 @@ again:
+ 	if (likely(entry))
+ 		return entry;
+ 
+-	if (!empty)
++	if (!empty) {
++		if (mas->index <= min)
++			goto underflow;
++
+ 		goto again;
++	}
+ 
+ 	return entry;
++
++underflow:
++	if (set_underflow)
++		mas->node = MAS_UNDERFLOW;
++	return NULL;
+ }
+ 
+ /*
+@@ -4712,10 +4740,13 @@ no_entry:
+  * @mas: The maple state
+  * @max: The maximum starting range
+  * @empty: Can be empty
++ * @set_overflow: Should @mas->node be set to overflow when the limit is
++ * reached.
+  *
+  * Return: The entry in the next slot which is possibly NULL
+  */
+-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
++static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
++			   bool set_overflow)
+ {
+ 	void __rcu **slots;
+ 	unsigned long *pivots;
+@@ -4734,22 +4765,22 @@ retry:
+ 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ 		goto retry;
+ 
+-again:
+ 	if (mas->max >= max) {
+ 		if (likely(mas->offset < data_end))
+ 			pivot = pivots[mas->offset];
+ 		else
+-			return NULL; /* must be mas->max */
++			goto overflow;
+ 
+ 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ 			goto retry;
+ 
+ 		if (pivot >= max)
+-			return NULL;
++			goto overflow;
+ 	}
+ 
+ 	if (likely(mas->offset < data_end)) {
+ 		mas->index = pivots[mas->offset] + 1;
++again:
+ 		mas->offset++;
+ 		if (likely(mas->offset < data_end))
+ 			mas->last = pivots[mas->offset];
+@@ -4761,8 +4792,11 @@ again:
+ 			goto retry;
+ 		}
+ 
+-		if (mas_is_none(mas))
++		if (WARN_ON_ONCE(mas_is_none(mas))) {
++			mas->node = MAS_OVERFLOW;
+ 			return NULL;
++			/* Should not happen */
++		}
+ 
+ 		mas->offset = 0;
+ 		mas->index = mas->min;
+@@ -4781,12 +4815,20 @@ again:
+ 		return entry;
+ 
+ 	if (!empty) {
+-		if (!mas->offset)
+-			data_end = 2;
++		if (mas->last >= max)
++			goto overflow;
++
++		mas->index = mas->last + 1;
++		/* Node cannot end on NULL, so it's safe to short-cut here */
+ 		goto again;
+ 	}
+ 
+ 	return entry;
++
++overflow:
++	if (set_overflow)
++		mas->node = MAS_OVERFLOW;
++	return NULL;
+ }
+ 
+ /*
+@@ -4796,17 +4838,20 @@ again:
+  *
+  * Set the @mas->node to the next entry and the range_start to
+  * the beginning value for the entry.  Does not check beyond @limit.
+- * Sets @mas->index and @mas->last to the limit if it is hit.
++ * Sets @mas->index and @mas->last to the range. Does not update @mas->index and
++ * @mas->last on overflow.
+  * Restarts on dead nodes.
+  *
+  * Return: the next entry or %NULL.
+  */
+ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+ {
+-	if (mas->last >= limit)
++	if (mas->last >= limit) {
++		mas->node = MAS_OVERFLOW;
+ 		return NULL;
++	}
+ 
+-	return mas_next_slot(mas, limit, false);
++	return mas_next_slot(mas, limit, false, true);
+ }
+ 
+ /*
+@@ -4982,7 +5027,7 @@ void *mas_walk(struct ma_state *mas)
+ {
+ 	void *entry;
+ 
+-	if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
++	if (!mas_is_active(mas) || !mas_is_start(mas))
+ 		mas->node = MAS_START;
+ retry:
+ 	entry = mas_state_walk(mas);
+@@ -5439,19 +5484,42 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
+ 
+ static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+ {
+-	if (unlikely(mas_is_paused(wr_mas->mas)))
+-		mas_reset(wr_mas->mas);
++	if (!mas_is_active(wr_mas->mas)) {
++		if (mas_is_start(wr_mas->mas))
++			return;
+ 
+-	if (!mas_is_start(wr_mas->mas)) {
+-		if (mas_is_none(wr_mas->mas)) {
+-			mas_reset(wr_mas->mas);
+-		} else {
+-			wr_mas->r_max = wr_mas->mas->max;
+-			wr_mas->type = mte_node_type(wr_mas->mas->node);
+-			if (mas_is_span_wr(wr_mas))
+-				mas_reset(wr_mas->mas);
+-		}
++		if (unlikely(mas_is_paused(wr_mas->mas)))
++			goto reset;
++
++		if (unlikely(mas_is_none(wr_mas->mas)))
++			goto reset;
++
++		if (unlikely(mas_is_overflow(wr_mas->mas)))
++			goto reset;
++
++		if (unlikely(mas_is_underflow(wr_mas->mas)))
++			goto reset;
+ 	}
++
++	/*
++	 * A less strict version of mas_is_span_wr() where we allow spanning
++	 * writes within this node.  This is to stop partial walks in
++	 * mas_prealloc() from being reset.
++	 */
++	if (wr_mas->mas->last > wr_mas->mas->max)
++		goto reset;
++
++	if (wr_mas->entry)
++		return;
++
++	if (mte_is_leaf(wr_mas->mas->node) &&
++	    wr_mas->mas->last == wr_mas->mas->max)
++		goto reset;
++
++	return;
++
++reset:
++	mas_reset(wr_mas->mas);
+ }
+ 
+ /* Interface */
+@@ -5682,8 +5750,25 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+ {
+ 	bool was_none = mas_is_none(mas);
+ 
+-	if (mas_is_none(mas) || mas_is_paused(mas))
++	if (unlikely(mas->last >= max)) {
++		mas->node = MAS_OVERFLOW;
++		return true;
++	}
++
++	if (mas_is_active(mas))
++		return false;
++
++	if (mas_is_none(mas) || mas_is_paused(mas)) {
++		mas->node = MAS_START;
++	} else if (mas_is_overflow(mas)) {
++		/* Overflowed before, but the max changed */
+ 		mas->node = MAS_START;
++	} else if (mas_is_underflow(mas)) {
++		mas->node = MAS_START;
++		*entry = mas_walk(mas);
++		if (*entry)
++			return true;
++	}
+ 
+ 	if (mas_is_start(mas))
+ 		*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+@@ -5702,6 +5787,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+ 
+ 	if (mas_is_none(mas))
+ 		return true;
++
+ 	return false;
+ }
+ 
+@@ -5724,7 +5810,7 @@ void *mas_next(struct ma_state *mas, unsigned long max)
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_next_slot */
+-	return mas_next_slot(mas, max, false);
++	return mas_next_slot(mas, max, false, true);
+ }
+ EXPORT_SYMBOL_GPL(mas_next);
+ 
+@@ -5747,7 +5833,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max)
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_next_slot */
+-	return mas_next_slot(mas, max, true);
++	return mas_next_slot(mas, max, true, true);
+ }
+ EXPORT_SYMBOL_GPL(mas_next_range);
+ 
+@@ -5774,18 +5860,31 @@ EXPORT_SYMBOL_GPL(mt_next);
+ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
+ 		void **entry)
+ {
+-	if (mas->index <= min)
+-		goto none;
++	if (unlikely(mas->index <= min)) {
++		mas->node = MAS_UNDERFLOW;
++		return true;
++	}
+ 
+-	if (mas_is_none(mas) || mas_is_paused(mas))
++	if (mas_is_active(mas))
++		return false;
++
++	if (mas_is_overflow(mas)) {
+ 		mas->node = MAS_START;
++		*entry = mas_walk(mas);
++		if (*entry)
++			return true;
++	}
+ 
+-	if (mas_is_start(mas)) {
+-		mas_walk(mas);
+-		if (!mas->index)
+-			goto none;
++	if (mas_is_none(mas) || mas_is_paused(mas)) {
++		mas->node = MAS_START;
++	} else if (mas_is_underflow(mas)) {
++		/* underflowed before but the min changed */
++		mas->node = MAS_START;
+ 	}
+ 
++	if (mas_is_start(mas))
++		mas_walk(mas);
++
+ 	if (unlikely(mas_is_ptr(mas))) {
+ 		if (!mas->index)
+ 			goto none;
+@@ -5830,7 +5929,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
+ 	if (mas_prev_setup(mas, min, &entry))
+ 		return entry;
+ 
+-	return mas_prev_slot(mas, min, false);
++	return mas_prev_slot(mas, min, false, true);
+ }
+ EXPORT_SYMBOL_GPL(mas_prev);
+ 
+@@ -5853,7 +5952,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min)
+ 	if (mas_prev_setup(mas, min, &entry))
+ 		return entry;
+ 
+-	return mas_prev_slot(mas, min, true);
++	return mas_prev_slot(mas, min, true, true);
+ }
+ EXPORT_SYMBOL_GPL(mas_prev_range);
+ 
+@@ -5907,24 +6006,35 @@ EXPORT_SYMBOL_GPL(mas_pause);
+ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
+ 		void **entry)
+ {
+-	*entry = NULL;
++	if (mas_is_active(mas)) {
++		if (mas->last < max)
++			return false;
+ 
+-	if (unlikely(mas_is_none(mas))) {
++		return true;
++	}
++
++	if (mas_is_paused(mas)) {
+ 		if (unlikely(mas->last >= max))
+ 			return true;
+ 
+-		mas->index = mas->last;
++		mas->index = ++mas->last;
+ 		mas->node = MAS_START;
+-	} else if (unlikely(mas_is_paused(mas))) {
++	} else if (mas_is_none(mas)) {
+ 		if (unlikely(mas->last >= max))
+ 			return true;
+ 
++		mas->index = mas->last;
+ 		mas->node = MAS_START;
+-		mas->index = ++mas->last;
+-	} else if (unlikely(mas_is_ptr(mas)))
+-		goto ptr_out_of_range;
++	} else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
++		if (mas->index > max) {
++			mas->node = MAS_OVERFLOW;
++			return true;
++		}
++
++		mas->node = MAS_START;
++	}
+ 
+-	if (unlikely(mas_is_start(mas))) {
++	if (mas_is_start(mas)) {
+ 		/* First run or continue */
+ 		if (mas->index > max)
+ 			return true;
+@@ -5974,7 +6084,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_next_slot */
+-	return mas_next_slot(mas, max, false);
++	return mas_next_slot(mas, max, false, false);
+ }
+ EXPORT_SYMBOL_GPL(mas_find);
+ 
+@@ -5992,13 +6102,13 @@ EXPORT_SYMBOL_GPL(mas_find);
+  */
+ void *mas_find_range(struct ma_state *mas, unsigned long max)
+ {
+-	void *entry;
++	void *entry = NULL;
+ 
+ 	if (mas_find_setup(mas, max, &entry))
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_next_slot */
+-	return mas_next_slot(mas, max, true);
++	return mas_next_slot(mas, max, true, false);
+ }
+ EXPORT_SYMBOL_GPL(mas_find_range);
+ 
+@@ -6013,26 +6123,36 @@ EXPORT_SYMBOL_GPL(mas_find_range);
+ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+ 		void **entry)
+ {
+-	*entry = NULL;
+-
+-	if (unlikely(mas_is_none(mas))) {
+-		if (mas->index <= min)
+-			goto none;
++	if (mas_is_active(mas)) {
++		if (mas->index > min)
++			return false;
+ 
+-		mas->last = mas->index;
+-		mas->node = MAS_START;
++		return true;
+ 	}
+ 
+-	if (unlikely(mas_is_paused(mas))) {
++	if (mas_is_paused(mas)) {
+ 		if (unlikely(mas->index <= min)) {
+ 			mas->node = MAS_NONE;
+ 			return true;
+ 		}
+ 		mas->node = MAS_START;
+ 		mas->last = --mas->index;
++	} else if (mas_is_none(mas)) {
++		if (mas->index <= min)
++			goto none;
++
++		mas->last = mas->index;
++		mas->node = MAS_START;
++	} else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
++		if (mas->last <= min) {
++			mas->node = MAS_UNDERFLOW;
++			return true;
++		}
++
++		mas->node = MAS_START;
+ 	}
+ 
+-	if (unlikely(mas_is_start(mas))) {
++	if (mas_is_start(mas)) {
+ 		/* First run or continue */
+ 		if (mas->index < min)
+ 			return true;
+@@ -6083,13 +6203,13 @@ none:
+  */
+ void *mas_find_rev(struct ma_state *mas, unsigned long min)
+ {
+-	void *entry;
++	void *entry = NULL;
+ 
+ 	if (mas_find_rev_setup(mas, min, &entry))
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_prev_slot */
+-	return mas_prev_slot(mas, min, false);
++	return mas_prev_slot(mas, min, false, false);
+ 
+ }
+ EXPORT_SYMBOL_GPL(mas_find_rev);
+@@ -6109,13 +6229,13 @@ EXPORT_SYMBOL_GPL(mas_find_rev);
+  */
+ void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
+ {
+-	void *entry;
++	void *entry = NULL;
+ 
+ 	if (mas_find_rev_setup(mas, min, &entry))
+ 		return entry;
+ 
+ 	/* Retries on dead nodes handled by mas_prev_slot */
+-	return mas_prev_slot(mas, min, true);
++	return mas_prev_slot(mas, min, true, false);
+ }
+ EXPORT_SYMBOL_GPL(mas_find_range_rev);
+ 
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index 8d4c92cbdd0cf..4e7fd364f0f1f 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2039,7 +2039,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, val != NULL);
+ 	MT_BUG_ON(mt, mas.index != 0);
+ 	MT_BUG_ON(mt, mas.last != 5);
+-	MT_BUG_ON(mt, mas.node != MAS_NONE);
++	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ 
+ 	mas.index = 0;
+ 	mas.last = 5;
+@@ -2790,6 +2790,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+  *		exists	MAS_NONE	active		range
+  *		exists	active		active		range
+  *		DNE	active		active		set to last range
++ *		ERANGE	active		MAS_OVERFLOW	last range
+  *
+  * Function	ENTRY	Start		Result		index & last
+  * mas_prev()
+@@ -2818,6 +2819,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+  *		any	MAS_ROOT	MAS_NONE	0
+  *		exists	active		active		range
+  *		DNE	active		active		last range
++ *		ERANGE	active		MAS_UNDERFLOW	last range
+  *
+  * Function	ENTRY	Start		Result		index & last
+  * mas_find()
+@@ -2828,7 +2830,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+  *		DNE	MAS_START	MAS_NONE	0
+  *		DNE	MAS_PAUSE	MAS_NONE	0
+  *		DNE	MAS_ROOT	MAS_NONE	0
+- *		DNE	MAS_NONE	MAS_NONE	0
++ *		DNE	MAS_NONE	MAS_NONE	1
+  *				if index ==  0
+  *		exists	MAS_START	MAS_ROOT	0
+  *		exists	MAS_PAUSE	MAS_ROOT	0
+@@ -2840,7 +2842,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+  *		DNE	MAS_START	active		set to max
+  *		exists	MAS_PAUSE	active		range
+  *		DNE	MAS_PAUSE	active		set to max
+- *		exists	MAS_NONE	active		range
++ *		exists	MAS_NONE	active		range (start at last)
+  *		exists	active		active		range
+  *		DNE	active		active		last range (max < last)
+  *
+@@ -2865,7 +2867,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+  *		DNE	MAS_START	active		set to min
+  *		exists	MAS_PAUSE	active		range
+  *		DNE	MAS_PAUSE	active		set to min
+- *		exists	MAS_NONE	active		range
++ *		exists	MAS_NONE	active		range (start at index)
+  *		exists	active		active		range
+  *		DNE	active		active		last range (min > index)
+  *
+@@ -2912,10 +2914,10 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
+ 
+ 	mas_lock(&mas);
+-	/* prev: Start -> none */
++	/* prev: Start -> underflow */
+ 	entry = mas_prev(&mas, 0);
+ 	MT_BUG_ON(mt, entry != NULL);
+-	MT_BUG_ON(mt, mas.node != MAS_NONE);
++	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ 
+ 	/* prev: Start -> root */
+ 	mas_set(&mas, 10);
+@@ -2942,7 +2944,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, entry != NULL);
+ 	MT_BUG_ON(mt, mas.node != MAS_NONE);
+ 
+-	/* next: start -> none */
++	/* next: start -> none*/
+ 	mas_set(&mas, 10);
+ 	entry = mas_next(&mas, ULONG_MAX);
+ 	MT_BUG_ON(mt, mas.index != 1);
+@@ -3141,25 +3143,46 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.last != 0x2500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* next:active -> active out of range*/
++	/* next:active -> active beyond data */
+ 	entry = mas_next(&mas, 0x2999);
+ 	MT_BUG_ON(mt, entry != NULL);
+ 	MT_BUG_ON(mt, mas.index != 0x2501);
+ 	MT_BUG_ON(mt, mas.last != 0x2fff);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* Continue after out of range*/
++	/* Continue after last range ends after max */
+ 	entry = mas_next(&mas, ULONG_MAX);
+ 	MT_BUG_ON(mt, entry != ptr3);
+ 	MT_BUG_ON(mt, mas.index != 0x3000);
+ 	MT_BUG_ON(mt, mas.last != 0x3500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* next:active -> active out of range*/
++	/* next:active -> active continued */
++	entry = mas_next(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, entry != NULL);
++	MT_BUG_ON(mt, mas.index != 0x3501);
++	MT_BUG_ON(mt, mas.last != ULONG_MAX);
++	MT_BUG_ON(mt, !mas_active(mas));
++
++	/* next:active -> overflow  */
+ 	entry = mas_next(&mas, ULONG_MAX);
+ 	MT_BUG_ON(mt, entry != NULL);
+ 	MT_BUG_ON(mt, mas.index != 0x3501);
+ 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
++	MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
++
++	/* next:overflow -> overflow  */
++	entry = mas_next(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, entry != NULL);
++	MT_BUG_ON(mt, mas.index != 0x3501);
++	MT_BUG_ON(mt, mas.last != ULONG_MAX);
++	MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
++
++	/* prev:overflow -> active  */
++	entry = mas_prev(&mas, 0);
++	MT_BUG_ON(mt, entry != ptr3);
++	MT_BUG_ON(mt, mas.index != 0x3000);
++	MT_BUG_ON(mt, mas.last != 0x3500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+ 	/* next: none -> active, skip value at location */
+@@ -3180,11 +3203,46 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.last != 0x1500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* prev:active -> active out of range*/
++	/* prev:active -> active spanning end range */
++	entry = mas_prev(&mas, 0x0100);
++	MT_BUG_ON(mt, entry != NULL);
++	MT_BUG_ON(mt, mas.index != 0);
++	MT_BUG_ON(mt, mas.last != 0x0FFF);
++	MT_BUG_ON(mt, !mas_active(mas));
++
++	/* prev:active -> underflow */
++	entry = mas_prev(&mas, 0);
++	MT_BUG_ON(mt, entry != NULL);
++	MT_BUG_ON(mt, mas.index != 0);
++	MT_BUG_ON(mt, mas.last != 0x0FFF);
++	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
++
++	/* prev:underflow -> underflow */
+ 	entry = mas_prev(&mas, 0);
+ 	MT_BUG_ON(mt, entry != NULL);
+ 	MT_BUG_ON(mt, mas.index != 0);
+ 	MT_BUG_ON(mt, mas.last != 0x0FFF);
++	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
++
++	/* next:underflow -> active */
++	entry = mas_next(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, entry != ptr);
++	MT_BUG_ON(mt, mas.index != 0x1000);
++	MT_BUG_ON(mt, mas.last != 0x1500);
++	MT_BUG_ON(mt, !mas_active(mas));
++
++	/* prev:first value -> underflow */
++	entry = mas_prev(&mas, 0x1000);
++	MT_BUG_ON(mt, entry != NULL);
++	MT_BUG_ON(mt, mas.index != 0x1000);
++	MT_BUG_ON(mt, mas.last != 0x1500);
++	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
++
++	/* find:underflow -> first value */
++	entry = mas_find(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, entry != ptr);
++	MT_BUG_ON(mt, mas.index != 0x1000);
++	MT_BUG_ON(mt, mas.last != 0x1500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+ 	/* prev: pause ->active */
+@@ -3198,14 +3256,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.last != 0x2500);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* prev:active -> active out of range*/
++	/* prev:active -> active spanning min */
+ 	entry = mas_prev(&mas, 0x1600);
+ 	MT_BUG_ON(mt, entry != NULL);
+ 	MT_BUG_ON(mt, mas.index != 0x1501);
+ 	MT_BUG_ON(mt, mas.last != 0x1FFF);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* prev: active ->active, continue*/
++	/* prev: active ->active, continue */
+ 	entry = mas_prev(&mas, 0);
+ 	MT_BUG_ON(mt, entry != ptr);
+ 	MT_BUG_ON(mt, mas.index != 0x1000);
+@@ -3252,7 +3310,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.last != 0x2FFF);
+ 	MT_BUG_ON(mt, !mas_active(mas));
+ 
+-	/* find: none ->active */
++	/* find: overflow ->active */
+ 	entry = mas_find(&mas, 0x5000);
+ 	MT_BUG_ON(mt, entry != ptr3);
+ 	MT_BUG_ON(mt, mas.index != 0x3000);
+@@ -3637,7 +3695,6 @@ static int __init maple_tree_seed(void)
+ 	check_empty_area_fill(&tree);
+ 	mtree_destroy(&tree);
+ 
+-
+ 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ 	check_state_handling(&tree);
+ 	mtree_destroy(&tree);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 29ae9b254a34e..26a265d9c59cd 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2784,6 +2784,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ 	hci_conn_params_clear_all(hdev);
+ 	hci_discovery_filter_clear(hdev);
+ 	hci_blocked_keys_clear(hdev);
++	hci_codec_list_clear(&hdev->local_codecs);
+ 	hci_dev_unlock(hdev);
+ 
+ 	ida_simple_remove(&hci_index_ida, hdev->id);
+@@ -3418,7 +3419,12 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+ 		if (c->type == type && c->sent) {
+ 			bt_dev_err(hdev, "killing stalled connection %pMR",
+ 				   &c->dst);
++			/* hci_disconnect might sleep, so we have to release
++			 * the RCU read lock before calling it.
++			 */
++			rcu_read_unlock();
+ 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
++			rcu_read_lock();
+ 		}
+ 	}
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2358c1835d475..a77234478b2c4 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -33,6 +33,7 @@
+ 
+ #include "hci_request.h"
+ #include "hci_debugfs.h"
++#include "hci_codec.h"
+ #include "a2mp.h"
+ #include "amp.h"
+ #include "smp.h"
+diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
+index b9c5a98238374..0be75cf0efed8 100644
+--- a/net/bluetooth/hci_request.h
++++ b/net/bluetooth/hci_request.h
+@@ -71,7 +71,5 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
+ void hci_req_add_le_passive_scan(struct hci_request *req);
+ 
+-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
+-
+ void hci_request_setup(struct hci_dev *hdev);
+ void hci_request_cancel_all(struct hci_dev *hdev);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 402b8522c2228..6aaecd6e656bc 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -412,11 +412,6 @@ static int hci_le_scan_restart_sync(struct hci_dev *hdev)
+ 					   LE_SCAN_FILTER_DUP_ENABLE);
+ }
+ 
+-static int le_scan_restart_sync(struct hci_dev *hdev, void *data)
+-{
+-	return hci_le_scan_restart_sync(hdev);
+-}
+-
+ static void le_scan_restart(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev,
+@@ -426,15 +421,15 @@ static void le_scan_restart(struct work_struct *work)
+ 
+ 	bt_dev_dbg(hdev, "");
+ 
+-	hci_dev_lock(hdev);
+-
+-	status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL);
++	status = hci_le_scan_restart_sync(hdev);
+ 	if (status) {
+ 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
+ 			   status);
+-		goto unlock;
++		return;
+ 	}
+ 
++	hci_dev_lock(hdev);
++
+ 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+ 	    !hdev->discovery.scan_start)
+ 		goto unlock;
+@@ -5095,6 +5090,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ 	memset(hdev->eir, 0, sizeof(hdev->eir));
+ 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ 	bacpy(&hdev->random_addr, BDADDR_ANY);
++	hci_codec_list_clear(&hdev->local_codecs);
+ 
+ 	hci_dev_put(hdev);
+ 	return err;
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 9b6a7eb2015f0..42f7b257bdfbc 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -499,7 +499,7 @@ drop:
+ }
+ 
+ /* -------- Socket interface ---------- */
+-static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
++static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst)
+ {
+ 	struct sock *sk;
+ 
+@@ -507,7 +507,10 @@ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
+ 		if (sk->sk_state != BT_LISTEN)
+ 			continue;
+ 
+-		if (!bacmp(&iso_pi(sk)->src, ba))
++		if (bacmp(&iso_pi(sk)->dst, dst))
++			continue;
++
++		if (!bacmp(&iso_pi(sk)->src, src))
+ 			return sk;
+ 	}
+ 
+@@ -965,7 +968,7 @@ static int iso_listen_cis(struct sock *sk)
+ 
+ 	write_lock(&iso_sk_list.lock);
+ 
+-	if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src))
++	if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst))
+ 		err = -EADDRINUSE;
+ 
+ 	write_unlock(&iso_sk_list.lock);
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 1a801fab9543e..0be889905c2b6 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -294,7 +294,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ 			/* tell br_dev_xmit to continue with forwarding */
+ 			nf_bridge->bridged_dnat = 1;
+ 			/* FIXME Need to refragment */
+-			ret = neigh->output(neigh, skb);
++			ret = READ_ONCE(neigh->output)(neigh, skb);
+ 		}
+ 		neigh_release(neigh);
+ 		return ret;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index ddd0f32de20ef..f16ec0e8a0348 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -410,7 +410,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
+ 				 */
+ 				__skb_queue_purge(&n->arp_queue);
+ 				n->arp_queue_len_bytes = 0;
+-				n->output = neigh_blackhole;
++				WRITE_ONCE(n->output, neigh_blackhole);
+ 				if (n->nud_state & NUD_VALID)
+ 					n->nud_state = NUD_NOARP;
+ 				else
+@@ -920,7 +920,7 @@ static void neigh_suspect(struct neighbour *neigh)
+ {
+ 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
+ 
+-	neigh->output = neigh->ops->output;
++	WRITE_ONCE(neigh->output, neigh->ops->output);
+ }
+ 
+ /* Neighbour state is OK;
+@@ -932,7 +932,7 @@ static void neigh_connect(struct neighbour *neigh)
+ {
+ 	neigh_dbg(2, "neigh %p is connected\n", neigh);
+ 
+-	neigh->output = neigh->ops->connected_output;
++	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
+ }
+ 
+ static void neigh_periodic_work(struct work_struct *work)
+@@ -988,7 +988,9 @@ static void neigh_periodic_work(struct work_struct *work)
+ 			    (state == NUD_FAILED ||
+ 			     !time_in_range_open(jiffies, n->used,
+ 						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+-				*np = n->next;
++				rcu_assign_pointer(*np,
++					rcu_dereference_protected(n->next,
++						lockdep_is_held(&tbl->lock)));
+ 				neigh_mark_dead(n);
+ 				write_unlock(&n->lock);
+ 				neigh_cleanup_and_release(n);
+@@ -1447,7 +1449,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 				if (n2)
+ 					n1 = n2;
+ 			}
+-			n1->output(n1, skb);
++			READ_ONCE(n1->output)(n1, skb);
+ 			if (n2)
+ 				neigh_release(n2);
+ 			rcu_read_unlock();
+@@ -3153,7 +3155,7 @@ int neigh_xmit(int index, struct net_device *dev,
+ 			rcu_read_unlock();
+ 			goto out_kfree_skb;
+ 		}
+-		err = neigh->output(neigh, skb);
++		err = READ_ONCE(neigh->output)(neigh, skb);
+ 		rcu_read_unlock();
+ 	}
+ 	else if (index == NEIGH_LINK_TABLE) {
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 8f07fea39d9ea..3fc4086a414ea 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -668,6 +668,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
+ 	sk = __sock_map_lookup_elem(map, key);
+ 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ 		return SK_DROP;
++	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
++		return SK_DROP;
+ 
+ 	msg->flags = flags;
+ 	msg->sk_redir = sk;
+@@ -1267,6 +1269,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
+ 	sk = __sock_hash_lookup_elem(map, key);
+ 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ 		return SK_DROP;
++	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
++		return SK_DROP;
+ 
+ 	msg->flags = flags;
+ 	msg->sk_redir = sk;
+diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
+index 5a8cab4df0c9c..a9334937ace26 100644
+--- a/net/ethtool/plca.c
++++ b/net/ethtool/plca.c
+@@ -21,16 +21,6 @@ struct plca_reply_data {
+ #define PLCA_REPDATA(__reply_base) \
+ 	container_of(__reply_base, struct plca_reply_data, base)
+ 
+-static void plca_update_sint(int *dst, const struct nlattr *attr,
+-			     bool *mod)
+-{
+-	if (!attr)
+-		return;
+-
+-	*dst = nla_get_u32(attr);
+-	*mod = true;
+-}
+-
+ // PLCA get configuration message ------------------------------------------- //
+ 
+ const struct nla_policy ethnl_plca_get_cfg_policy[] = {
+@@ -38,6 +28,29 @@ const struct nla_policy ethnl_plca_get_cfg_policy[] = {
+ 		NLA_POLICY_NESTED(ethnl_header_policy),
+ };
+ 
++static void plca_update_sint(int *dst, struct nlattr **tb, u32 attrid,
++			     bool *mod)
++{
++	const struct nlattr *attr = tb[attrid];
++
++	if (!attr ||
++	    WARN_ON_ONCE(attrid >= ARRAY_SIZE(ethnl_plca_set_cfg_policy)))
++		return;
++
++	switch (ethnl_plca_set_cfg_policy[attrid].type) {
++	case NLA_U8:
++		*dst = nla_get_u8(attr);
++		break;
++	case NLA_U32:
++		*dst = nla_get_u32(attr);
++		break;
++	default:
++		WARN_ON_ONCE(1);
++	}
++
++	*mod = true;
++}
++
+ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
+ 				     struct ethnl_reply_data *reply_base,
+ 				     struct genl_info *info)
+@@ -144,13 +157,13 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
+ 		return -EOPNOTSUPP;
+ 
+ 	memset(&plca_cfg, 0xff, sizeof(plca_cfg));
+-	plca_update_sint(&plca_cfg.enabled, tb[ETHTOOL_A_PLCA_ENABLED], &mod);
+-	plca_update_sint(&plca_cfg.node_id, tb[ETHTOOL_A_PLCA_NODE_ID], &mod);
+-	plca_update_sint(&plca_cfg.node_cnt, tb[ETHTOOL_A_PLCA_NODE_CNT], &mod);
+-	plca_update_sint(&plca_cfg.to_tmr, tb[ETHTOOL_A_PLCA_TO_TMR], &mod);
+-	plca_update_sint(&plca_cfg.burst_cnt, tb[ETHTOOL_A_PLCA_BURST_CNT],
++	plca_update_sint(&plca_cfg.enabled, tb, ETHTOOL_A_PLCA_ENABLED, &mod);
++	plca_update_sint(&plca_cfg.node_id, tb, ETHTOOL_A_PLCA_NODE_ID, &mod);
++	plca_update_sint(&plca_cfg.node_cnt, tb, ETHTOOL_A_PLCA_NODE_CNT, &mod);
++	plca_update_sint(&plca_cfg.to_tmr, tb, ETHTOOL_A_PLCA_TO_TMR, &mod);
++	plca_update_sint(&plca_cfg.burst_cnt, tb, ETHTOOL_A_PLCA_BURST_CNT,
+ 			 &mod);
+-	plca_update_sint(&plca_cfg.burst_tmr, tb[ETHTOOL_A_PLCA_BURST_TMR],
++	plca_update_sint(&plca_cfg.burst_tmr, tb, ETHTOOL_A_PLCA_BURST_TMR,
+ 			 &mod);
+ 	if (!mod)
+ 		return 0;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 0a53ca6ebb0d5..14fbc5cd157ef 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -3417,6 +3417,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 				    fa->fa_type == fri.type) {
+ 					fri.offload = READ_ONCE(fa->offload);
+ 					fri.trap = READ_ONCE(fa->trap);
++					fri.offload_failed =
++						READ_ONCE(fa->offload_failed);
+ 					break;
+ 				}
+ 			}
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 75f24b931a185..9cfc07d1e4252 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1618,16 +1618,13 @@ EXPORT_SYMBOL(tcp_read_sock);
+ 
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+-	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 seq = tp->copied_seq;
+ 	struct sk_buff *skb;
+ 	int copied = 0;
+-	u32 offset;
+ 
+ 	if (sk->sk_state == TCP_LISTEN)
+ 		return -ENOTCONN;
+ 
+-	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
++	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
+ 		u8 tcp_flags;
+ 		int used;
+ 
+@@ -1640,13 +1637,10 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ 				copied = used;
+ 			break;
+ 		}
+-		seq += used;
+ 		copied += used;
+ 
+-		if (tcp_flags & TCPHDR_FIN) {
+-			++seq;
++		if (tcp_flags & TCPHDR_FIN)
+ 			break;
+-		}
+ 	}
+ 	return copied;
+ }
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 81f0dff69e0b6..3272682030015 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -222,6 +222,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 				  int *addr_len)
+ {
+ 	struct tcp_sock *tcp = tcp_sk(sk);
++	int peek = flags & MSG_PEEK;
+ 	u32 seq = tcp->copied_seq;
+ 	struct sk_psock *psock;
+ 	int copied = 0;
+@@ -311,7 +312,8 @@ msg_bytes_ready:
+ 		copied = -EAGAIN;
+ 	}
+ out:
+-	WRITE_ONCE(tcp->copied_seq, seq);
++	if (!peek)
++		WRITE_ONCE(tcp->copied_seq, seq);
+ 	tcp_rcv_space_adjust(sk);
+ 	if (copied > 0)
+ 		__tcp_cleanup_rbuf(sk, copied);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 48c2b96b08435..a5781f86ac375 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -243,6 +243,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
+ 		if (unlikely(len > icsk->icsk_ack.rcv_mss +
+ 				   MAX_TCP_OPTION_SPACE))
+ 			tcp_gro_dev_warn(sk, skb, len);
++		/* If the skb has a len of exactly 1*MSS and has the PSH bit
++		 * set then it is likely the end of an application write. So
++		 * more data may not be arriving soon, and yet the data sender
++		 * may be waiting for an ACK if cwnd-bound or using TX zero
++		 * copy. So we set ICSK_ACK_PUSHED here so that
++		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
++		 * reads all of the data and is not ping-pong. If len > MSS
++		 * then this logic does not matter (and does not hurt) because
++		 * tcp_cleanup_rbuf() will always ACK immediately if the app
++		 * reads data and there is more than an MSS of unACKed data.
++		 */
++		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
++			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
+ 	} else {
+ 		/* Otherwise, we make more careful check taking into account,
+ 		 * that SACKs block is variable.
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9f9ca68c47026..37fd9537423f1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -177,8 +177,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
+ }
+ 
+ /* Account for an ACK we sent. */
+-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+-				      u32 rcv_nxt)
++static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+@@ -192,7 +191,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ 
+ 	if (unlikely(rcv_nxt != tp->rcv_nxt))
+ 		return;  /* Special ACK sent by DCTCP to reflect ECN */
+-	tcp_dec_quickack_mode(sk, pkts);
++	tcp_dec_quickack_mode(sk);
+ 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
+ }
+ 
+@@ -1372,7 +1371,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ 			   sk, skb);
+ 
+ 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
+-		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
++		tcp_event_ack_sent(sk, rcv_nxt);
+ 
+ 	if (skb->len != tcp_header_size) {
+ 		tcp_event_data_sent(tp, sk);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index c93a2b9a91723..bf025b18dab51 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -202,6 +202,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
+ 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
+ 	.accept_ra_from_local	= 0,
+ 	.accept_ra_min_hop_limit= 1,
++	.accept_ra_min_lft	= 0,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -262,6 +263,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
+ 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
+ 	.accept_ra_from_local	= 0,
+ 	.accept_ra_min_hop_limit= 1,
++	.accept_ra_min_lft	= 0,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -2731,6 +2733,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
+ 		return;
+ 	}
+ 
++	if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
++		goto put;
++
+ 	/*
+ 	 *	Two things going on here:
+ 	 *	1) Add routes for on-link prefixes
+@@ -5602,6 +5607,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
+ 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
+ 	array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
++	array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ }
+ 
+ static inline size_t inet6_ifla6_size(void)
+@@ -6795,6 +6801,13 @@ static const struct ctl_table addrconf_sysctl[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec,
+ 	},
++	{
++		.procname	= "accept_ra_min_lft",
++		.data		= &ipv6_devconf.accept_ra_min_lft,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec,
++	},
+ 	{
+ 		.procname	= "accept_ra_pinfo",
+ 		.data		= &ipv6_devconf.accept_ra_pinfo,
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index a42be96ae209b..5b9337534a5f1 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1328,6 +1328,14 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
+ 		goto skip_defrtr;
+ 	}
+ 
++	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
++	if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
++		ND_PRINTK(2, info,
++			  "RA: router lifetime (%ds) is too short: %s\n",
++			  lifetime, skb->dev->name);
++		goto skip_defrtr;
++	}
++
+ 	/* Do not accept RA with source-addr found on local machine unless
+ 	 * accept_ra_from_local is set to true.
+ 	 */
+@@ -1340,8 +1348,6 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
+ 		goto skip_defrtr;
+ 	}
+ 
+-	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
+-
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	pref = ra_msg->icmph.icmp6_router_pref;
+ 	/* 10b is handled as if it were 00b (medium) */
+@@ -1517,6 +1523,9 @@ skip_linkparms:
+ 			if (ri->prefix_len == 0 &&
+ 			    !in6_dev->cnf.accept_ra_defrtr)
+ 				continue;
++			if (ri->lifetime != 0 &&
++			    ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
++				continue;
+ 			if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ 				continue;
+ 			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 3a88545a265d6..44b6949d72b22 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1640,9 +1640,12 @@ process:
+ 		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+-		drop_reason = tcp_inbound_md5_hash(sk, skb,
+-						   &hdr->saddr, &hdr->daddr,
+-						   AF_INET6, dif, sdif);
++		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
++			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
++		else
++			drop_reason = tcp_inbound_md5_hash(sk, skb,
++							   &hdr->saddr, &hdr->daddr,
++							   AF_INET6, dif, sdif);
+ 		if (drop_reason) {
+ 			sk_drops_add(sk, skb);
+ 			reqsk_put(req);
+@@ -1689,6 +1692,7 @@ process:
+ 			}
+ 			goto discard_and_relse;
+ 		}
++		nf_reset_ct(skb);
+ 		if (nsk == sk) {
+ 			reqsk_put(req);
+ 			tcp_v6_restore_cb(skb);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index ed8ebb6f59097..11f3d375cec00 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -507,7 +507,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	 */
+ 	if (len > INT_MAX - transhdrlen)
+ 		return -EMSGSIZE;
+-	ulen = len + transhdrlen;
+ 
+ 	/* Mirror BSD error message compatibility */
+ 	if (msg->msg_flags & MSG_OOB)
+@@ -628,6 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ back_from_confirm:
+ 	lock_sock(sk);
++	ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ 			      ulen, transhdrlen, &ipc6,
+ 			      &fl6, (struct rt6_info *)dst,
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 45e7a5d9c7d94..e883c41a2163b 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -566,6 +566,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ 	}
+ 
+ 	err = ieee80211_key_link(key, link, sta);
++	/* KRACK protection, shouldn't happen but just silently accept key */
++	if (err == -EALREADY)
++		err = 0;
+ 
+  out_unlock:
+ 	mutex_unlock(&local->sta_mtx);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 91633a0b723e0..f8cd94ba55ccc 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -676,7 +676,7 @@ struct ieee80211_if_mesh {
+ 	struct timer_list mesh_path_root_timer;
+ 
+ 	unsigned long wrkq_flags;
+-	unsigned long mbss_changed;
++	unsigned long mbss_changed[64 / BITS_PER_LONG];
+ 
+ 	bool userspace_handles_dfs;
+ 
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 21cf5a2089101..f719abe33a328 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -905,7 +905,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 	 */
+ 	if (ieee80211_key_identical(sdata, old_key, key)) {
+ 		ieee80211_key_free_unused(key);
+-		ret = 0;
++		ret = -EALREADY;
+ 		goto out;
+ 	}
+ 
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index af8c5fc2db149..e31c312c124a1 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1175,7 +1175,7 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ 
+ 	/* if we race with running work, worst case this work becomes a noop */
+ 	for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
+-		set_bit(bit, &ifmsh->mbss_changed);
++		set_bit(bit, ifmsh->mbss_changed);
+ 	set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
+ 	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+ }
+@@ -1257,7 +1257,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+ 
+ 	/* clear any mesh work (for next join) we may have accrued */
+ 	ifmsh->wrkq_flags = 0;
+-	ifmsh->mbss_changed = 0;
++	memset(ifmsh->mbss_changed, 0, sizeof(ifmsh->mbss_changed));
+ 
+ 	local->fif_other_bss--;
+ 	atomic_dec(&local->iff_allmultis);
+@@ -1724,9 +1724,9 @@ static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
+ 	u32 bit;
+ 	u64 changed = 0;
+ 
+-	for_each_set_bit(bit, &ifmsh->mbss_changed,
++	for_each_set_bit(bit, ifmsh->mbss_changed,
+ 			 sizeof(changed) * BITS_PER_BYTE) {
+-		clear_bit(bit, &ifmsh->mbss_changed);
++		clear_bit(bit, ifmsh->mbss_changed);
+ 		changed |= BIT(bit);
+ 	}
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index f93eb38ae0b8d..24b2833e0e475 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5107,9 +5107,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
+ 				continue;
+ 
+ 			valid_links |= BIT(link_id);
+-			if (assoc_data->link[link_id].disabled) {
++			if (assoc_data->link[link_id].disabled)
+ 				dormant_links |= BIT(link_id);
+-			} else if (link_id != assoc_data->assoc_link_id) {
++
++			if (link_id != assoc_data->assoc_link_id) {
+ 				err = ieee80211_sta_allocate_link(sta, link_id);
+ 				if (err)
+ 					goto out_err;
+@@ -5124,7 +5125,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
+ 		struct ieee80211_link_data *link;
+ 		struct link_sta_info *link_sta;
+ 
+-		if (!cbss || assoc_data->link[link_id].disabled)
++		if (!cbss)
+ 			continue;
+ 
+ 		link = sdata_dereference(sdata->link[link_id], sdata);
+@@ -5429,17 +5430,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ 	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ 		struct ieee80211_link_data *link;
+ 
+-		link = sdata_dereference(sdata->link[link_id], sdata);
+-		if (!link)
+-			continue;
+-
+ 		if (!assoc_data->link[link_id].bss)
+ 			continue;
+ 
+ 		resp.links[link_id].bss = assoc_data->link[link_id].bss;
+-		resp.links[link_id].addr = link->conf->addr;
++		ether_addr_copy(resp.links[link_id].addr,
++				assoc_data->link[link_id].addr);
+ 		resp.links[link_id].status = assoc_data->link[link_id].status;
+ 
++		link = sdata_dereference(sdata->link[link_id], sdata);
++		if (!link)
++			continue;
++
+ 		/* get uapsd queues configuration - same for all links */
+ 		resp.uapsd_queues = 0;
+ 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index b5a8aa4c1ebd6..d042d32beb4df 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -307,12 +307,6 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 		goto create_err;
+ 	}
+ 
+-	if (addr_l.id == 0) {
+-		NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
+-		err = -EINVAL;
+-		goto create_err;
+-	}
+-
+ 	err = mptcp_pm_parse_addr(raddr, info, &addr_r);
+ 	if (err < 0) {
+ 		NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 6947b4b2519c9..679c2732b5d01 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -407,7 +407,7 @@ drop:
+ 	return false;
+ }
+ 
+-static void mptcp_stop_timer(struct sock *sk)
++static void mptcp_stop_rtx_timer(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+@@ -894,6 +894,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
+ 	mptcp_sockopt_sync_locked(msk, ssk);
+ 	mptcp_subflow_joined(msk, ssk);
++	mptcp_stop_tout_timer(sk);
+ 	return true;
+ }
+ 
+@@ -913,12 +914,12 @@ static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list
+ 	}
+ }
+ 
+-static bool mptcp_timer_pending(struct sock *sk)
++static bool mptcp_rtx_timer_pending(struct sock *sk)
+ {
+ 	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
+ }
+ 
+-static void mptcp_reset_timer(struct sock *sk)
++static void mptcp_reset_rtx_timer(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	unsigned long tout;
+@@ -1052,10 +1053,10 @@ static void __mptcp_clean_una(struct sock *sk)
+ out:
+ 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
+ 	    snd_una == READ_ONCE(msk->write_seq)) {
+-		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
+-			mptcp_stop_timer(sk);
++		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
++			mptcp_stop_rtx_timer(sk);
+ 	} else {
+-		mptcp_reset_timer(sk);
++		mptcp_reset_rtx_timer(sk);
+ 	}
+ }
+ 
+@@ -1606,8 +1607,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+ 
+ out:
+ 	/* ensure the rtx timer is running */
+-	if (!mptcp_timer_pending(sk))
+-		mptcp_reset_timer(sk);
++	if (!mptcp_rtx_timer_pending(sk))
++		mptcp_reset_rtx_timer(sk);
+ 	if (do_check_data_fin)
+ 		mptcp_check_send_data_fin(sk);
+ }
+@@ -1663,8 +1664,8 @@ out:
+ 	if (copied) {
+ 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ 			 info.size_goal);
+-		if (!mptcp_timer_pending(sk))
+-			mptcp_reset_timer(sk);
++		if (!mptcp_rtx_timer_pending(sk))
++			mptcp_reset_rtx_timer(sk);
+ 
+ 		if (msk->snd_data_fin_enable &&
+ 		    msk->snd_nxt + 1 == msk->write_seq)
+@@ -2235,7 +2236,7 @@ static void mptcp_retransmit_timer(struct timer_list *t)
+ 	sock_put(sk);
+ }
+ 
+-static void mptcp_timeout_timer(struct timer_list *t)
++static void mptcp_tout_timer(struct timer_list *t)
+ {
+ 	struct sock *sk = from_timer(sk, t, sk_timer);
+ 
+@@ -2357,18 +2358,14 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	bool dispose_it, need_push = false;
+ 
+ 	/* If the first subflow moved to a close state before accept, e.g. due
+-	 * to an incoming reset, mptcp either:
+-	 * - if either the subflow or the msk are dead, destroy the context
+-	 *   (the subflow socket is deleted by inet_child_forget) and the msk
+-	 * - otherwise do nothing at the moment and take action at accept and/or
+-	 *   listener shutdown - user-space must be able to accept() the closed
+-	 *   socket.
++	 * to an incoming reset or listener shutdown, the subflow socket is
++	 * already deleted by inet_child_forget() and the mptcp socket can't
++	 * survive either.
+ 	 */
+-	if (msk->in_accept_queue && msk->first == ssk) {
+-		if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
+-			return;
+-
++	if (msk->in_accept_queue && msk->first == ssk &&
++	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
+ 		/* ensure later check in mptcp_worker() will dispose the msk */
++		mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1));
+ 		sock_set_flag(sk, SOCK_DEAD);
+ 		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ 		mptcp_subflow_drop_ctx(ssk);
+@@ -2435,6 +2432,22 @@ out:
+ 
+ 	if (need_push)
+ 		__mptcp_push_pending(sk, 0);
++
++	/* Catch every 'all subflows closed' scenario, including peers silently
++	 * closing them, e.g. due to timeout.
++	 * For established sockets, allow an additional timeout before closing,
++	 * as the protocol can still create more subflows.
++	 */
++	if (list_is_singular(&msk->conn_list) && msk->first &&
++	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
++		if (sk->sk_state != TCP_ESTABLISHED ||
++		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
++			inet_sk_state_store(sk, TCP_CLOSE);
++			mptcp_close_wake_up(sk);
++		} else {
++			mptcp_start_tout_timer(sk);
++		}
++	}
+ }
+ 
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+@@ -2478,23 +2491,14 @@ static void __mptcp_close_subflow(struct sock *sk)
+ 
+ }
+ 
+-static bool mptcp_should_close(const struct sock *sk)
++static bool mptcp_close_tout_expired(const struct sock *sk)
+ {
+-	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
+-	struct mptcp_subflow_context *subflow;
+-
+-	if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
+-		return true;
++	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
++	    sk->sk_state == TCP_CLOSE)
++		return false;
+ 
+-	/* if all subflows are in closed status don't bother with additional
+-	 * timeout
+-	 */
+-	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+-		if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
+-		    TCP_CLOSE)
+-			return false;
+-	}
+-	return true;
++	return time_after32(tcp_jiffies32,
++		  inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN);
+ }
+ 
+ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+@@ -2607,27 +2611,28 @@ static void __mptcp_retrans(struct sock *sk)
+ reset_timer:
+ 	mptcp_check_and_set_pending(sk);
+ 
+-	if (!mptcp_timer_pending(sk))
+-		mptcp_reset_timer(sk);
++	if (!mptcp_rtx_timer_pending(sk))
++		mptcp_reset_rtx_timer(sk);
+ }
+ 
+ /* schedule the timeout timer for the relevant event: either close timeout
+  * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
+  */
+-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout)
++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 	unsigned long timeout, close_timeout;
+ 
+-	if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
++	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
+ 		return;
+ 
+-	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
++	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
++			TCP_TIMEWAIT_LEN;
+ 
+ 	/* the close timeout takes precedence on the fail one, and here at least one of
+ 	 * them is active
+ 	 */
+-	timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
++	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
+ 
+ 	sk_reset_timer(sk, &sk->sk_timer, timeout);
+ }
+@@ -2646,8 +2651,6 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+ 	mptcp_subflow_reset(ssk);
+ 	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
+ 	unlock_sock_fast(ssk, slow);
+-
+-	mptcp_reset_timeout(msk, 0);
+ }
+ 
+ static void mptcp_do_fastclose(struct sock *sk)
+@@ -2684,18 +2687,14 @@ static void mptcp_worker(struct work_struct *work)
+ 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+ 		__mptcp_close_subflow(sk);
+ 
+-	/* There is no point in keeping around an orphaned sk timedout or
+-	 * closed, but we need the msk around to reply to incoming DATA_FIN,
+-	 * even if it is orphaned and in FIN_WAIT2 state
+-	 */
+-	if (sock_flag(sk, SOCK_DEAD)) {
+-		if (mptcp_should_close(sk))
+-			mptcp_do_fastclose(sk);
++	if (mptcp_close_tout_expired(sk)) {
++		mptcp_do_fastclose(sk);
++		mptcp_close_wake_up(sk);
++	}
+ 
+-		if (sk->sk_state == TCP_CLOSE) {
+-			__mptcp_destroy_sock(sk);
+-			goto unlock;
+-		}
++	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
++		__mptcp_destroy_sock(sk);
++		goto unlock;
+ 	}
+ 
+ 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+@@ -2710,7 +2709,7 @@ unlock:
+ 	sock_put(sk);
+ }
+ 
+-static int __mptcp_init_sock(struct sock *sk)
++static void __mptcp_init_sock(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+@@ -2736,9 +2735,7 @@ static int __mptcp_init_sock(struct sock *sk)
+ 
+ 	/* re-use the csk retrans timer for MPTCP-level retrans */
+ 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
+-	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
+-
+-	return 0;
++	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
+ }
+ 
+ static void mptcp_ca_reset(struct sock *sk)
+@@ -2756,11 +2753,8 @@ static void mptcp_ca_reset(struct sock *sk)
+ static int mptcp_init_sock(struct sock *sk)
+ {
+ 	struct net *net = sock_net(sk);
+-	int ret;
+ 
+-	ret = __mptcp_init_sock(sk);
+-	if (ret)
+-		return ret;
++	__mptcp_init_sock(sk);
+ 
+ 	if (!mptcp_is_enabled(net))
+ 		return -ENOPROTOOPT;
+@@ -2826,8 +2820,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 		} else {
+ 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ 			tcp_send_ack(ssk);
+-			if (!mptcp_timer_pending(sk))
+-				mptcp_reset_timer(sk);
++			if (!mptcp_rtx_timer_pending(sk))
++				mptcp_reset_rtx_timer(sk);
+ 		}
+ 		break;
+ 	}
+@@ -2910,7 +2904,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 
+ 	might_sleep();
+ 
+-	mptcp_stop_timer(sk);
++	mptcp_stop_rtx_timer(sk);
+ 	sk_stop_timer(sk, &sk->sk_timer);
+ 	msk->pm.status = 0;
+ 
+@@ -2992,7 +2986,6 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 
+ cleanup:
+ 	/* orphan all the subflows */
+-	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		bool slow = lock_sock_fast_nested(ssk);
+@@ -3029,7 +3022,7 @@ cleanup:
+ 		__mptcp_destroy_sock(sk);
+ 		do_cancel_work = true;
+ 	} else {
+-		mptcp_reset_timeout(msk, 0);
++		mptcp_start_tout_timer(sk);
+ 	}
+ 
+ 	return do_cancel_work;
+@@ -3092,8 +3085,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	mptcp_check_listen_stop(sk);
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 
+-	mptcp_stop_timer(sk);
+-	sk_stop_timer(sk, &sk->sk_timer);
++	mptcp_stop_rtx_timer(sk);
++	mptcp_stop_tout_timer(sk);
+ 
+ 	if (msk->token)
+ 		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+@@ -3416,24 +3409,21 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
+ 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+ }
+ 
+-void mptcp_subflow_process_delegated(struct sock *ssk)
++void mptcp_subflow_process_delegated(struct sock *ssk, long status)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct sock *sk = subflow->conn;
+ 
+-	if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
++	if (status & BIT(MPTCP_DELEGATE_SEND)) {
+ 		mptcp_data_lock(sk);
+ 		if (!sock_owned_by_user(sk))
+ 			__mptcp_subflow_push_pending(sk, ssk, true);
+ 		else
+ 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
+ 		mptcp_data_unlock(sk);
+-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+ 	}
+-	if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
++	if (status & BIT(MPTCP_DELEGATE_ACK))
+ 		schedule_3rdack_retransmission(ssk);
+-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+-	}
+ }
+ 
+ static int mptcp_hash(struct sock *sk)
+@@ -3939,14 +3929,17 @@ static int mptcp_napi_poll(struct napi_struct *napi, int budget)
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 
+ 		bh_lock_sock_nested(ssk);
+-		if (!sock_owned_by_user(ssk) &&
+-		    mptcp_subflow_has_delegated_action(subflow))
+-			mptcp_subflow_process_delegated(ssk);
+-		/* ... elsewhere tcp_release_cb_override already processed
+-		 * the action or will do at next release_sock().
+-		 * In both case must dequeue the subflow here - on the same
+-		 * CPU that scheduled it.
+-		 */
++		if (!sock_owned_by_user(ssk)) {
++			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
++		} else {
++			/* tcp_release_cb_override already processed
++			 * the action or will do so at the next release_sock().
++			 * In both cases the subflow must be dequeued here, on
++			 * the same CPU that scheduled it.
++			 */
++			smp_wmb();
++			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
++		}
+ 		bh_unlock_sock(ssk);
+ 		sock_put(ssk);
+ 
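Several hunks above rename the MPTCP retransmission helpers (mptcp_stop_timer and friends become *_rtx_timer) and rework the delegated-action handoff: producers set a dedicated MPTCP_DELEGATE_SCHEDULED bit alongside the action bit, and mptcp_napi_poll() consumes every pending action in a single xchg(). A minimal userspace sketch of that handoff using C11 atomics, with illustrative names:

/*
 * Userspace sketch (C11 atomics, illustrative names) of the delegated
 * action handoff: producers OR in a SCHEDULED bit plus the action bit;
 * the consumer grabs and clears everything with one atomic exchange,
 * so no action can be lost between the test and the clear.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DELEGATE_SCHEDULED	(1UL << 0)
#define DELEGATE_SEND		(1UL << 1)
#define DELEGATE_ACK		(1UL << 2)

static atomic_ulong delegated_status;

static void delegate(unsigned long action)
{
	unsigned long old;

	old = atomic_fetch_or(&delegated_status,
			      DELEGATE_SCHEDULED | action);
	if (!(old & DELEGATE_SCHEDULED))
		printf("first action: enqueue subflow for processing\n");
}

static void process_delegated(void)
{
	/* consume every pending action at once, like xchg(..., 0) */
	unsigned long status = atomic_exchange(&delegated_status, 0);

	if (status & DELEGATE_SEND)
		printf("push pending data\n");
	if (status & DELEGATE_ACK)
		printf("schedule the 3rd-ACK retransmission\n");
}

int main(void)
{
	delegate(DELEGATE_SEND);
	delegate(DELEGATE_ACK);	/* already scheduled, only adds the bit */
	process_delegated();
	return 0;
}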
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index ba2a873a4d2e6..072fe1bbc4045 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -440,9 +440,11 @@ struct mptcp_delegated_action {
+ 
+ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+ 
+-#define MPTCP_DELEGATE_SEND		0
+-#define MPTCP_DELEGATE_ACK		1
++#define MPTCP_DELEGATE_SCHEDULED	0
++#define MPTCP_DELEGATE_SEND		1
++#define MPTCP_DELEGATE_ACK		2
+ 
++#define MPTCP_DELEGATE_ACTIONS_MASK	(~BIT(MPTCP_DELEGATE_SCHEDULED))
+ /* MPTCP subflow context */
+ struct mptcp_subflow_context {
+ 	struct	list_head node;/* conn_list of subflows */
+@@ -559,23 +561,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
+ 	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
+ }
+ 
+-void mptcp_subflow_process_delegated(struct sock *ssk);
++void mptcp_subflow_process_delegated(struct sock *ssk, long actions);
+ 
+ static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
+ {
++	long old, set_bits = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(action);
+ 	struct mptcp_delegated_action *delegated;
+ 	bool schedule;
+ 
+ 	/* the caller held the subflow bh socket lock */
+ 	lockdep_assert_in_softirq();
+ 
+-	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
+-	 * ensures the below list check sees list updates done prior to status
+-	 * bit changes
++	/* The implied barrier pairs with tcp_release_cb_override()
++	 * mptcp_napi_poll(), and ensures the below list check sees list
++	 * updates done prior to delegated status bits changes
+ 	 */
+-	if (!test_and_set_bit(action, &subflow->delegated_status)) {
+-		/* still on delegated list from previous scheduling */
+-		if (!list_empty(&subflow->delegated_node))
++	old = set_mask_bits(&subflow->delegated_status, 0, set_bits);
++	if (!(old & BIT(MPTCP_DELEGATE_SCHEDULED))) {
++		if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
+ 			return;
+ 
+ 		delegated = this_cpu_ptr(&mptcp_delegated_actions);
+@@ -600,20 +603,6 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
+ 	return ret;
+ }
+ 
+-static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
+-{
+-	return !!READ_ONCE(subflow->delegated_status);
+-}
+-
+-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
+-{
+-	/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
+-	 * touching the status bit
+-	 */
+-	smp_wmb();
+-	clear_bit(action, &subflow->delegated_status);
+-}
+-
+ int mptcp_is_enabled(const struct net *net);
+ unsigned int mptcp_get_add_addr_timeout(const struct net *net);
+ int mptcp_is_checksum_enabled(const struct net *net);
+@@ -699,7 +688,29 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 
+ void mptcp_finish_connect(struct sock *sk);
+ void __mptcp_set_connected(struct sock *sk);
+-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout);
++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
++
++static inline void mptcp_stop_tout_timer(struct sock *sk)
++{
++	if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
++		return;
++
++	sk_stop_timer(sk, &sk->sk_timer);
++	inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
++}
++
++static inline void mptcp_set_close_tout(struct sock *sk, unsigned long tout)
++{
++	/* avoid 0 timestamp, as that means no close timeout */
++	inet_csk(sk)->icsk_mtup.probe_timestamp = tout ? : 1;
++}
++
++static inline void mptcp_start_tout_timer(struct sock *sk)
++{
++	mptcp_set_close_tout(sk, tcp_jiffies32);
++	mptcp_reset_tout_timer(mptcp_sk(sk), 0);
++}
++
+ static inline bool mptcp_is_fully_established(struct sock *sk)
+ {
+ 	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
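The new protocol.h helpers above reuse icsk_mtup.probe_timestamp as both the close-timeout timestamp and the "timer armed" flag, with mptcp_set_close_tout() mapping a genuine timestamp of 0 to 1 so that 0 can keep meaning "no close timeout". A tiny userspace sketch of that encoding, assuming a 32-bit jiffies-like clock:

/*
 * Userspace sketch of the "timestamp doubles as armed flag" encoding:
 * 0 means "no close timeout", so a genuine timestamp of 0 is nudged to
 * 1, exactly like the tout ? : 1 in mptcp_set_close_tout() above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t close_tout_stamp;	/* 0 == timer not armed */

static void set_close_tout(uint32_t tout)
{
	close_tout_stamp = tout ? tout : 1;	/* avoid the magic 0 */
}

static void stop_close_tout(void)
{
	close_tout_stamp = 0;
}

static int close_tout_armed(void)
{
	return close_tout_stamp != 0;
}

int main(void)
{
	set_close_tout(0);	/* wrapped clock: must still read as armed */
	printf("armed=%d stamp=%u\n", close_tout_armed(), close_tout_stamp);
	stop_close_tout();
	printf("armed=%d\n", close_tout_armed());
	return 0;
}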
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index c7bd99b8e7b7a..a7aed534b780c 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1226,7 +1226,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ 	WRITE_ONCE(subflow->fail_tout, fail_tout);
+ 	tcp_send_ack(ssk);
+ 
+-	mptcp_reset_timeout(msk, subflow->fail_tout);
++	mptcp_reset_tout_timer(msk, subflow->fail_tout);
+ }
+ 
+ static bool subflow_check_data_avail(struct sock *ssk)
+@@ -1552,6 +1552,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 	mptcp_sock_graft(ssk, sk->sk_socket);
+ 	iput(SOCK_INODE(sf));
+ 	WRITE_ONCE(msk->allow_infinite_fallback, false);
++	mptcp_stop_tout_timer(sk);
+ 	return 0;
+ 
+ failed_unlink:
+@@ -1955,9 +1956,15 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ static void tcp_release_cb_override(struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++	long status;
+ 
+-	if (mptcp_subflow_has_delegated_action(subflow))
+-		mptcp_subflow_process_delegated(ssk);
++	/* process and clear all the pending actions, but leave the subflow in
++	 * the napi queue. To respect locking, only the same CPU that originated
++	 * the action can touch the list. mptcp_napi_poll will take care of it.
++	 */
++	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
++	if (status)
++		mptcp_subflow_process_delegated(ssk, status);
+ 
+ 	tcp_release_cb(ssk);
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 264f2f87a4376..f6684c052e3ba 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1507,8 +1507,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
+ 	}
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
+-	result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
+-				    salen, 0);
++	result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
++				salen, 0);
+ 	if (result < 0) {
+ 		pr_err("Error connecting to the multicast addr\n");
+ 		goto error;
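The ip_vs_sync.c hunk swaps a raw sock->ops->connect() call for kernel_connect(). The wrapper copies the caller's sockaddr into a local buffer before invoking the protocol op, so hooks that rewrite the destination (e.g. BPF connect hooks) cannot scribble on caller-owned memory. A hedged userspace sketch of that pattern; proto_connect() here is a stand-in, not the real net/socket.c code:

/*
 * Userspace sketch of the wrapper's value (not the real net/socket.c
 * code): the address is copied to a local buffer before the protocol
 * op runs, so a hook that rewrites it cannot corrupt caller memory.
 */
#include <stdio.h>
#include <string.h>

/* dummy protocol op standing in for sock->ops->connect */
static int proto_connect(void *sock, void *addr, int len, int flags)
{
	(void)sock; (void)flags;
	memset(addr, 0, len);	/* pretend a hook rewrote the address */
	return 0;
}

static int kernel_connect_sketch(void *sock, const void *uaddr, int len,
				 int flags)
{
	char address[128];	/* sockaddr_storage stand-in */

	if (len < 0 || len > (int)sizeof(address))
		return -1;
	memcpy(address, uaddr, len);
	return proto_connect(sock, address, len, flags);
}

int main(void)
{
	char caller_addr[16] = "10.0.0.1";

	kernel_connect_sketch(NULL, caller_addr, sizeof(caller_addr), 0);
	printf("caller buffer intact: %s\n", caller_addr);
	return 0;
}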
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index b6bcc8f2f46b7..c6bd533983c1f 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+ /* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+ /* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
++/* cookie_ack   */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+ /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+ /* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+ /* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+ /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+ /* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
++/* cookie_echo  */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+ /* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+ /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+ /* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+@@ -412,6 +412,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ 			if (sh->vtag != ct->proto.sctp.vtag[dir])
+ 				goto out_unlock;
++		} else if (sch->type == SCTP_CID_COOKIE_ACK) {
++			ct->proto.sctp.init[dir] = 0;
++			ct->proto.sctp.init[!dir] = 0;
+ 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+ 			if (ct->proto.sctp.vtag[dir] == 0) {
+ 				pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+@@ -461,16 +464,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 		}
+ 
+ 		/* If it is an INIT or an INIT ACK note down the vtag */
+-		if (sch->type == SCTP_CID_INIT ||
+-		    sch->type == SCTP_CID_INIT_ACK) {
+-			struct sctp_inithdr _inithdr, *ih;
++		if (sch->type == SCTP_CID_INIT) {
++			struct sctp_inithdr _ih, *ih;
+ 
+-			ih = skb_header_pointer(skb, offset + sizeof(_sch),
+-						sizeof(_inithdr), &_inithdr);
+-			if (ih == NULL)
++			ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
++			if (!ih)
+ 				goto out_unlock;
+-			pr_debug("Setting vtag %x for dir %d\n",
+-				 ih->init_tag, !dir);
++
++			if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
++				ct->proto.sctp.init[!dir] = 0;
++			ct->proto.sctp.init[dir] = 1;
++
++			pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
+ 			ct->proto.sctp.vtag[!dir] = ih->init_tag;
+ 
+ 			/* don't renew timeout on init retransmit so
+@@ -481,6 +486,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 			    old_state == SCTP_CONNTRACK_CLOSED &&
+ 			    nf_ct_is_confirmed(ct))
+ 				ignore = true;
++		} else if (sch->type == SCTP_CID_INIT_ACK) {
++			struct sctp_inithdr _ih, *ih;
++			__be32 vtag;
++
++			ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
++			if (!ih)
++				goto out_unlock;
++
++			vtag = ct->proto.sctp.vtag[!dir];
++			if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
++				goto out_unlock;
++			/* collision */
++			if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
++			    vtag != ih->init_tag)
++				goto out_unlock;
++
++			pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
++			ct->proto.sctp.vtag[!dir] = ih->init_tag;
+ 		}
+ 
+ 		ct->proto.sctp.state = new_state;
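The sctp conntrack hunks above start tracking, per direction, whether an INIT was seen (ct->proto.sctp.init[dir]) and use that to decide when an INIT_ACK may overwrite the stored verification tag, rejecting mismatching tags and resolving simultaneous-open collisions. A simplified userspace sketch of the bookkeeping; the checks mirror the hunks but drop the surrounding state machine and packet parsing:

/*
 * Simplified userspace sketch of the per-direction INIT bookkeeping;
 * field names follow the conntrack ones but the logic is reduced.
 */
#include <stdint.h>
#include <stdio.h>

struct sctp_ct_sketch {
	uint32_t vtag[2];	/* verification tag per direction */
	uint8_t init[2];	/* did this direction send an INIT? */
};

static void on_init(struct sctp_ct_sketch *ct, int dir, uint32_t init_tag)
{
	if (ct->init[dir] && ct->init[!dir])
		ct->init[!dir] = 0;	/* prior simultaneous open resolved */
	ct->init[dir] = 1;
	ct->vtag[!dir] = init_tag;
}

static int on_init_ack(struct sctp_ct_sketch *ct, int dir, uint32_t init_tag)
{
	uint32_t vtag = ct->vtag[!dir];

	if (!ct->init[!dir] && vtag && vtag != init_tag)
		return -1;	/* unexpected tag change, drop */
	if (ct->init[dir] && ct->init[!dir] && vtag != init_tag)
		return -1;	/* colliding opens with mismatched tags */
	ct->vtag[!dir] = init_tag;
	return 0;
}

int main(void)
{
	struct sctp_ct_sketch ct = { { 0, 0 }, { 0, 0 } };

	on_init(&ct, 0, 0x1234);
	printf("init_ack accepted=%d\n", on_init_ack(&ct, 1, 0x5678) == 0);
	return 0;
}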
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 976a9b763b9bb..be5869366c7d3 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7868,24 +7868,14 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	return nft_delobj(&ctx, obj);
+ }
+ 
+-void nft_obj_notify(struct net *net, const struct nft_table *table,
+-		    struct nft_object *obj, u32 portid, u32 seq, int event,
+-		    u16 flags, int family, int report, gfp_t gfp)
++static void
++__nft_obj_notify(struct net *net, const struct nft_table *table,
++		 struct nft_object *obj, u32 portid, u32 seq, int event,
++		 u16 flags, int family, int report, gfp_t gfp)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct sk_buff *skb;
+ 	int err;
+-	char *buf = kasprintf(gfp, "%s:%u",
+-			      table->name, nft_net->base_seq);
+-
+-	audit_log_nfcfg(buf,
+-			family,
+-			obj->handle,
+-			event == NFT_MSG_NEWOBJ ?
+-				 AUDIT_NFT_OP_OBJ_REGISTER :
+-				 AUDIT_NFT_OP_OBJ_UNREGISTER,
+-			gfp);
+-	kfree(buf);
+ 
+ 	if (!report &&
+ 	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+@@ -7908,13 +7898,35 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ err:
+ 	nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
+ }
++
++void nft_obj_notify(struct net *net, const struct nft_table *table,
++		    struct nft_object *obj, u32 portid, u32 seq, int event,
++		    u16 flags, int family, int report, gfp_t gfp)
++{
++	struct nftables_pernet *nft_net = nft_pernet(net);
++	char *buf = kasprintf(gfp, "%s:%u",
++			      table->name, nft_net->base_seq);
++
++	audit_log_nfcfg(buf,
++			family,
++			obj->handle,
++			event == NFT_MSG_NEWOBJ ?
++				 AUDIT_NFT_OP_OBJ_REGISTER :
++				 AUDIT_NFT_OP_OBJ_UNREGISTER,
++			gfp);
++	kfree(buf);
++
++	__nft_obj_notify(net, table, obj, portid, seq, event,
++			 flags, family, report, gfp);
++}
+ EXPORT_SYMBOL_GPL(nft_obj_notify);
+ 
+ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
+ 				 struct nft_object *obj, int event)
+ {
+-	nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
+-		       ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
++	__nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
++			 ctx->seq, event, ctx->flags, ctx->family,
++			 ctx->report, GFP_KERNEL);
+ }
+ 
+ /*
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 8cb8009899479..120f6d395b98b 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -154,6 +154,17 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ 	return pkt->inneroff;
+ }
+ 
++static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++{
++	unsigned int len = priv->offset + priv->len;
++
++	/* data past ether src/dst requested, copy needed */
++	if (len > offsetof(struct ethhdr, h_proto))
++		return true;
++
++	return false;
++}
++
+ void nft_payload_eval(const struct nft_expr *expr,
+ 		      struct nft_regs *regs,
+ 		      const struct nft_pktinfo *pkt)
+@@ -172,7 +183,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ 			goto err;
+ 
+ 		if (skb_vlan_tag_present(skb) &&
+-		    priv->offset >= offsetof(struct ethhdr, h_proto)) {
++		    nft_payload_need_vlan_copy(priv)) {
+ 			if (!nft_payload_copy_vlan(dest, skb,
+ 						   priv->offset, priv->len))
+ 				goto err;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 487572dcd6144..2660ceab3759d 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -233,10 +233,9 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+ 	rb_erase(&rbe->node, &priv->root);
+ }
+ 
+-static int nft_rbtree_gc_elem(const struct nft_set *__set,
+-			      struct nft_rbtree *priv,
+-			      struct nft_rbtree_elem *rbe,
+-			      u8 genmask)
++static const struct nft_rbtree_elem *
++nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
++		   struct nft_rbtree_elem *rbe, u8 genmask)
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
+@@ -246,7 +245,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 
+ 	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
+ 	if (!gc)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	/* search for end interval coming before this element.
+ 	 * end intervals don't carry a timeout extension, they
+@@ -261,6 +260,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 		prev = rb_prev(prev);
+ 	}
+ 
++	rbe_prev = NULL;
+ 	if (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ 		nft_rbtree_gc_remove(net, set, priv, rbe_prev);
+@@ -272,7 +272,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 		 */
+ 		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ 		if (WARN_ON_ONCE(!gc))
+-			return -ENOMEM;
++			return ERR_PTR(-ENOMEM);
+ 
+ 		nft_trans_gc_elem_add(gc, rbe_prev);
+ 	}
+@@ -280,13 +280,13 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 	nft_rbtree_gc_remove(net, set, priv, rbe);
+ 	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ 	if (WARN_ON_ONCE(!gc))
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	nft_trans_gc_elem_add(gc, rbe);
+ 
+ 	nft_trans_gc_queue_sync_done(gc);
+ 
+-	return 0;
++	return rbe_prev;
+ }
+ 
+ static bool nft_rbtree_update_first(const struct nft_set *set,
+@@ -314,7 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	u8 cur_genmask = nft_genmask_cur(net);
+ 	u8 genmask = nft_genmask_next(net);
+-	int d, err;
++	int d;
+ 
+ 	/* Descend the tree to search for an existing element greater than the
+ 	 * key value to insert that is greater than the new element. This is the
+@@ -363,9 +363,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		 */
+ 		if (nft_set_elem_expired(&rbe->ext) &&
+ 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+-			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+-			if (err < 0)
+-				return err;
++			const struct nft_rbtree_elem *removed_end;
++
++			removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
++			if (IS_ERR(removed_end))
++				return PTR_ERR(removed_end);
++
++			if (removed_end == rbe_le || removed_end == rbe_ge)
++				return -EAGAIN;
+ 
+ 			continue;
+ 		}
+@@ -486,11 +491,18 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree_elem *rbe = elem->priv;
+ 	int err;
+ 
+-	write_lock_bh(&priv->lock);
+-	write_seqcount_begin(&priv->count);
+-	err = __nft_rbtree_insert(net, set, rbe, ext);
+-	write_seqcount_end(&priv->count);
+-	write_unlock_bh(&priv->lock);
++	do {
++		if (fatal_signal_pending(current))
++			return -EINTR;
++
++		cond_resched();
++
++		write_lock_bh(&priv->lock);
++		write_seqcount_begin(&priv->count);
++		err = __nft_rbtree_insert(net, set, rbe, ext);
++		write_seqcount_end(&priv->count);
++		write_unlock_bh(&priv->lock);
++	} while (err == -EAGAIN);
+ 
+ 	return err;
+ }
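In the rbtree set above, nft_rbtree_gc_elem() now returns the element it removed, and __nft_rbtree_insert() reports -EAGAIN when garbage collection freed one of the nodes its descent was anchored on; nft_rbtree_insert() then retries the whole insert, checking for fatal signals and rescheduling between attempts. A userspace sketch of that retry shape, with attempt_insert() standing in for the real helper:

/*
 * Userspace sketch of the retry loop added above. attempt_insert() is
 * a stand-in for __nft_rbtree_insert(); in the kernel the loop also
 * calls cond_resched() and takes the write lock around each attempt.
 */
#include <errno.h>
#include <stdio.h>

static int tries;

static int attempt_insert(void)
{
	if (++tries < 3)
		return -EAGAIN;	/* GC invalidated the lookup, retry */
	return 0;
}

static int fatal_signal_pending_stub(void)
{
	return 0;	/* the kernel checks the current task here */
}

int main(void)
{
	int err;

	do {
		if (fatal_signal_pending_stub())
			return EINTR;
		err = attempt_insert();
	} while (err == -EAGAIN);

	printf("inserted after %d tries, err=%d\n", tries, err);
	return 0;
}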
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 20082171f24a3..9c6bc47bc7f7b 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -352,7 +352,7 @@ static void netlink_overrun(struct sock *sk)
+ 	if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		if (!test_and_set_bit(NETLINK_S_CONGESTED,
+ 				      &nlk_sk(sk)->state)) {
+-			sk->sk_err = ENOBUFS;
++			WRITE_ONCE(sk->sk_err, ENOBUFS);
+ 			sk_error_report(sk);
+ 		}
+ 	}
+@@ -1577,7 +1577,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+ 		goto out;
+ 	}
+ 
+-	sk->sk_err = p->code;
++	WRITE_ONCE(sk->sk_err, p->code);
+ 	sk_error_report(sk);
+ out:
+ 	return ret;
+@@ -1966,7 +1966,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ 		ret = netlink_dump(sk);
+ 		if (ret) {
+-			sk->sk_err = -ret;
++			WRITE_ONCE(sk->sk_err, -ret);
+ 			sk_error_report(sk);
+ 		}
+ 	}
+@@ -2485,7 +2485,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ err_bad_put:
+ 	nlmsg_free(skb);
+ err_skb:
+-	NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
++	WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS);
+ 	sk_error_report(NETLINK_CB(in_skb).sk);
+ }
+ EXPORT_SYMBOL(netlink_ack);
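The af_netlink.c hunks wrap every sk->sk_err assignment in WRITE_ONCE() because the field is read locklessly elsewhere; the annotation guarantees a single untorn store. A userspace analogue using relaxed C11 atomics, which play the same role here as WRITE_ONCE()/READ_ONCE() do in the kernel:

/*
 * Userspace analogue of the WRITE_ONCE() conversions: sk->sk_err is
 * read without the socket lock elsewhere, so every write must be a
 * single untorn store. Relaxed C11 atomics model that behavior.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int sk_err;

static void report_error(int code)
{
	atomic_store_explicit(&sk_err, code, memory_order_relaxed);
	/* sk_error_report(sk) would wake the readers here */
}

static int peek_error(void)
{
	return atomic_load_explicit(&sk_err, memory_order_relaxed);
}

int main(void)
{
	report_error(ENOBUFS);
	printf("sk_err=%d\n", peek_error());
	return 0;
}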
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index f60e424e06076..6705bb895e239 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -1636,7 +1636,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+ 	timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
+ 	INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+ 
++	spin_lock(&llcp_devices_lock);
+ 	list_add(&local->list, &llcp_devices);
++	spin_unlock(&llcp_devices_lock);
+ 
+ 	return 0;
+ }
+diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
+index f0c477c5d1db4..d788c6d28986f 100644
+--- a/net/rds/tcp_connect.c
++++ b/net/rds/tcp_connect.c
+@@ -173,7 +173,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
+ 	 * own the socket
+ 	 */
+ 	rds_tcp_set_callbacks(sock, cp);
+-	ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
++	ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
+ 
+ 	rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
+ 	if (ret == -EINPROGRESS)
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 796529167e8d2..c45c192b78787 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1159,8 +1159,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
+ 		/* Add any peer addresses from the new association. */
+ 		list_for_each_entry(trans, &new->peer.transport_addr_list,
+ 				    transports)
+-			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
+-			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
++			if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ 						 GFP_ATOMIC, trans->state))
+ 				return -ENOMEM;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 7cf207706eb66..652af155966f1 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2450,6 +2450,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
+ 			if (trans) {
+ 				trans->hbinterval =
+ 				    msecs_to_jiffies(params->spp_hbinterval);
++				sctp_transport_reset_hb_timer(trans);
+ 			} else if (asoc) {
+ 				asoc->hbinterval =
+ 				    msecs_to_jiffies(params->spp_hbinterval);
+diff --git a/net/socket.c b/net/socket.c
+index b5639a6500158..95942c1786e50 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -732,6 +732,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
+ 	return ret;
+ }
+ 
++static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
++{
++	int err = security_socket_sendmsg(sock, msg,
++					  msg_data_left(msg));
++
++	return err ?: sock_sendmsg_nosec(sock, msg);
++}
++
+ /**
+  *	sock_sendmsg - send a message through @sock
+  *	@sock: socket
+@@ -742,10 +750,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
+  */
+ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+ {
+-	int err = security_socket_sendmsg(sock, msg,
+-					  msg_data_left(msg));
++	struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
++	struct sockaddr_storage address;
++	int ret;
+ 
+-	return err ?: sock_sendmsg_nosec(sock, msg);
++	if (msg->msg_name) {
++		memcpy(&address, msg->msg_name, msg->msg_namelen);
++		msg->msg_name = &address;
++	}
++
++	ret = __sock_sendmsg(sock, msg);
++	msg->msg_name = save_addr;
++
++	return ret;
+ }
+ EXPORT_SYMBOL(sock_sendmsg);
+ 
+@@ -1127,7 +1144,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	if (sock->type == SOCK_SEQPACKET)
+ 		msg.msg_flags |= MSG_EOR;
+ 
+-	res = sock_sendmsg(sock, &msg);
++	res = __sock_sendmsg(sock, &msg);
+ 	*from = msg.msg_iter;
+ 	return res;
+ }
+@@ -2132,7 +2149,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
+ 	if (sock->file->f_flags & O_NONBLOCK)
+ 		flags |= MSG_DONTWAIT;
+ 	msg.msg_flags = flags;
+-	err = sock_sendmsg(sock, &msg);
++	err = __sock_sendmsg(sock, &msg);
+ 
+ out_put:
+ 	fput_light(sock->file, fput_needed);
+@@ -2492,7 +2509,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
+ 		err = sock_sendmsg_nosec(sock, msg_sys);
+ 		goto out_freectl;
+ 	}
+-	err = sock_sendmsg(sock, msg_sys);
++	err = __sock_sendmsg(sock, msg_sys);
+ 	/*
+ 	 * If this is sendmmsg() and sending to current destination address was
+ 	 * successful, remember it.
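The net/socket.c change above splits out __sock_sendmsg() and makes the exported sock_sendmsg() copy msg->msg_name into an on-stack sockaddr_storage, restoring the caller's pointer afterwards, so address-rewriting hooks only ever touch the kernel-local copy. A userspace sketch with deliberately simplified types:

/*
 * Userspace sketch of the save/copy/restore added to sock_sendmsg()
 * above: the destination address is copied to an on-stack buffer, a
 * (simulated) hook rewrites only that copy, and the caller's pointer
 * is restored before returning.
 */
#include <stdio.h>
#include <string.h>

struct msghdr_sketch {
	void *msg_name;
	int msg_namelen;
};

/* stand-in for the send path; pretend a hook rewrote the address */
static int do_sendmsg(struct msghdr_sketch *msg)
{
	if (msg->msg_name)
		memset(msg->msg_name, 0, msg->msg_namelen);
	return 0;
}

static int sock_sendmsg_sketch(struct msghdr_sketch *msg)
{
	void *save_addr = msg->msg_name;
	char address[128];	/* sockaddr_storage stand-in */
	int ret;

	if (msg->msg_name) {
		memcpy(address, msg->msg_name, msg->msg_namelen);
		msg->msg_name = address;
	}
	ret = do_sendmsg(msg);
	msg->msg_name = save_addr;	/* caller's pointer intact */
	return ret;
}

int main(void)
{
	char dst[16] = "192.0.2.1";
	struct msghdr_sketch msg = { dst, sizeof(dst) };

	sock_sendmsg_sketch(&msg);
	printf("caller address untouched: %s\n", dst);
	return 0;
}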
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 302fd749c4249..43c3f1c971b8f 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1441,14 +1441,14 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+ 	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ 	struct tipc_key key;
+ 
+-	spin_lock(&tx->lock);
++	spin_lock_bh(&tx->lock);
+ 	key = tx->key;
+ 	WARN_ON(!key.active || tx_key != key.active);
+ 
+ 	/* Free the active key */
+ 	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ 	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+-	spin_unlock(&tx->lock);
++	spin_unlock_bh(&tx->lock);
+ 
+ 	pr_warn("%s: key is revoked\n", tx->name);
+ 	return -EKEYREVOKED;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 25bc2e50a0615..64e8616171104 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1181,16 +1181,11 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
+ 
+-void cfg80211_cqm_config_free(struct wireless_dev *wdev)
+-{
+-	kfree(wdev->cqm_config);
+-	wdev->cqm_config = NULL;
+-}
+-
+ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
+ 				      bool unregister_netdev)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_cqm_config *cqm_config;
+ 	unsigned int link_id;
+ 
+ 	ASSERT_RTNL();
+@@ -1227,7 +1222,10 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
+ 	kfree_sensitive(wdev->wext.keys);
+ 	wdev->wext.keys = NULL;
+ #endif
+-	cfg80211_cqm_config_free(wdev);
++	wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work);
++	/* deleted from the list, so can't be found from nl80211 any more */
++	cqm_config = rcu_access_pointer(wdev->cqm_config);
++	kfree_rcu(cqm_config, rcu_head);
+ 
+ 	/*
+ 	 * Ensure that all events have been processed and
+@@ -1379,6 +1377,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
+ 	wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+ #endif
+ 
++	wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work);
++
+ 	if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
+ 		wdev->ps = true;
+ 	else
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 8a807b609ef73..86f209abc06ab 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -295,12 +295,17 @@ struct cfg80211_beacon_registration {
+ };
+ 
+ struct cfg80211_cqm_config {
++	struct rcu_head rcu_head;
+ 	u32 rssi_hyst;
+ 	s32 last_rssi_event_value;
++	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
+ 	int n_rssi_thresholds;
+ 	s32 rssi_thresholds[];
+ };
+ 
++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy,
++				   struct wiphy_work *work);
++
+ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
+ 
+ /* free object */
+@@ -566,8 +571,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ #define CFG80211_DEV_WARN_ON(cond)	({bool __r = (cond); __r; })
+ #endif
+ 
+-void cfg80211_cqm_config_free(struct wireless_dev *wdev);
+-
+ void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
+ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
+ void cfg80211_pmsr_free_wk(struct work_struct *work);
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 775cac4d61006..3e2c398abddcc 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -52,7 +52,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
+ 		cr.links[link_id].bssid = data->links[link_id].bss->bssid;
+ 		cr.links[link_id].addr = data->links[link_id].addr;
+ 		/* need to have local link addresses for MLO connections */
+-		WARN_ON(cr.ap_mld_addr && !cr.links[link_id].addr);
++		WARN_ON(cr.ap_mld_addr &&
++			!is_valid_ether_addr(cr.links[link_id].addr));
+ 
+ 		BUG_ON(!cr.links[link_id].bss->channel);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4dcbc40d07c85..705d1cf048309 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12797,7 +12797,8 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
+ }
+ 
+ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+-				    struct net_device *dev)
++				    struct net_device *dev,
++				    struct cfg80211_cqm_config *cqm_config)
+ {
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	s32 last, low, high;
+@@ -12806,7 +12807,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	int err;
+ 
+ 	/* RSSI reporting disabled? */
+-	if (!wdev->cqm_config)
++	if (!cqm_config)
+ 		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+ 
+ 	/*
+@@ -12815,7 +12816,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	 * connection is established and enough beacons received to calculate
+ 	 * the average.
+ 	 */
+-	if (!wdev->cqm_config->last_rssi_event_value &&
++	if (!cqm_config->last_rssi_event_value &&
+ 	    wdev->links[0].client.current_bss &&
+ 	    rdev->ops->get_station) {
+ 		struct station_info sinfo = {};
+@@ -12829,30 +12830,30 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 
+ 		cfg80211_sinfo_release_content(&sinfo);
+ 		if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
+-			wdev->cqm_config->last_rssi_event_value =
++			cqm_config->last_rssi_event_value =
+ 				(s8) sinfo.rx_beacon_signal_avg;
+ 	}
+ 
+-	last = wdev->cqm_config->last_rssi_event_value;
+-	hyst = wdev->cqm_config->rssi_hyst;
+-	n = wdev->cqm_config->n_rssi_thresholds;
++	last = cqm_config->last_rssi_event_value;
++	hyst = cqm_config->rssi_hyst;
++	n = cqm_config->n_rssi_thresholds;
+ 
+ 	for (i = 0; i < n; i++) {
+ 		i = array_index_nospec(i, n);
+-		if (last < wdev->cqm_config->rssi_thresholds[i])
++		if (last < cqm_config->rssi_thresholds[i])
+ 			break;
+ 	}
+ 
+ 	low_index = i - 1;
+ 	if (low_index >= 0) {
+ 		low_index = array_index_nospec(low_index, n);
+-		low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
++		low = cqm_config->rssi_thresholds[low_index] - hyst;
+ 	} else {
+ 		low = S32_MIN;
+ 	}
+ 	if (i < n) {
+ 		i = array_index_nospec(i, n);
+-		high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
++		high = cqm_config->rssi_thresholds[i] + hyst - 1;
+ 	} else {
+ 		high = S32_MAX;
+ 	}
+@@ -12865,6 +12866,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 				u32 hysteresis)
+ {
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
++	struct cfg80211_cqm_config *cqm_config = NULL, *old;
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	int i, err;
+@@ -12882,10 +12884,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ 		return -EOPNOTSUPP;
+ 
+-	wdev_lock(wdev);
+-	cfg80211_cqm_config_free(wdev);
+-	wdev_unlock(wdev);
+-
+ 	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+ 		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+ 			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+@@ -12902,9 +12900,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		n_thresholds = 0;
+ 
+ 	wdev_lock(wdev);
+-	if (n_thresholds) {
+-		struct cfg80211_cqm_config *cqm_config;
++	old = rcu_dereference_protected(wdev->cqm_config,
++					lockdep_is_held(&wdev->mtx));
+ 
++	if (n_thresholds) {
+ 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ 						 n_thresholds),
+ 				     GFP_KERNEL);
+@@ -12919,11 +12918,18 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		       flex_array_size(cqm_config, rssi_thresholds,
+ 				       n_thresholds));
+ 
+-		wdev->cqm_config = cqm_config;
++		rcu_assign_pointer(wdev->cqm_config, cqm_config);
++	} else {
++		RCU_INIT_POINTER(wdev->cqm_config, NULL);
+ 	}
+ 
+-	err = cfg80211_cqm_rssi_update(rdev, dev);
+-
++	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++	if (err) {
++		rcu_assign_pointer(wdev->cqm_config, old);
++		kfree_rcu(cqm_config, rcu_head);
++	} else {
++		kfree_rcu(old, rcu_head);
++	}
+ unlock:
+ 	wdev_unlock(wdev);
+ 
+@@ -19074,9 +19080,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 			      enum nl80211_cqm_rssi_threshold_event rssi_event,
+ 			      s32 rssi_level, gfp_t gfp)
+ {
+-	struct sk_buff *msg;
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+-	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_cqm_config *cqm_config;
+ 
+ 	trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
+ 
+@@ -19084,18 +19089,41 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 		    rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
+ 		return;
+ 
+-	if (wdev->cqm_config) {
+-		wdev->cqm_config->last_rssi_event_value = rssi_level;
++	rcu_read_lock();
++	cqm_config = rcu_dereference(wdev->cqm_config);
++	if (cqm_config) {
++		cqm_config->last_rssi_event_value = rssi_level;
++		cqm_config->last_rssi_event_type = rssi_event;
++		wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work);
++	}
++	rcu_read_unlock();
++}
++EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
++
++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
++						 cqm_rssi_work);
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	enum nl80211_cqm_rssi_threshold_event rssi_event;
++	struct cfg80211_cqm_config *cqm_config;
++	struct sk_buff *msg;
++	s32 rssi_level;
+ 
+-		cfg80211_cqm_rssi_update(rdev, dev);
++	wdev_lock(wdev);
++	cqm_config = rcu_dereference_protected(wdev->cqm_config,
++					       lockdep_is_held(&wdev->mtx));
++	if (!wdev->cqm_config)
++		goto unlock;
+ 
+-		if (rssi_level == 0)
+-			rssi_level = wdev->cqm_config->last_rssi_event_value;
+-	}
++	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ 
+-	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
++	rssi_level = cqm_config->last_rssi_event_value;
++	rssi_event = cqm_config->last_rssi_event_type;
++
++	msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL);
+ 	if (!msg)
+-		return;
++		goto unlock;
+ 
+ 	if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ 			rssi_event))
+@@ -19105,14 +19133,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 				      rssi_level))
+ 		goto nla_put_failure;
+ 
+-	cfg80211_send_cqm(msg, gfp);
++	cfg80211_send_cqm(msg, GFP_KERNEL);
+ 
+-	return;
++	goto unlock;
+ 
+  nla_put_failure:
+ 	nlmsg_free(msg);
++ unlock:
++	wdev_unlock(wdev);
+ }
+-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
+ 
+ void cfg80211_cqm_txe_notify(struct net_device *dev,
+ 			     const u8 *peer, u32 num_packets,
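The cfg80211/nl80211 hunks above move wdev->cqm_config under RCU: updates build the new config, publish it with rcu_assign_pointer(), and free the previous one with kfree_rcu() (restoring the old pointer if the driver update fails), while the notify path defers the actual netlink message to a wiphy work item. A much-reduced, single-threaded userspace sketch of the publish-then-free-old shape; with no concurrent readers a plain free() stands in for the grace period, and the error-path restore is omitted:

/*
 * Single-threaded userspace sketch: build the new config, publish it
 * with a release store (the rcu_assign_pointer() analogue), then
 * dispose of the old one. The kernel defers the free across an RCU
 * grace period with kfree_rcu(); free() stands in here.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cqm_config_sketch {
	int n_thresholds;
};

static _Atomic(struct cqm_config_sketch *) cqm_config;

static int update_config(int n_thresholds)
{
	struct cqm_config_sketch *old, *new_cfg = NULL;

	old = atomic_load_explicit(&cqm_config, memory_order_acquire);
	if (n_thresholds) {
		new_cfg = calloc(1, sizeof(*new_cfg));
		if (!new_cfg)
			return -1;
		new_cfg->n_thresholds = n_thresholds;
	}
	/* publish only a fully initialized object */
	atomic_store_explicit(&cqm_config, new_cfg, memory_order_release);
	free(old);	/* kfree_rcu() would wait a grace period first */
	return 0;
}

int main(void)
{
	update_config(3);
	update_config(0);	/* disable: readers now observe NULL */
	printf("config=%p\n", (void *)atomic_load(&cqm_config));
	return 0;
}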
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 38120f932b0dc..7056751c29b1f 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1604,7 +1604,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 	/* First handle the "special" cases */
+ 	if (sym_is(name, namelen, "usb"))
+ 		do_usb_table(symval, sym->st_size, mod);
+-	if (sym_is(name, namelen, "of"))
++	else if (sym_is(name, namelen, "of"))
+ 		do_of_table(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp"))
+ 		do_pnp_device_entry(symval, sym->st_size, mod);
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index c17660bf5f347..6ef7bde551263 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -29,9 +29,11 @@ config IMA
+ 	  to learn more about IMA.
+ 	  If unsure, say N.
+ 
++if IMA
++
+ config IMA_KEXEC
+ 	bool "Enable carrying the IMA measurement list across a soft boot"
+-	depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
++	depends on TCG_TPM && HAVE_IMA_KEXEC
+ 	default n
+ 	help
+ 	   TPM PCRs are only reset on a hard reboot.  In order to validate
+@@ -43,7 +45,6 @@ config IMA_KEXEC
+ 
+ config IMA_MEASURE_PCR_IDX
+ 	int
+-	depends on IMA
+ 	range 8 14
+ 	default 10
+ 	help
+@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
+ 
+ config IMA_LSM_RULES
+ 	bool
+-	depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
++	depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
+ 	default y
+ 	help
+ 	  Disabling this option will disregard LSM based policy rules.
+@@ -61,7 +62,6 @@ config IMA_LSM_RULES
+ choice
+ 	prompt "Default template"
+ 	default IMA_NG_TEMPLATE
+-	depends on IMA
+ 	help
+ 	  Select the default IMA measurement template.
+ 
+@@ -80,14 +80,12 @@ endchoice
+ 
+ config IMA_DEFAULT_TEMPLATE
+ 	string
+-	depends on IMA
+ 	default "ima-ng" if IMA_NG_TEMPLATE
+ 	default "ima-sig" if IMA_SIG_TEMPLATE
+ 
+ choice
+ 	prompt "Default integrity hash algorithm"
+ 	default IMA_DEFAULT_HASH_SHA1
+-	depends on IMA
+ 	help
+ 	   Select the default hash algorithm used for the measurement
+ 	   list, integrity appraisal and audit log.  The compiled default
+@@ -117,7 +115,6 @@ endchoice
+ 
+ config IMA_DEFAULT_HASH
+ 	string
+-	depends on IMA
+ 	default "sha1" if IMA_DEFAULT_HASH_SHA1
+ 	default "sha256" if IMA_DEFAULT_HASH_SHA256
+ 	default "sha512" if IMA_DEFAULT_HASH_SHA512
+@@ -126,7 +123,6 @@ config IMA_DEFAULT_HASH
+ 
+ config IMA_WRITE_POLICY
+ 	bool "Enable multiple writes to the IMA policy"
+-	depends on IMA
+ 	default n
+ 	help
+ 	  IMA policy can now be updated multiple times.  The new rules get
+@@ -137,7 +133,6 @@ config IMA_WRITE_POLICY
+ 
+ config IMA_READ_POLICY
+ 	bool "Enable reading back the current IMA policy"
+-	depends on IMA
+ 	default y if IMA_WRITE_POLICY
+ 	default n if !IMA_WRITE_POLICY
+ 	help
+@@ -147,7 +142,6 @@ config IMA_READ_POLICY
+ 
+ config IMA_APPRAISE
+ 	bool "Appraise integrity measurements"
+-	depends on IMA
+ 	default n
+ 	help
+ 	  This option enables local measurement integrity appraisal.
+@@ -268,7 +262,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ config IMA_BLACKLIST_KEYRING
+ 	bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
+ 	depends on SYSTEM_TRUSTED_KEYRING
+-	depends on IMA_TRUSTED_KEYRING
++	depends on INTEGRITY_TRUSTED_KEYRING
+ 	default n
+ 	help
+ 	   This option creates an IMA blacklist keyring, which contains all
+@@ -278,7 +272,7 @@ config IMA_BLACKLIST_KEYRING
+ 
+ config IMA_LOAD_X509
+ 	bool "Load X509 certificate onto the '.ima' trusted keyring"
+-	depends on IMA_TRUSTED_KEYRING
++	depends on INTEGRITY_TRUSTED_KEYRING
+ 	default n
+ 	help
+ 	   File signature verification is based on the public keys
+@@ -303,7 +297,6 @@ config IMA_APPRAISE_SIGNED_INIT
+ 
+ config IMA_MEASURE_ASYMMETRIC_KEYS
+ 	bool
+-	depends on IMA
+ 	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+ 	default y
+ 
+@@ -322,7 +315,8 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT
+ 
+ config IMA_DISABLE_HTABLE
+ 	bool "Disable htable to allow measurement of duplicate records"
+-	depends on IMA
+ 	default n
+ 	help
+ 	   This option disables htable to allow measurement of duplicate records.
++
++endif
+diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
+index 11607c5f5d5a8..9c746e4edef71 100644
+--- a/sound/soc/soc-utils.c
++++ b/sound/soc/soc-utils.c
+@@ -217,6 +217,7 @@ int snd_soc_dai_is_dummy(struct snd_soc_dai *dai)
+ 		return 1;
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy);
+ 
+ int snd_soc_component_is_dummy(struct snd_soc_component *component)
+ {
+diff --git a/sound/soc/tegra/tegra_audio_graph_card.c b/sound/soc/tegra/tegra_audio_graph_card.c
+index 1f2c5018bf5ac..4737e776d3837 100644
+--- a/sound/soc/tegra/tegra_audio_graph_card.c
++++ b/sound/soc/tegra/tegra_audio_graph_card.c
+@@ -10,6 +10,7 @@
+ #include <linux/platform_device.h>
+ #include <sound/graph_card.h>
+ #include <sound/pcm_params.h>
++#include <sound/soc-dai.h>
+ 
+ #define MAX_PLLA_OUT0_DIV 128
+ 
+@@ -44,6 +45,21 @@ struct tegra_audio_cdata {
+ 	unsigned int plla_out0_rates[NUM_RATE_TYPE];
+ };
+ 
++static bool need_clk_update(struct snd_soc_dai *dai)
++{
++	if (snd_soc_dai_is_dummy(dai) ||
++	    !dai->driver->ops ||
++	    !dai->driver->name)
++		return false;
++
++	if (strstr(dai->driver->name, "I2S") ||
++	    strstr(dai->driver->name, "DMIC") ||
++	    strstr(dai->driver->name, "DSPK"))
++		return true;
++
++	return false;
++}
++
+ /* Setup PLL clock as per the given sample rate */
+ static int tegra_audio_graph_update_pll(struct snd_pcm_substream *substream,
+ 					struct snd_pcm_hw_params *params)
+@@ -140,19 +156,7 @@ static int tegra_audio_graph_hw_params(struct snd_pcm_substream *substream,
+ 	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ 	int err;
+ 
+-	/*
+-	 * This gets called for each DAI link (FE or BE) when DPCM is used.
+-	 * We may not want to update PLLA rate for each call. So PLLA update
+-	 * must be restricted to external I/O links (I2S, DMIC or DSPK) since
+-	 * they actually depend on it. I/O modules update their clocks in
+-	 * hw_param() of their respective component driver and PLLA rate
+-	 * update here helps them to derive appropriate rates.
+-	 *
+-	 * TODO: When more HW accelerators get added (like sample rate
+-	 * converter, volume gain controller etc., which don't really
+-	 * depend on PLLA) we need a better way to filter here.
+-	 */
+-	if (cpu_dai->driver->ops && rtd->dai_link->no_pcm) {
++	if (need_clk_update(cpu_dai)) {
+ 		err = tegra_audio_graph_update_pll(substream, params);
+ 		if (err)
+ 			return err;
+diff --git a/tools/testing/selftests/netfilter/.gitignore b/tools/testing/selftests/netfilter/.gitignore
+index 4cb887b574138..4b2928e1c19d8 100644
+--- a/tools/testing/selftests/netfilter/.gitignore
++++ b/tools/testing/selftests/netfilter/.gitignore
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ nf-queue
+ connect_close
++audit_logread
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 3686bfa6c58d7..321db8850da00 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -6,13 +6,13 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	nft_concat_range.sh nft_conntrack_helper.sh \
+ 	nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+-	conntrack_vrf.sh nft_synproxy.sh rpath.sh
++	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
+ 
+ HOSTPKG_CONFIG := pkg-config
+ 
+ CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
+ LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+ 
+-TEST_GEN_FILES =  nf-queue connect_close
++TEST_GEN_FILES =  nf-queue connect_close audit_logread
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/audit_logread.c b/tools/testing/selftests/netfilter/audit_logread.c
+new file mode 100644
+index 0000000000000..a0a880fc2d9de
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/audit_logread.c
+@@ -0,0 +1,165 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#define _GNU_SOURCE
++#include <errno.h>
++#include <fcntl.h>
++#include <poll.h>
++#include <signal.h>
++#include <stdint.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <sys/socket.h>
++#include <unistd.h>
++#include <linux/audit.h>
++#include <linux/netlink.h>
++
++static int fd;
++
++#define MAX_AUDIT_MESSAGE_LENGTH	8970
++struct audit_message {
++	struct nlmsghdr nlh;
++	union {
++		struct audit_status s;
++		char data[MAX_AUDIT_MESSAGE_LENGTH];
++	} u;
++};
++
++int audit_recv(int fd, struct audit_message *rep)
++{
++	struct sockaddr_nl addr;
++	socklen_t addrlen = sizeof(addr);
++	int ret;
++
++	do {
++		ret = recvfrom(fd, rep, sizeof(*rep), 0,
++			       (struct sockaddr *)&addr, &addrlen);
++	} while (ret < 0 && errno == EINTR);
++
++	if (ret < 0 ||
++	    addrlen != sizeof(addr) ||
++	    addr.nl_pid != 0 ||
++	    rep->nlh.nlmsg_type == NLMSG_ERROR) /* short-cut for now */
++		return -1;
++
++	return ret;
++}
++
++int audit_send(int fd, uint16_t type, uint32_t key, uint32_t val)
++{
++	static int seq = 0;
++	struct audit_message msg = {
++		.nlh = {
++			.nlmsg_len   = NLMSG_SPACE(sizeof(msg.u.s)),
++			.nlmsg_type  = type,
++			.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
++			.nlmsg_seq   = ++seq,
++		},
++		.u.s = {
++			.mask    = key,
++			.enabled = key == AUDIT_STATUS_ENABLED ? val : 0,
++			.pid     = key == AUDIT_STATUS_PID ? val : 0,
++		}
++	};
++	struct sockaddr_nl addr = {
++		.nl_family = AF_NETLINK,
++	};
++	int ret;
++
++	do {
++		ret = sendto(fd, &msg, msg.nlh.nlmsg_len, 0,
++			     (struct sockaddr *)&addr, sizeof(addr));
++	} while (ret < 0 && errno == EINTR);
++
++	if (ret != (int)msg.nlh.nlmsg_len)
++		return -1;
++	return 0;
++}
++
++int audit_set(int fd, uint32_t key, uint32_t val)
++{
++	struct audit_message rep = { 0 };
++	int ret;
++
++	ret = audit_send(fd, AUDIT_SET, key, val);
++	if (ret)
++		return ret;
++
++	ret = audit_recv(fd, &rep);
++	if (ret < 0)
++		return ret;
++	return 0;
++}
++
++int readlog(int fd)
++{
++	struct audit_message rep = { 0 };
++	int ret = audit_recv(fd, &rep);
++	const char *sep = "";
++	char *k, *v;
++
++	if (ret < 0)
++		return ret;
++
++	if (rep.nlh.nlmsg_type != AUDIT_NETFILTER_CFG)
++		return 0;
++
++	/* skip the initial "audit(...): " part */
++	strtok(rep.u.data, " ");
++
++	while ((k = strtok(NULL, "="))) {
++		v = strtok(NULL, " ");
++
++		/* these vary and/or are uninteresting, ignore */
++		if (!strcmp(k, "pid") ||
++		    !strcmp(k, "comm") ||
++		    !strcmp(k, "subj"))
++			continue;
++
++		/* strip the varying sequence number */
++		if (!strcmp(k, "table"))
++			*strchrnul(v, ':') = '\0';
++
++		printf("%s%s=%s", sep, k, v);
++		sep = " ";
++	}
++	if (*sep) {
++		printf("\n");
++		fflush(stdout);
++	}
++	return 0;
++}
++
++void cleanup(int sig)
++{
++	audit_set(fd, AUDIT_STATUS_ENABLED, 0);
++	close(fd);
++	if (sig)
++		exit(0);
++}
++
++int main(int argc, char **argv)
++{
++	struct sigaction act = {
++		.sa_handler = cleanup,
++	};
++
++	fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
++	if (fd < 0) {
++		perror("Can't open netlink socket");
++		return -1;
++	}
++
++	if (sigaction(SIGTERM, &act, NULL) < 0 ||
++	    sigaction(SIGINT, &act, NULL) < 0) {
++		perror("Can't set signal handler");
++		close(fd);
++		return -1;
++	}
++
++	audit_set(fd, AUDIT_STATUS_ENABLED, 1);
++	audit_set(fd, AUDIT_STATUS_PID, getpid());
++
++	while (1)
++		readlog(fd);
++}
+diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
+index 4faf2ce021d90..7c42b1b2c69b4 100644
+--- a/tools/testing/selftests/netfilter/config
++++ b/tools/testing/selftests/netfilter/config
+@@ -6,3 +6,4 @@ CONFIG_NFT_REDIR=m
+ CONFIG_NFT_MASQ=m
+ CONFIG_NFT_FLOW_OFFLOAD=m
+ CONFIG_NF_CT_NETLINK=m
++CONFIG_AUDIT=y
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+new file mode 100755
+index 0000000000000..bb34329e02a7f
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -0,0 +1,193 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Check that audit logs generated for nft commands are as expected.
++
++SKIP_RC=4
++RC=0
++
++nft --version >/dev/null 2>&1 || {
++	echo "SKIP: missing nft tool"
++	exit $SKIP_RC
++}
++
++logfile=$(mktemp)
++rulefile=$(mktemp)
++echo "logging into $logfile"
++./audit_logread >"$logfile" &
++logread_pid=$!
++trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT
++exec 3<"$logfile"
++
++do_test() { # (cmd, log)
++	echo -n "testing for cmd: $1 ... "
++	cat <&3 >/dev/null
++	$1 >/dev/null || exit 1
++	sleep 0.1
++	res=$(diff -a -u <(echo "$2") - <&3)
++	[ $? -eq 0 ] && { echo "OK"; return; }
++	echo "FAIL"
++	grep -v '^\(---\|+++\|@@\)' <<< "$res"
++	((RC--))
++}
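++
++# Note: do_test() first drains any stale log lines via fd 3, runs the
++# given nft command, then diffs the fresh audit log against the expected
++# string. A hypothetical extra case would follow the same pattern:
++#
++#	do_test 'nft add table t3' \
++#	'table=t3 family=2 entries=1 op=nft_register_table'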
++
++nft flush ruleset
++
++# adding tables, chains and rules
++
++for table in t1 t2; do
++	do_test "nft add table $table" \
++	"table=$table family=2 entries=1 op=nft_register_table"
++
++	do_test "nft add chain $table c1" \
++	"table=$table family=2 entries=1 op=nft_register_chain"
++
++	do_test "nft add chain $table c2; add chain $table c3" \
++	"table=$table family=2 entries=2 op=nft_register_chain"
++
++	cmd="add rule $table c1 counter"
++
++	do_test "nft $cmd" \
++	"table=$table family=2 entries=1 op=nft_register_rule"
++
++	do_test "nft $cmd; $cmd" \
++	"table=$table family=2 entries=2 op=nft_register_rule"
++
++	cmd=""
++	sep=""
++	for chain in c2 c3; do
++		for i in {1..3}; do
++			cmd+="$sep add rule $table $chain counter"
++			sep=";"
++		done
++	done
++	do_test "nft $cmd" \
++	"table=$table family=2 entries=6 op=nft_register_rule"
++done
++
++for ((i = 0; i < 500; i++)); do
++	echo "add rule t2 c3 counter accept comment \"rule $i\""
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=500 op=nft_register_rule'
++
++# adding sets and elements
++
++settype='type inet_service; counter'
++setelem='{ 22, 80, 443 }'
++setblock="{ $settype; elements = $setelem; }"
++do_test "nft add set t1 s $setblock" \
++"table=t1 family=2 entries=4 op=nft_register_set"
++
++do_test "nft add set t1 s2 $setblock; add set t1 s3 { $settype; }" \
++"table=t1 family=2 entries=5 op=nft_register_set"
++
++do_test "nft add element t1 s3 $setelem" \
++"table=t1 family=2 entries=3 op=nft_register_setelem"
++
++# adding counters
++
++do_test 'nft add counter t1 c1' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++do_test 'nft add counter t2 c1; add counter t2 c2' \
++'table=t2 family=2 entries=2 op=nft_register_obj'
++
++# adding/updating quotas
++
++do_test 'nft add quota t1 q1 { 10 bytes }' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
++'table=t2 family=2 entries=2 op=nft_register_obj'
++
++# changing the quota value triggers the obj update path
++do_test 'nft add quota t1 q1 { 20 bytes }' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++# resetting rules
++
++do_test 'nft reset rules t1 c2' \
++'table=t1 family=2 entries=3 op=nft_reset_rule'
++
++do_test 'nft reset rules table t1' \
++'table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule'
++
++do_test 'nft reset rules t2 c3' \
++'table=t2 family=2 entries=189 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=126 op=nft_reset_rule'
++
++do_test 'nft reset rules t2' \
++'table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=186 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=129 op=nft_reset_rule'
++
++do_test 'nft reset rules' \
++'table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=180 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=135 op=nft_reset_rule'
++
++# resetting sets and elements
++
++elem=(22 ,80 ,443)
++relem=""
++for i in {1..3}; do
++	relem+="${elem[((i - 1))]}"
++	do_test "nft reset element t1 s { $relem }" \
++	"table=t1 family=2 entries=$i op=nft_reset_setelem"
++done
++
++do_test 'nft reset set t1 s' \
++'table=t1 family=2 entries=3 op=nft_reset_setelem'
++
++# deleting rules
++
++readarray -t handles < <(nft -a list chain t1 c1 | \
++			 sed -n 's/.*counter.* handle \(.*\)$/\1/p')
++
++do_test "nft delete rule t1 c1 handle ${handles[0]}" \
++'table=t1 family=2 entries=1 op=nft_unregister_rule'
++
++cmd='delete rule t1 c1 handle'
++do_test "nft $cmd ${handles[1]}; $cmd ${handles[2]}" \
++'table=t1 family=2 entries=2 op=nft_unregister_rule'
++
++do_test 'nft flush chain t1 c2' \
++'table=t1 family=2 entries=3 op=nft_unregister_rule'
++
++do_test 'nft flush table t2' \
++'table=t2 family=2 entries=509 op=nft_unregister_rule'
++
++# deleting chains
++
++do_test 'nft delete chain t2 c2' \
++'table=t2 family=2 entries=1 op=nft_unregister_chain'
++
++# deleting sets and elements
++
++do_test 'nft delete element t1 s { 22 }' \
++'table=t1 family=2 entries=1 op=nft_unregister_setelem'
++
++do_test 'nft delete element t1 s { 80, 443 }' \
++'table=t1 family=2 entries=2 op=nft_unregister_setelem'
++
++do_test 'nft flush set t1 s2' \
++'table=t1 family=2 entries=3 op=nft_unregister_setelem'
++
++do_test 'nft delete set t1 s2' \
++'table=t1 family=2 entries=1 op=nft_unregister_set'
++
++do_test 'nft delete set t1 s3' \
++'table=t1 family=2 entries=1 op=nft_unregister_set'
++
++exit $RC
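The script runs like any other kselftest; assuming the netfilter selftests are wired up in the usual way, it can be invoked from a kernel tree as root:

	make -C tools/testing/selftests TARGETS=netfilter run_tests
	# or directly:
	cd tools/testing/selftests/netfilter && ./nft_audit.sh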
+diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
+index e0ffe69c271c6..7093fd5333beb 100644
+--- a/tools/tracing/rtla/src/timerlat_aa.c
++++ b/tools/tracing/rtla/src/timerlat_aa.c
+@@ -159,6 +159,7 @@ static int timerlat_aa_irq_latency(struct timerlat_aa_data *taa_data,
+ 	taa_data->thread_nmi_sum = 0;
+ 	taa_data->thread_irq_sum = 0;
+ 	taa_data->thread_softirq_sum = 0;
++	taa_data->thread_thread_sum = 0;
+ 	taa_data->thread_blocking_duration = 0;
+ 	taa_data->timer_irq_start_time = 0;
+ 	taa_data->timer_irq_duration = 0;
+@@ -337,7 +338,23 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
+ 		taa_data->timer_irq_start_time = start;
+ 		taa_data->timer_irq_duration = duration;
+ 
+-		taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start;
++		/*
++		 * We are dealing with two different clock sources: the
++		 * external clock source that timerlat uses as a reference
++		 * and the clock used by the tracer. There are also two
++		 * moments: the time of reading the clock and the time at
++		 * which the event is placed in the buffer (the trace
++		 * event timestamp). If the processor is slow or there
++		 * is some hardware noise, the difference between the
++		 * timestamp and the external clock read can be longer
++		 * than the IRQ handler delay, resulting in a negative
++		 * time. If so, set IRQ start delay as 0. In the end,
++		 * it is less relevant than the noise.
++		 */
++		if (expected_start < taa_data->timer_irq_start_time)
++			taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start;
++		else
++			taa_data->timer_irq_start_delay = 0;
+ 
+ 		/*
+ 		 * not exit from idle.
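The clamp matters because these timestamp fields are unsigned: if the trace event timestamp lags the external clock read, the old unconditional subtraction would wrap instead of going negative. A sketch with hypothetical values:

	/* hypothetical values, in ns */
	unsigned long long expected_start = 1200, irq_start = 1000;
	/* old code: irq_start - expected_start wraps to ~2^64 - 200 */
	unsigned long long delay =
		expected_start < irq_start ? irq_start - expected_start : 0;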
+@@ -528,7 +545,7 @@ static int timerlat_aa_kworker_start_handler(struct trace_seq *s, struct tep_rec
+ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 				     int irq_thresh, int thread_thresh)
+ {
+-	unsigned long long exp_irq_ts;
++	long long exp_irq_ts;
+ 	int total;
+ 	int irq;
+ 
+@@ -545,12 +562,15 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 
+ 	/*
+ 	 * Expected IRQ arrival time using the trace clock as the base.
++	 *
++	 * TODO: Add a list of previous IRQ, and then run the list backwards.
+ 	 */
+ 	exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay;
+-
+-	if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration)
+-		printf("  Previous IRQ interference:	\t\t up to  %9.2f us\n",
+-			ns_to_usf(taa_data->prev_irq_duration));
++	if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) {
++		if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time)
++			printf("  Previous IRQ interference:	\t\t up to  %9.2f us\n",
++				ns_to_usf(taa_data->prev_irq_duration));
++	}
+ 
+ 	/*
+ 	 * The delay that the IRQ suffered before starting.
+diff --git a/tools/tracing/rtla/src/timerlat_u.c b/tools/tracing/rtla/src/timerlat_u.c
+index 05e310696dd5c..01dbf9a6b5a51 100644
+--- a/tools/tracing/rtla/src/timerlat_u.c
++++ b/tools/tracing/rtla/src/timerlat_u.c
+@@ -45,7 +45,7 @@ static int timerlat_u_main(int cpu, struct timerlat_u_params *params)
+ 
+ 	retval = sched_setaffinity(gettid(), sizeof(set), &set);
+ 	if (retval == -1) {
+-		err_msg("Error setting user thread affinity\n");
++		debug_msg("Error setting user thread affinity %d, is the CPU online?\n", cpu);
+ 		exit(1);
+ 	}
+ 
+@@ -193,7 +193,9 @@ void *timerlat_u_dispatcher(void *data)
+ 					procs_count--;
+ 				}
+ 			}
+-			break;
++
++			if (!procs_count)
++				break;
+ 		}
+ 
+ 		sleep(1);


^ permalink raw reply related	[flat|nested] 24+ messages in thread
* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-06 12:36 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-06 12:36 UTC (permalink / raw
  To: gentoo-commits

commit:     30b3090c6bab3a7fb130eb08ccddb446aea6aeed
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct  6 12:36:12 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct  6 12:36:12 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30b3090c

Linux patch 6.5.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1005_linux-6.5.6.patch | 12866 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 12870 insertions(+)

diff --git a/0000_README b/0000_README
index 46cf8e96..ffd65d42 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.5.5.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.5
 
+Patch:  1005_linux-6.5.6.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-6.5.6.patch b/1005_linux-6.5.6.patch
new file mode 100644
index 00000000..1cf3da64
--- /dev/null
+++ b/1005_linux-6.5.6.patch
@@ -0,0 +1,12866 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index fabaad3fd9c21..5f7189dc98537 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -92,8 +92,13 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+- memory.kmem.limit_in_bytes          This knob is deprecated and writing to
+-                                     it will return -ENOTSUPP.
++ memory.kmem.limit_in_bytes          Deprecated knob to set and read the kernel
++                                     memory hard limit. Kernel hard limit is not
++                                     supported since 5.16. Writing any value to
++                                     this file has no effect, same as if the
++                                     nokmem kernel parameter was specified.
++                                     Kernel memory is still charged and reported
++                                     by memory.kmem.usage_in_bytes.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
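In practice the deprecation means a write is accepted but ignored, while kernel-memory accounting keeps working; a quick check from a cgroup v1 mount (the cgroup path is only an example):

	echo 100M > /sys/fs/cgroup/memory/demo/memory.kmem.limit_in_bytes  # accepted, no effect
	cat /sys/fs/cgroup/memory/demo/memory.kmem.usage_in_bytes          # still reports charges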
+diff --git a/Documentation/sound/designs/midi-2.0.rst b/Documentation/sound/designs/midi-2.0.rst
+index 27d0d3dea1b0a..d91fdad524f1f 100644
+--- a/Documentation/sound/designs/midi-2.0.rst
++++ b/Documentation/sound/designs/midi-2.0.rst
+@@ -74,8 +74,8 @@ topology based on those information.  When the device is older and
+ doesn't respond to the new UMP inquiries, the driver falls back and
+ builds the topology based on Group Terminal Block (GTB) information
+ from the USB descriptor.  Some device might be screwed up by the
+-unexpected UMP command; in such a case, pass `midi2_probe=0` option to
+-snd-usb-audio driver for skipping the UMP v1.1 inquiries.
++unexpected UMP command; in such a case, pass the `midi2_ump_probe=0`
++option to the snd-usb-audio driver to skip the UMP v1.1 inquiries.
+ 
+ When the MIDI 2.0 device is probed, the kernel creates a rawmidi
+ device for each UMP Endpoint of the device.  Its device name is
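As with any module option, the renamed parameter can also be set persistently through modprobe configuration; a hypothetical example entry:

	# /etc/modprobe.d/snd-usb-audio.conf
	options snd-usb-audio midi2_ump_probe=0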
+diff --git a/Makefile b/Makefile
+index 7545d2b0e7b71..81f14b15592f0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
+index 091ba310053eb..d69f0f4b4990d 100644
+--- a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
++++ b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
+@@ -614,12 +614,12 @@
+ /* Configure pwm clock source for timers 8 & 9 */
+ &timer8 {
+ 	assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+-	assigned-clock-parents = <&sys_clkin_ck>;
++	assigned-clock-parents = <&sys_32k_ck>;
+ };
+ 
+ &timer9 {
+ 	assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
+-	assigned-clock-parents = <&sys_clkin_ck>;
++	assigned-clock-parents = <&sys_32k_ck>;
+ };
+ 
+ /*
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi b/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
+index 0da759f8e2c2d..7dd2340bc5e45 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
+@@ -12,8 +12,7 @@ cpu_thermal: cpu-thermal {
+ 	polling-delay = <1000>; /* milliseconds */
+ 	coefficients = <0 20000>;
+ 
+-			/* sensor       ID */
+-	thermal-sensors = <&bandgap     0>;
++	thermal-sensors = <&bandgap>;
+ 
+ 	cpu_trips: trips {
+ 		cpu_alert0: cpu_alert {
+diff --git a/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi b/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
+index 801b4f10350c1..d484ec1e4fd86 100644
+--- a/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
+@@ -12,7 +12,10 @@ cpu_thermal: cpu_thermal {
+ 	polling-delay-passive = <250>; /* milliseconds */
+ 	polling-delay = <1000>; /* milliseconds */
+ 
+-			/* sensor       ID */
++	/*
++	 * See 44xx files for single sensor addressing; omap5 and dra7
++	 * also need the sensor ID for addressing.
++	 */
+ 	thermal-sensors = <&bandgap     0>;
+ 
+ 	cpu_trips: trips {
+diff --git a/arch/arm/boot/dts/ti/omap/omap443x.dtsi b/arch/arm/boot/dts/ti/omap/omap443x.dtsi
+index 238aceb799f89..2104170fe2cd7 100644
+--- a/arch/arm/boot/dts/ti/omap/omap443x.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap443x.dtsi
+@@ -69,6 +69,7 @@
+ };
+ 
+ &cpu_thermal {
++	thermal-sensors = <&bandgap>;
+ 	coefficients = <0 20000>;
+ };
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/omap4460.dtsi b/arch/arm/boot/dts/ti/omap/omap4460.dtsi
+index 1b27a862ae810..a6764750d4476 100644
+--- a/arch/arm/boot/dts/ti/omap/omap4460.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap4460.dtsi
+@@ -79,6 +79,7 @@
+ };
+ 
+ &cpu_thermal {
++	thermal-sensors = <&bandgap>;
+ 	coefficients = <348 (-9301)>;
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
+index a750be13ace89..cf32922c97619 100644
+--- a/arch/arm64/boot/dts/freescale/Makefile
++++ b/arch/arm64/boot/dts/freescale/Makefile
+@@ -66,6 +66,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-mx8menlo.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-phg.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-rdk.dtb
++dtb-$(CONFIG_ARCH_MXC) += imx8mm-prt8mm.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-tqma8mqml-mba8mx.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+index df8e808ac4739..6752c30274369 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+@@ -26,7 +26,7 @@
+ 
+ 		port {
+ 			hdmi_connector_in: endpoint {
+-				remote-endpoint = <&adv7533_out>;
++				remote-endpoint = <&adv7535_out>;
+ 			};
+ 		};
+ 	};
+@@ -72,6 +72,13 @@
+ 		enable-active-high;
+ 	};
+ 
++	reg_vddext_3v3: regulator-vddext-3v3 {
++		compatible = "regulator-fixed";
++		regulator-name = "VDDEXT_3V3";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++	};
++
+ 	backlight: backlight {
+ 		compatible = "pwm-backlight";
+ 		pwms = <&pwm1 0 5000000 0>;
+@@ -317,15 +324,16 @@
+ 
+ 	hdmi@3d {
+ 		compatible = "adi,adv7535";
+-		reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
+-		reg-names = "main", "cec", "edid", "packet";
++		reg = <0x3d>;
++		interrupt-parent = <&gpio1>;
++		interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ 		adi,dsi-lanes = <4>;
+-
+-		adi,input-depth = <8>;
+-		adi,input-colorspace = "rgb";
+-		adi,input-clock = "1x";
+-		adi,input-style = <1>;
+-		adi,input-justification = "evenly";
++		avdd-supply = <&buck5_reg>;
++		dvdd-supply = <&buck5_reg>;
++		pvdd-supply = <&buck5_reg>;
++		a2vdd-supply = <&buck5_reg>;
++		v3p3-supply = <&reg_vddext_3v3>;
++		v1p2-supply = <&buck5_reg>;
+ 
+ 		ports {
+ 			#address-cells = <1>;
+@@ -334,7 +342,7 @@
+ 			port@0 {
+ 				reg = <0>;
+ 
+-				adv7533_in: endpoint {
++				adv7535_in: endpoint {
+ 					remote-endpoint = <&dsi_out>;
+ 				};
+ 			};
+@@ -342,7 +350,7 @@
+ 			port@1 {
+ 				reg = <1>;
+ 
+-				adv7533_out: endpoint {
++				adv7535_out: endpoint {
+ 					remote-endpoint = <&hdmi_connector_in>;
+ 				};
+ 			};
+@@ -408,7 +416,7 @@
+ 			reg = <1>;
+ 
+ 			dsi_out: endpoint {
+-				remote-endpoint = <&adv7533_in>;
++				remote-endpoint = <&adv7535_in>;
+ 				data-lanes = <1 2 3 4>;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+index 06e91297fb163..acd265d8b58ed 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+@@ -381,9 +381,10 @@
+ &sai3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sai3>;
+-	assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
++	assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
++			  <&clk IMX8MP_AUDIO_PLL2> ;
+ 	assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+-	assigned-clock-rates = <12288000>;
++	assigned-clock-rates = <12288000>, <361267200>;
+ 	fsl,sai-mclk-direction-output;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index cc406bb338feb..587265395a9b4 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -794,6 +794,12 @@
+ 						reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>;
+ 						clocks = <&clk IMX8MP_CLK_AUDIO_ROOT>,
+ 							 <&clk IMX8MP_CLK_AUDIO_AXI>;
++						assigned-clocks = <&clk IMX8MP_CLK_AUDIO_AHB>,
++								  <&clk IMX8MP_CLK_AUDIO_AXI_SRC>;
++						assigned-clock-parents =  <&clk IMX8MP_SYS_PLL1_800M>,
++									  <&clk IMX8MP_SYS_PLL1_800M>;
++						assigned-clock-rates = <400000000>,
++								       <600000000>;
+ 					};
+ 
+ 					pgc_gpu2d: power-domain@6 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index d6b464cb61d6f..f546f6f57c1e5 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -101,6 +101,14 @@
+ 		};
+ 	};
+ 
++	reserved-memory {
++		/* Cont splash region set up by the bootloader */
++		cont_splash_mem: framebuffer@9d400000 {
++			reg = <0x0 0x9d400000 0x0 0x2400000>;
++			no-map;
++		};
++	};
++
+ 	lt9611_1v8: lt9611-vdd18-regulator {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "LT9611_1V8";
+@@ -506,6 +514,7 @@
+ };
+ 
+ &mdss {
++	memory-region = <&cont_splash_mem>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index d8bae57af16d5..02adc6ceb8316 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1145,7 +1145,6 @@ CONFIG_COMMON_CLK_S2MPS11=y
+ CONFIG_COMMON_CLK_PWM=y
+ CONFIG_COMMON_CLK_RS9_PCIE=y
+ CONFIG_COMMON_CLK_VC5=y
+-CONFIG_COMMON_CLK_NPCM8XX=y
+ CONFIG_COMMON_CLK_BD718XX=m
+ CONFIG_CLK_RASPBERRYPI=m
+ CONFIG_CLK_IMX8MM=y
+diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
+index 5c9c03bdf9156..b24437e28c6ed 100644
+--- a/arch/loongarch/include/asm/addrspace.h
++++ b/arch/loongarch/include/asm/addrspace.h
+@@ -19,7 +19,7 @@
+  */
+ #ifndef __ASSEMBLY__
+ #ifndef PHYS_OFFSET
+-#define PHYS_OFFSET	_AC(0, UL)
++#define PHYS_OFFSET	_UL(0)
+ #endif
+ extern unsigned long vm_map_base;
+ #endif /* __ASSEMBLY__ */
+@@ -43,7 +43,7 @@ extern unsigned long vm_map_base;
+  * Memory above this physical address will be considered highmem.
+  */
+ #ifndef HIGHMEM_START
+-#define HIGHMEM_START		(_AC(1, UL) << _AC(DMW_PABITS, UL))
++#define HIGHMEM_START		(_UL(1) << _UL(DMW_PABITS))
+ #endif
+ 
+ #define TO_PHYS(x)		(		((x) & TO_PHYS_MASK))
+@@ -65,16 +65,16 @@ extern unsigned long vm_map_base;
+ #define _ATYPE_
+ #define _ATYPE32_
+ #define _ATYPE64_
+-#define _CONST64_(x)	x
+ #else
+ #define _ATYPE_		__PTRDIFF_TYPE__
+ #define _ATYPE32_	int
+ #define _ATYPE64_	__s64
++#endif
++
+ #ifdef CONFIG_64BIT
+-#define _CONST64_(x)	x ## UL
++#define _CONST64_(x)	_UL(x)
+ #else
+-#define _CONST64_(x)	x ## ULL
+-#endif
++#define _CONST64_(x)	_ULL(x)
+ #endif
+ 
+ /*
+diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
+index 7af0cebf28d73..b9a4ab54285c1 100644
+--- a/arch/loongarch/include/asm/elf.h
++++ b/arch/loongarch/include/asm/elf.h
+@@ -111,6 +111,15 @@
+ #define R_LARCH_TLS_GD_HI20			98
+ #define R_LARCH_32_PCREL			99
+ #define R_LARCH_RELAX				100
++#define R_LARCH_DELETE				101
++#define R_LARCH_ALIGN				102
++#define R_LARCH_PCREL20_S2			103
++#define R_LARCH_CFA				104
++#define R_LARCH_ADD6				105
++#define R_LARCH_SUB6				106
++#define R_LARCH_ADD_ULEB128			107
++#define R_LARCH_SUB_ULEB128			108
++#define R_LARCH_64_PCREL			109
+ 
+ #ifndef ELF_ARCH
+ 
+diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
+index 4a4107a6a9651..aed901c57fb43 100644
+--- a/arch/loongarch/kernel/mem.c
++++ b/arch/loongarch/kernel/mem.c
+@@ -50,7 +50,6 @@ void __init memblock_init(void)
+ 	}
+ 
+ 	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+-	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ 
+ 	/* Reserve the first 2MB */
+ 	memblock_reserve(PHYS_OFFSET, 0x200000);
+@@ -58,4 +57,7 @@ void __init memblock_init(void)
+ 	/* Reserve the kernel text/data/bss */
+ 	memblock_reserve(__pa_symbol(&_text),
+ 			 __pa_symbol(&_end) - __pa_symbol(&_text));
++
++	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
++	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
+ }
+diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
+index b8b86088b2dd2..b13b2858fe392 100644
+--- a/arch/loongarch/kernel/module.c
++++ b/arch/loongarch/kernel/module.c
+@@ -367,6 +367,24 @@ static int apply_r_larch_got_pc(struct module *mod,
+ 	return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
+ }
+ 
++static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
++				  s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
++{
++	ptrdiff_t offset = (void *)v - (void *)location;
++
++	*(u32 *)location = offset;
++	return 0;
++}
++
++static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
++				  s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
++{
++	ptrdiff_t offset = (void *)v - (void *)location;
++
++	*(u64 *)location = offset;
++	return 0;
++}
++
+ /*
+  * reloc_handlers_rela() - Apply a particular relocation to a module
+  * @mod: the module to apply the reloc to
+@@ -382,7 +400,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ 
+ /* The handlers for known reloc types */
+ static reloc_rela_handler reloc_rela_handlers[] = {
+-	[R_LARCH_NONE ... R_LARCH_RELAX]		     = apply_r_larch_error,
++	[R_LARCH_NONE ... R_LARCH_64_PCREL]		     = apply_r_larch_error,
+ 
+ 	[R_LARCH_NONE]					     = apply_r_larch_none,
+ 	[R_LARCH_32]					     = apply_r_larch_32,
+@@ -396,6 +414,8 @@ static reloc_rela_handler reloc_rela_handlers[] = {
+ 	[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
+ 	[R_LARCH_ADD32 ... R_LARCH_SUB64]		     = apply_r_larch_add_sub,
+ 	[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12]	     = apply_r_larch_pcala,
++	[R_LARCH_32_PCREL]				     = apply_r_larch_32_pcrel,
++	[R_LARCH_64_PCREL]				     = apply_r_larch_64_pcrel,
+ };
+ 
+ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
+index 708665895b47d..c75faaa205b8a 100644
+--- a/arch/loongarch/kernel/numa.c
++++ b/arch/loongarch/kernel/numa.c
+@@ -468,7 +468,7 @@ void __init paging_init(void)
+ 
+ void __init mem_init(void)
+ {
+-	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
++	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ 	memblock_free_all();
+ 	setup_zero_pages();	/* This comes from node 0 */
+ }
+diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
+index b1686afcf8766..bb2ec86f37a8e 100644
+--- a/arch/loongarch/kernel/vmlinux.lds.S
++++ b/arch/loongarch/kernel/vmlinux.lds.S
+@@ -53,33 +53,6 @@ SECTIONS
+ 	. = ALIGN(PECOFF_SEGMENT_ALIGN);
+ 	_etext = .;
+ 
+-	/*
+-	 * struct alt_inst entries. From the header (alternative.h):
+-	 * "Alternative instructions for different CPU types or capabilities"
+-	 * Think locking instructions on spinlocks.
+-	 */
+-	. = ALIGN(4);
+-	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+-		__alt_instructions = .;
+-		*(.altinstructions)
+-		__alt_instructions_end = .;
+-	}
+-
+-#ifdef CONFIG_RELOCATABLE
+-	. = ALIGN(8);
+-	.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+-		__la_abs_begin = .;
+-		*(.la_abs)
+-		__la_abs_end = .;
+-	}
+-#endif
+-
+-	.got : ALIGN(16) { *(.got) }
+-	.plt : ALIGN(16) { *(.plt) }
+-	.got.plt : ALIGN(16) { *(.got.plt) }
+-
+-	.data.rel : { *(.data.rel*) }
+-
+ 	. = ALIGN(PECOFF_SEGMENT_ALIGN);
+ 	__init_begin = .;
+ 	__inittext_begin = .;
+@@ -94,6 +67,18 @@ SECTIONS
+ 
+ 	__initdata_begin = .;
+ 
++	/*
++	 * struct alt_inst entries. From the header (alternative.h):
++	 * "Alternative instructions for different CPU types or capabilities"
++	 * Think locking instructions on spinlocks.
++	 */
++	. = ALIGN(4);
++	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
++		__alt_instructions = .;
++		*(.altinstructions)
++		__alt_instructions_end = .;
++	}
++
+ 	INIT_DATA_SECTION(16)
+ 	.exit.data : {
+ 		EXIT_DATA
+@@ -113,6 +98,11 @@ SECTIONS
+ 
+ 	_sdata = .;
+ 	RO_DATA(4096)
++
++	.got : ALIGN(16) { *(.got) }
++	.plt : ALIGN(16) { *(.plt) }
++	.got.plt : ALIGN(16) { *(.got.plt) }
++
+ 	RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+ 
+ 	.rela.dyn : ALIGN(8) {
+@@ -121,6 +111,17 @@ SECTIONS
+ 		__rela_dyn_end = .;
+ 	}
+ 
++	.data.rel : { *(.data.rel*) }
++
++#ifdef CONFIG_RELOCATABLE
++	. = ALIGN(8);
++	.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
++		__la_abs_begin = .;
++		*(.la_abs)
++		__la_abs_end = .;
++	}
++#endif
++
+ 	.sdata : {
+ 		*(.sdata)
+ 	}
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index 012da042d0a4f..7b9f91db227f2 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -164,6 +164,7 @@ static struct platform_device db1x00_audio_dev = {
+ 
+ /******************************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
+ {
+ 	mmc_detect_change(ptr, msecs_to_jiffies(500));
+@@ -369,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = {
+ 	.num_resources	= ARRAY_SIZE(au1100_mmc1_res),
+ 	.resource	= au1100_mmc1_res,
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /******************************************************************************/
+ 
+@@ -440,8 +442,10 @@ static struct platform_device *db1x00_devs[] = {
+ 
+ static struct platform_device *db1100_devs[] = {
+ 	&au1100_lcd_device,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1100_mmc0_dev,
+ 	&db1100_mmc1_dev,
++#endif
+ };
+ 
+ int __init db1000_dev_setup(void)
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index 76080c71a2a7b..f521874ebb07b 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -326,6 +326,7 @@ static struct platform_device db1200_ide_dev = {
+ 
+ /**********************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ /* SD carddetects:  they're supposed to be edge-triggered, but ack
+  * doesn't seem to work (CPLD Rev 2).  Instead, the screaming one
+  * is disabled and its counterpart enabled.  The 200ms timeout is
+@@ -584,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = {
+ 	.num_resources	= ARRAY_SIZE(au1200_mmc1_res),
+ 	.resource	= au1200_mmc1_res,
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /**********************************************************************/
+ 
+@@ -751,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = {
+ static struct platform_device *db1200_devs[] __initdata = {
+ 	NULL,		/* PSC0, selected by S6.8 */
+ 	&db1200_ide_dev,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1200_mmc0_dev,
++#endif
+ 	&au1200_lcd_dev,
+ 	&db1200_eth_dev,
+ 	&db1200_nand_dev,
+@@ -762,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = {
+ };
+ 
+ static struct platform_device *pb1200_devs[] __initdata = {
++#ifdef CONFIG_MMC_AU1X
+ 	&pb1200_mmc1_dev,
++#endif
+ };
+ 
+ /* Some peripheral base addresses differ on the PB1200 */
+diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
+index ff61901329c62..d377e043b49f8 100644
+--- a/arch/mips/alchemy/devboards/db1300.c
++++ b/arch/mips/alchemy/devboards/db1300.c
+@@ -450,6 +450,7 @@ static struct platform_device db1300_ide_dev = {
+ 
+ /**********************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
+ {
+ 	disable_irq_nosync(irq);
+@@ -632,6 +633,7 @@ static struct platform_device db1300_sd0_dev = {
+ 	.resource	= au1300_sd0_res,
+ 	.num_resources	= ARRAY_SIZE(au1300_sd0_res),
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /**********************************************************************/
+ 
+@@ -767,8 +769,10 @@ static struct platform_device *db1300_dev[] __initdata = {
+ 	&db1300_5waysw_dev,
+ 	&db1300_nand_dev,
+ 	&db1300_ide_dev,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1300_sd0_dev,
+ 	&db1300_sd1_dev,
++#endif
+ 	&db1300_lcd_dev,
+ 	&db1300_ac97_dev,
+ 	&db1300_i2s_dev,
+diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
+index 8e51c775c80a6..c46ad399a74f2 100644
+--- a/arch/parisc/include/asm/ropes.h
++++ b/arch/parisc/include/asm/ropes.h
+@@ -29,7 +29,7 @@
+ struct ioc {
+ 	void __iomem	*ioc_hpa;	/* I/O MMU base address */
+ 	char		*res_map;	/* resource map, bit == pdir entry */
+-	u64		*pdir_base;	/* physical base address */
++	__le64		*pdir_base;	/* physical base address */
+ 	unsigned long	ibase;		/* pdir IOV Space base - shared w/lba_pci */
+ 	unsigned long	imask;		/* pdir IOV Space mask - shared w/lba_pci */
+ #ifdef ZX1_SUPPORT
+@@ -86,6 +86,9 @@ struct sba_device {
+ 	struct ioc		ioc[MAX_IOC];
+ };
+ 
++/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
++extern struct sba_device *sba_list;
++
+ #define ASTRO_RUNWAY_PORT	0x582
+ #define IKE_MERCED_PORT		0x803
+ #define REO_MERCED_PORT		0x804
+@@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) {
+ 
+ #define SBA_PDIR_VALID_BIT	0x8000000000000000ULL
+ 
+-#define SBA_AGPGART_COOKIE	0x0000badbadc0ffeeULL
++#define SBA_AGPGART_COOKIE	(__force __le64) 0x0000badbadc0ffeeULL
+ 
+ #define SBA_FUNC_ID	0x0000	/* function id */
+ #define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 8f4b77648491a..ed8b759480614 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -925,9 +925,9 @@ static __init void qemu_header(void)
+ 	pr_info("#define PARISC_MODEL \"%s\"\n\n",
+ 			boot_cpu_data.pdc.sys_model_name);
+ 
++	#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ 	pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
+ 		"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
+-	#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ 		p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+ 	#undef p
+ 
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 12c4d4104ade4..2f81bfd4f15e1 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -365,7 +365,7 @@ union irq_stack_union {
+ 	volatile unsigned int lock[1];
+ };
+ 
+-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
++static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+ 		.slock = { 1,1,1,1 },
+ 	};
+ #endif
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index e1b4e70c8fd0f..f432db3faa5b0 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -505,11 +505,13 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
+ 	struct arch_hw_breakpoint *info;
+ 	int i;
+ 
++	preempt_disable();
++
+ 	for (i = 0; i < nr_wp_slots(); i++) {
+ 		if (unlikely(tsk->thread.last_hit_ubp[i]))
+ 			goto reset;
+ 	}
+-	return;
++	goto out;
+ 
+ reset:
+ 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
+@@ -518,6 +520,9 @@ reset:
+ 		__set_breakpoint(i, info);
+ 		tsk->thread.last_hit_ubp[i] = NULL;
+ 	}
++
++out:
++	preempt_enable();
+ }
+ 
+ static bool is_larx_stcx_instr(int type)
+@@ -632,6 +637,11 @@ static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
+ 	}
+ }
+ 
++/*
++ * Handle a DABR or DAWR exception.
++ *
++ * Called in atomic context.
++ */
+ int hw_breakpoint_handler(struct die_args *args)
+ {
+ 	bool err = false;
+@@ -758,6 +768,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);
+ 
+ /*
+  * Handle single-step exceptions following a DABR hit.
++ *
++ * Called in atomic context.
+  */
+ static int single_step_dabr_instruction(struct die_args *args)
+ {
+@@ -815,6 +827,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);
+ 
+ /*
+  * Handle debug exception notifications.
++ *
++ * Called in atomic context.
+  */
+ int hw_breakpoint_exceptions_notify(
+ 		struct notifier_block *unused, unsigned long val, void *data)
+diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
+index a74623025f3ab..9e51801c49152 100644
+--- a/arch/powerpc/kernel/hw_breakpoint_constraints.c
++++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
+@@ -131,8 +131,13 @@ void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
+ 			 int *type, int *size, unsigned long *ea)
+ {
+ 	struct instruction_op op;
++	int err;
+ 
+-	if (__get_user_instr(*instr, (void __user *)regs->nip))
++	pagefault_disable();
++	err = __get_user_instr(*instr, (void __user *)regs->nip);
++	pagefault_enable();
++
++	if (err)
+ 		return;
+ 
+ 	analyse_instr(&op, regs, *instr);
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index b15f15dcacb5c..e6a958a5da276 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -73,29 +73,12 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
+ 	bool firstframe;
+ 
+ 	stack_end = stack_page + THREAD_SIZE;
+-	if (!is_idle_task(task)) {
+-		/*
+-		 * For user tasks, this is the SP value loaded on
+-		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
+-		 * system_call_common().
+-		 *
+-		 * Likewise for non-swapper kernel threads,
+-		 * this also happens to be the top of the stack
+-		 * as setup by copy_thread().
+-		 *
+-		 * Note that stack backlinks are not properly setup by
+-		 * copy_thread() and thus, a forked task() will have
+-		 * an unreliable stack trace until it's been
+-		 * _switch()'ed to for the first time.
+-		 */
+-		stack_end -= STACK_USER_INT_FRAME_SIZE;
+-	} else {
+-		/*
+-		 * idle tasks have a custom stack layout,
+-		 * c.f. cpu_idle_thread_init().
+-		 */
++
++	// See copy_thread() for details.
++	if (task->flags & PF_KTHREAD)
+ 		stack_end -= STACK_FRAME_MIN_SIZE;
+-	}
++	else
++		stack_end -= STACK_USER_INT_FRAME_SIZE;
+ 
+ 	if (task == current)
+ 		sp = current_stack_frame();
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 7ef147e2a20d7..109b93874df92 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1512,23 +1512,11 @@ static void do_program_check(struct pt_regs *regs)
+ 			return;
+ 		}
+ 
+-		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
+-			ppc_inst_t insn;
+-
+-			if (get_user_instr(insn, (void __user *)regs->nip)) {
+-				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+-				return;
+-			}
+-
+-			if (ppc_inst_primary_opcode(insn) == 31 &&
+-			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
+-				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+-				return;
+-			}
++		/* Other user-mode cases are handled after enabling IRQs */
++		if (!user_mode(regs)) {
++			_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
++			return;
+ 		}
+-
+-		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+-		return;
+ 	}
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	if (reason & REASON_TM) {
+@@ -1561,16 +1549,44 @@ static void do_program_check(struct pt_regs *regs)
+ 
+ 	/*
+ 	 * If we took the program check in the kernel skip down to sending a
+-	 * SIGILL. The subsequent cases all relate to emulating instructions
+-	 * which we should only do for userspace. We also do not want to enable
+-	 * interrupts for kernel faults because that might lead to further
+-	 * faults, and loose the context of the original exception.
++	 * SIGILL. The subsequent cases all relate to user space, such as
++	 * emulating instructions which we should only do for user space. We
++	 * also do not want to enable interrupts for kernel faults because that
++	 * might lead to further faults, and loose the context of the original
++	 * might lead to further faults, and lose the context of the original
+ 	 */
+ 	if (!user_mode(regs))
+ 		goto sigill;
+ 
+ 	interrupt_cond_local_irq_enable(regs);
+ 
++	/*
++	 * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
++	 * except get_user_instr() can sleep so we cannot reliably inspect the
++	 * current instruction in that context. Now that we know we are
++	 * handling a user space trap and can sleep, we can check if the trap
++	 * was a hashchk failure.
++	 */
++	if (reason & REASON_TRAP) {
++		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
++			ppc_inst_t insn;
++
++			if (get_user_instr(insn, (void __user *)regs->nip)) {
++				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
++				return;
++			}
++
++			if (ppc_inst_primary_opcode(insn) == 31 &&
++			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
++				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
++				return;
++			}
++		}
++
++		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
++		return;
++	}
++
+ 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
+ 	 * but there seems to be a hardware bug on the 405GP (RevD)
+ 	 * that means ESR is sometimes set incorrectly - either to
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index 317175791d23c..3449be7c0d51f 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -1418,7 +1418,7 @@ static int h_24x7_event_init(struct perf_event *event)
+ 	}
+ 
+ 	domain = event_get_domain(event);
+-	if (domain >= HV_PERF_DOMAIN_MAX) {
++	if (domain  == 0 || domain >= HV_PERF_DOMAIN_MAX) {
+ 		pr_devel("invalid domain %d\n", domain);
+ 		return -EINVAL;
+ 	}
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
+index fb1a810f3d8ce..feab334dd8329 100644
+--- a/arch/riscv/include/asm/errata_list.h
++++ b/arch/riscv/include/asm/errata_list.h
+@@ -100,7 +100,7 @@ asm volatile(ALTERNATIVE(						\
+  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+  *   0000001    01001      rs1       000      00000  0001011
+  * dcache.cva rs1 (clean, virtual address)
+- *   0000001    00100      rs1       000      00000  0001011
++ *   0000001    00101      rs1       000      00000  0001011
+  *
+  * dcache.cipa rs1 (clean then invalidate, physical address)
+  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+@@ -113,7 +113,7 @@ asm volatile(ALTERNATIVE(						\
+  *   0000000    11001     00000      000      00000  0001011
+  */
+ #define THEAD_inval_A0	".long 0x0265000b"
+-#define THEAD_clean_A0	".long 0x0245000b"
++#define THEAD_clean_A0	".long 0x0255000b"
+ #define THEAD_flush_A0	".long 0x0275000b"
+ #define THEAD_SYNC_S	".long 0x0190000b"
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e36261b4ea14f..68ce4f786dcd1 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1955,6 +1955,7 @@ config EFI
+ 	select UCS2_STRING
+ 	select EFI_RUNTIME_WRAPPERS
+ 	select ARCH_USE_MEMREMAP_PROT
++	select EFI_RUNTIME_MAP if KEXEC_CORE
+ 	help
+ 	  This enables the kernel to use EFI runtime services that are
+ 	  available (such as the EFI variable services).
+@@ -2030,7 +2031,6 @@ config EFI_MAX_FAKE_MEM
+ config EFI_RUNTIME_MAP
+ 	bool "Export EFI runtime maps to sysfs" if EXPERT
+ 	depends on EFI
+-	default KEXEC_CORE
+ 	help
+ 	  Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
+ 	  That memory map is required by the 2nd kernel to set up EFI virtual
+diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
+index 5b77bbc28f969..819046974b997 100644
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -205,8 +205,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
+ #endif
+ #endif
+ 
+-typedef void crash_vmclear_fn(void);
+-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+ extern void kdump_nmi_shootdown_cpus(void);
+ 
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 3bc146dfd38da..f72b30d2238a6 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1400,7 +1400,6 @@ struct kvm_arch {
+ 	 * the thread holds the MMU lock in write mode.
+ 	 */
+ 	spinlock_t tdp_mmu_pages_lock;
+-	struct workqueue_struct *tdp_mmu_zap_wq;
+ #endif /* CONFIG_X86_64 */
+ 
+ 	/*
+@@ -1814,7 +1813,7 @@ void kvm_mmu_vendor_module_exit(void);
+ 
+ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+ int kvm_mmu_create(struct kvm_vcpu *vcpu);
+-int kvm_mmu_init_vm(struct kvm *kvm);
++void kvm_mmu_init_vm(struct kvm *kvm);
+ void kvm_mmu_uninit_vm(struct kvm *kvm);
+ 
+ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
+index 5ff49fd67732e..571fe4d2d2328 100644
+--- a/arch/x86/include/asm/linkage.h
++++ b/arch/x86/include/asm/linkage.h
+@@ -105,6 +105,13 @@
+ 	CFI_POST_PADDING					\
+ 	SYM_FUNC_END(__cfi_##name)
+ 
++/* UML needs to be able to override memcpy() and friends for KASAN. */
++#ifdef CONFIG_UML
++# define SYM_FUNC_ALIAS_MEMFUNC	SYM_FUNC_ALIAS_WEAK
++#else
++# define SYM_FUNC_ALIAS_MEMFUNC	SYM_FUNC_ALIAS
++#endif
++
+ /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
+ #define SYM_TYPED_FUNC_START(name)				\
+ 	SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index fd750247ca891..9e26294e415c8 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -676,12 +676,10 @@ extern u16 get_llc_id(unsigned int cpu);
+ #ifdef CONFIG_CPU_SUP_AMD
+ extern u32 amd_get_nodes_per_socket(void);
+ extern u32 amd_get_highest_perf(void);
+-extern bool cpu_has_ibpb_brtype_microcode(void);
+ extern void amd_clear_divider(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
+ static inline u32 amd_get_highest_perf(void)		{ return 0; }
+-static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+ static inline void amd_clear_divider(void)		{ }
+ #endif
+ 
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 9177b4354c3f5..dc201724a6433 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
+ #define MRR_BIOS	0
+ #define MRR_APM		1
+ 
++typedef void crash_vmclear_fn(void);
++extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+ void cpu_emergency_disable_virtualization(void);
+ 
+ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 7eca6a8abbb1c..28e77c5d6484a 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -766,6 +766,15 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ 
+ 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
+ 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
++
++	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
++		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
++			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
++		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
++			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
++			setup_force_cpu_cap(X86_FEATURE_SBPB);
++		}
++	}
+ }
+ 
+ static void init_amd_k8(struct cpuinfo_x86 *c)
+@@ -1301,25 +1310,6 @@ void amd_check_microcode(void)
+ 	on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
+ 
+-bool cpu_has_ibpb_brtype_microcode(void)
+-{
+-	switch (boot_cpu_data.x86) {
+-	/* Zen1/2 IBPB flushes branch type predictions too. */
+-	case 0x17:
+-		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
+-	case 0x19:
+-		/* Poke the MSR bit on Zen3/4 to check its presence. */
+-		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+-			setup_force_cpu_cap(X86_FEATURE_SBPB);
+-			return true;
+-		} else {
+-			return false;
+-		}
+-	default:
+-		return false;
+-	}
+-}
+-
+ /*
+  * Issue a DIV 0/1 insn to clear any division data from previous DIV
+  * operations.
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index f081d26616ac1..10499bcd4e396 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2404,26 +2404,15 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
+ 
+ static void __init srso_select_mitigation(void)
+ {
+-	bool has_microcode;
++	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
+ 
+ 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ 		goto pred_cmd;
+ 
+-	/*
+-	 * The first check is for the kernel running as a guest in order
+-	 * for guests to verify whether IBPB is a viable mitigation.
+-	 */
+-	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+ 	if (!has_microcode) {
+ 		pr_warn("IBPB-extending microcode not applied!\n");
+ 		pr_warn(SRSO_NOTICE);
+ 	} else {
+-		/*
+-		 * Enable the synthetic (even if in a real CPUID leaf)
+-		 * flags for guests.
+-		 */
+-		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+-
+ 		/*
+ 		 * Zen1/2 with SMT off aren't vulnerable after the right
+ 		 * IBPB microcode has been applied.
+@@ -2444,7 +2433,7 @@ static void __init srso_select_mitigation(void)
+ 
+ 	switch (srso_cmd) {
+ 	case SRSO_CMD_OFF:
+-		return;
++		goto pred_cmd;
+ 
+ 	case SRSO_CMD_MICROCODE:
+ 		if (has_microcode) {
+@@ -2717,7 +2706,7 @@ static ssize_t srso_show_state(char *buf)
+ 
+ 	return sysfs_emit(buf, "%s%s\n",
+ 			  srso_strings[srso_mitigation],
+-			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
++			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ }
+ 
+ static ssize_t gds_show_state(char *buf)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 00f043a094fcd..6acfe9037a8b6 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1288,7 +1288,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+ 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+-	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
++	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+ 	VULNBL_AMD(0x19, SRSO),
+ 	{}
+ };
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 91fa70e510041..279148e724596 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -235,6 +235,21 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	return epc_page;
+ }
+ 
++/*
++ * Ensure the SECS page is not swapped out.  Must be called with encl->lock
++ * held to protect the enclave state, including the SECS, and to ensure the
++ * SECS page is not swapped out again while it is being used.
++ */
++static struct sgx_epc_page *sgx_encl_load_secs(struct sgx_encl *encl)
++{
++	struct sgx_epc_page *epc_page = encl->secs.epc_page;
++
++	if (!epc_page)
++		epc_page = sgx_encl_eldu(&encl->secs, NULL);
++
++	return epc_page;
++}
++
+ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
+ 						  struct sgx_encl_page *entry)
+ {
+@@ -248,11 +263,9 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
+ 		return entry;
+ 	}
+ 
+-	if (!(encl->secs.epc_page)) {
+-		epc_page = sgx_encl_eldu(&encl->secs, NULL);
+-		if (IS_ERR(epc_page))
+-			return ERR_CAST(epc_page);
+-	}
++	epc_page = sgx_encl_load_secs(encl);
++	if (IS_ERR(epc_page))
++		return ERR_CAST(epc_page);
+ 
+ 	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
+ 	if (IS_ERR(epc_page))
+@@ -339,6 +352,13 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
+ 
+ 	mutex_lock(&encl->lock);
+ 
++	epc_page = sgx_encl_load_secs(encl);
++	if (IS_ERR(epc_page)) {
++		if (PTR_ERR(epc_page) == -EBUSY)
++			vmret = VM_FAULT_NOPAGE;
++		goto err_out_unlock;
++	}
++
+ 	epc_page = sgx_alloc_epc_page(encl_page, false);
+ 	if (IS_ERR(epc_page)) {
+ 		if (PTR_ERR(epc_page) == -EBUSY)
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index cdd92ab43cda4..54cd959cb3160 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -48,38 +48,12 @@ struct crash_memmap_data {
+ 	unsigned int type;
+ };
+ 
+-/*
+- * This is used to VMCLEAR all VMCSs loaded on the
+- * processor. And when loading kvm_intel module, the
+- * callback function pointer will be assigned.
+- *
+- * protected by rcu.
+- */
+-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
+-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+-
+-static inline void cpu_crash_vmclear_loaded_vmcss(void)
+-{
+-	crash_vmclear_fn *do_vmclear_operation = NULL;
+-
+-	rcu_read_lock();
+-	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+-	if (do_vmclear_operation)
+-		do_vmclear_operation();
+-	rcu_read_unlock();
+-}
+-
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ 
+ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ {
+ 	crash_save_cpu(regs, cpu);
+ 
+-	/*
+-	 * VMCLEAR VMCSs loaded on all cpus if needed.
+-	 */
+-	cpu_crash_vmclear_loaded_vmcss();
+-
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+ 	 */
+@@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ 
+ 	crash_smp_send_stop();
+ 
+-	/*
+-	 * VMCLEAR VMCSs loaded on this cpu if needed.
+-	 */
+-	cpu_crash_vmclear_loaded_vmcss();
+-
+ 	cpu_emergency_disable_virtualization();
+ 
+ 	/*
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 3adbe97015c13..3fa4c6717a1db 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -787,6 +787,26 @@ void machine_crash_shutdown(struct pt_regs *regs)
+ }
+ #endif
+ 
++/*
++ * This is used to VMCLEAR all VMCSs loaded on the
++ * processor. The callback function pointer is assigned
++ * when the kvm_intel module is loaded.
++ *
++ * Protected by RCU.
++ */
++crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
++EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
++
++static inline void cpu_crash_vmclear_loaded_vmcss(void)
++{
++	crash_vmclear_fn *do_vmclear_operation = NULL;
++
++	rcu_read_lock();
++	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
++	if (do_vmclear_operation)
++		do_vmclear_operation();
++	rcu_read_unlock();
++}
+ 
+ /* This is the CPU performing the emergency shutdown work. */
+ int crashing_cpu = -1;
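For context, the consumer of this hook is kvm_intel, which registers its VMCLEAR routine on load and clears it on unload; a simplified sketch of that usage (names as in vmx.c):

	/* on module load */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
	/* on module unload */
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();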
+@@ -798,6 +818,8 @@ int crashing_cpu = -1;
+  */
+ void cpu_emergency_disable_virtualization(void)
+ {
++	cpu_crash_vmclear_loaded_vmcss();
++
+ 	cpu_emergency_vmxoff();
+ 	cpu_emergency_svm_disable();
+ }
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index fd975a4a52006..aa0df37c1fe72 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -359,15 +359,11 @@ static void __init add_early_ima_buffer(u64 phys_addr)
+ #if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
+ int __init ima_free_kexec_buffer(void)
+ {
+-	int rc;
+-
+ 	if (!ima_kexec_buffer_size)
+ 		return -ENOENT;
+ 
+-	rc = memblock_phys_free(ima_kexec_buffer_phys,
+-				ima_kexec_buffer_size);
+-	if (rc)
+-		return rc;
++	memblock_free_late(ima_kexec_buffer_phys,
++			   ima_kexec_buffer_size);
+ 
+ 	ima_kexec_buffer_phys = 0;
+ 	ima_kexec_buffer_size = 0;
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index ec169f5c7dce2..ec85e84d66ac3 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6206,21 +6206,17 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ 	kvm_mmu_zap_all_fast(kvm);
+ }
+ 
+-int kvm_mmu_init_vm(struct kvm *kvm)
++void kvm_mmu_init_vm(struct kvm *kvm)
+ {
+ 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+-	int r;
+ 
+ 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
+ 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+ 
+-	if (tdp_mmu_enabled) {
+-		r = kvm_mmu_init_tdp_mmu(kvm);
+-		if (r < 0)
+-			return r;
+-	}
++	if (tdp_mmu_enabled)
++		kvm_mmu_init_tdp_mmu(kvm);
+ 
+ 	node->track_write = kvm_mmu_pte_write;
+ 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+@@ -6233,8 +6229,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
+ 
+ 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
+ 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+-
+-	return 0;
+ }
+ 
+ static void mmu_free_vm_memory_caches(struct kvm *kvm)
+@@ -6294,7 +6288,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
+ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ {
+ 	bool flush;
+-	int i;
+ 
+ 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
+ 		return;
+@@ -6305,11 +6298,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ 
+ 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+ 
+-	if (tdp_mmu_enabled) {
+-		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+-			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+-						      gfn_end, true, flush);
+-	}
++	if (tdp_mmu_enabled)
++		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
+ 
+ 	if (flush)
+ 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
+diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
+index d39af5639ce97..198e5b5f5ab06 100644
+--- a/arch/x86/kvm/mmu/mmu_internal.h
++++ b/arch/x86/kvm/mmu/mmu_internal.h
+@@ -56,7 +56,12 @@ struct kvm_mmu_page {
+ 
+ 	bool tdp_mmu_page;
+ 	bool unsync;
+-	u8 mmu_valid_gen;
++	union {
++		u8 mmu_valid_gen;
++
++		/* Only accessed under slots_lock.  */
++		bool tdp_mmu_scheduled_root_to_zap;
++	};
+ 
+ 	 /*
+ 	  * The shadow page can't be replaced by an equivalent huge page
+@@ -98,13 +103,7 @@ struct kvm_mmu_page {
+ 		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+ 		tdp_ptep_t ptep;
+ 	};
+-	union {
+-		DECLARE_BITMAP(unsync_child_bitmap, 512);
+-		struct {
+-			struct work_struct tdp_mmu_async_work;
+-			void *tdp_mmu_async_data;
+-		};
+-	};
++	DECLARE_BITMAP(unsync_child_bitmap, 512);
+ 
+ 	/*
+ 	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
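
The mmu_internal.h change above relies on the two unioned fields having disjoint lifetimes: mmu_valid_gen is only meaningful for shadow-MMU pages and tdp_mmu_scheduled_root_to_zap only for TDP MMU roots, so they can safely share storage. A toy model of that idea (struct and field names are invented for the sketch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Two fields that are never live at the same time share a byte. */
	struct page_like {
		bool tdp_mmu_page;
		union {
			uint8_t valid_gen;	/* shadow MMU pages only */
			bool scheduled_to_zap;	/* TDP MMU roots only */
		};
	};

	int main(void)
	{
		struct page_like p = { .tdp_mmu_page = true };

		p.scheduled_to_zap = true;	/* valid_gen must not be read now */
		printf("sizeof(struct page_like) = %zu\n", sizeof(p));
		return 0;
	}
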
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 512163d52194b..a423078350fda 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -12,18 +12,10 @@
+ #include <trace/events/kvm.h>
+ 
+ /* Initializes the TDP MMU for the VM, if enabled. */
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
++void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+ {
+-	struct workqueue_struct *wq;
+-
+-	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+-	if (!wq)
+-		return -ENOMEM;
+-
+ 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+ 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
+-	kvm->arch.tdp_mmu_zap_wq = wq;
+-	return 1;
+ }
+ 
+ /* Arbitrarily returns true so that this may be used in if statements. */
+@@ -46,20 +38,15 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ 	 * ultimately frees all roots.
+ 	 */
+ 	kvm_tdp_mmu_invalidate_all_roots(kvm);
+-
+-	/*
+-	 * Destroying a workqueue also first flushes the workqueue, i.e. no
+-	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
+-	 */
+-	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
++	kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ 
+ 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+ 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
+ 
+ 	/*
+ 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
+-	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
+-	 * can call kvm_tdp_mmu_put_root and create new callbacks.
++	 * can run before the VM is torn down.  Putting the last reference to
++	 * zapped roots will create new callbacks.
+ 	 */
+ 	rcu_barrier();
+ }
+@@ -86,46 +73,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
+ 	tdp_mmu_free_sp(sp);
+ }
+ 
+-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
+-			     bool shared);
+-
+-static void tdp_mmu_zap_root_work(struct work_struct *work)
+-{
+-	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
+-						 tdp_mmu_async_work);
+-	struct kvm *kvm = root->tdp_mmu_async_data;
+-
+-	read_lock(&kvm->mmu_lock);
+-
+-	/*
+-	 * A TLB flush is not necessary as KVM performs a local TLB flush when
+-	 * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
+-	 * to a different pCPU.  Note, the local TLB flush on reuse also
+-	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
+-	 * intermediate paging structures, that may be zapped, as such entries
+-	 * are associated with the ASID on both VMX and SVM.
+-	 */
+-	tdp_mmu_zap_root(kvm, root, true);
+-
+-	/*
+-	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
+-	 * avoiding an infinite loop.  By design, the root is reachable while
+-	 * it's being asynchronously zapped, thus a different task can put its
+-	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
+-	 * asynchronously zapped root is unavoidable.
+-	 */
+-	kvm_tdp_mmu_put_root(kvm, root, true);
+-
+-	read_unlock(&kvm->mmu_lock);
+-}
+-
+-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
+-{
+-	root->tdp_mmu_async_data = kvm;
+-	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
+-	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared)
+ {
+@@ -211,8 +158,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
+ 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+ 
+-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
+-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
++#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
++	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
++	     _root;								\
++	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
++		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
++		} else
+ 
+ /*
+  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
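
The rewritten for_each_tdp_mmu_root_yield_safe() above uses the standard "if (!guard) { } else" trailer so that a lockdep assertion runs on every iteration while the caller's loop body still attaches cleanly and no dangling-else ambiguity is introduced. A compilable toy version of the same idiom (the list walk and guard are stand-ins, not the KVM code):

	#include <stdio.h>

	static int check_held(int held)
	{
		if (!held)
			fprintf(stderr, "lock not held!\n");
		return held;
	}

	/* Iterate 0..n-1, skipping the caller's body when the guard fails.
	 * The "if (!guard) { } else" shape swallows a dangling else and
	 * lets the user's statement attach to the else branch. */
	#define for_each_checked(i, n, held)			\
		for ((i) = 0; (i) < (n); (i)++)			\
			if (!check_held(held)) {		\
			} else

	int main(void)
	{
		int i;

		for_each_checked(i, 3, 1)
			printf("body ran for %d\n", i);

		for_each_checked(i, 3, 0)
			printf("never printed\n");

		return 0;
	}
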
+@@ -292,7 +243,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+ 	 * by a memslot update or by the destruction of the VM.  Initialize the
+ 	 * refcount to two; one reference for the vCPU, and one reference for
+ 	 * the TDP MMU itself, which is held until the root is invalidated and
+-	 * is ultimately put by tdp_mmu_zap_root_work().
++	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
+ 	 */
+ 	refcount_set(&root->tdp_mmu_root_count, 2);
+ 
+@@ -877,13 +828,12 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
+  * more SPTEs were zapped since the MMU lock was last acquired.
+  */
+-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+-			   bool can_yield, bool flush)
++bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
+ {
+ 	struct kvm_mmu_page *root;
+ 
+-	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
+-		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
++	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
++		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
+ 
+ 	return flush;
+ }
+@@ -891,7 +841,6 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+ {
+ 	struct kvm_mmu_page *root;
+-	int i;
+ 
+ 	/*
+ 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
+@@ -905,10 +854,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+ 	 * is being destroyed or the userspace VMM has exited.  In both cases,
+ 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
+ 	 */
+-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+-		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
+-			tdp_mmu_zap_root(kvm, root, false);
+-	}
++	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
++		tdp_mmu_zap_root(kvm, root, false);
+ }
+ 
+ /*
+@@ -917,18 +864,47 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+  */
+ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+ {
+-	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
++	struct kvm_mmu_page *root;
++
++	read_lock(&kvm->mmu_lock);
++
++	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
++		if (!root->tdp_mmu_scheduled_root_to_zap)
++			continue;
++
++		root->tdp_mmu_scheduled_root_to_zap = false;
++		KVM_BUG_ON(!root->role.invalid, kvm);
++
++		/*
++		 * A TLB flush is not necessary as KVM performs a local TLB
++		 * flush when allocating a new root (see kvm_mmu_load()), and
++		 * when migrating a vCPU to a different pCPU.  Note, the local
++		 * TLB flush on reuse also invalidates paging-structure-cache
++		 * entries, i.e. TLB entries for intermediate paging structures,
++		 * that may be zapped, as such entries are associated with the
++		 * ASID on both VMX and SVM.
++		 */
++		tdp_mmu_zap_root(kvm, root, true);
++
++		/*
++		 * The reference needs to be put *after* zapping the root, as
++		 * the root must be reachable by mmu_notifiers while it's being
++		 * zapped.
++		 */
++		kvm_tdp_mmu_put_root(kvm, root, true);
++	}
++
++	read_unlock(&kvm->mmu_lock);
+ }
+ 
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously.  Using a separate workqueue makes it
+- * easy to ensure that the destruction is performed before the "fast zap"
+- * completes, without keeping a separate list of invalidated roots; the list is
+- * effectively the list of work items in the workqueue.
++ * zapping is done separately so that it happens with mmu_lock held for read,
++ * whereas invalidating roots must be done with mmu_lock held for write (unless
++ * the VM is being destroyed).
+  *
+- * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
+  * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+@@ -953,19 +929,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ 	/*
+ 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
+ 	 * be other references to @kvm, i.e. nothing else can invalidate roots
+-	 * or be consuming roots, but walking the list of roots does need to be
+-	 * guarded against roots being deleted by the asynchronous zap worker.
++	 * or get/put references to roots.
+ 	 */
+-	rcu_read_lock();
+-
+-	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
++		/*
++		 * Note, invalid roots can outlive a memslot update!  Invalid
++		 * roots must be *zapped* before the memslot update completes,
++		 * but a different task can acquire a reference and keep the
++		 * root alive after it's been zapped.
++		 */
+ 		if (!root->role.invalid) {
++			root->tdp_mmu_scheduled_root_to_zap = true;
+ 			root->role.invalid = true;
+-			tdp_mmu_schedule_zap_root(kvm, root);
+ 		}
+ 	}
+-
+-	rcu_read_unlock();
+ }
+ 
+ /*
+@@ -1146,8 +1123,13 @@ retry:
+ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
+ 				 bool flush)
+ {
+-	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+-				     range->end, range->may_block, flush);
++	struct kvm_mmu_page *root;
++
++	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
++		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
++					  range->may_block, flush);
++
++	return flush;
+ }
+ 
+ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index 0a63b1afabd3c..733a3aef3a96e 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -7,7 +7,7 @@
+ 
+ #include "spte.h"
+ 
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
++void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+ 
+ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
+@@ -20,8 +20,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared);
+ 
+-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
+-				 gfn_t end, bool can_yield, bool flush);
++bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
+ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+ void kvm_tdp_mmu_zap_all(struct kvm *kvm);
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index cefb67a8c668c..ed1d9de522f4e 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2945,6 +2945,32 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ 				    count, in);
+ }
+ 
++static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
++{
++	struct kvm_vcpu *vcpu = &svm->vcpu;
++
++	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
++		bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
++				 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
++
++		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
++	}
++}
++
++void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
++{
++	struct kvm_vcpu *vcpu = &svm->vcpu;
++	struct kvm_cpuid_entry2 *best;
++
++	/* For SEV guests, the memory encryption bit is not reserved in CR3.  */
++	best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
++	if (best)
++		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
++
++	if (sev_es_guest(svm->vcpu.kvm))
++		sev_es_vcpu_after_set_cpuid(svm);
++}
++
+ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ {
+ 	struct kvm_vcpu *vcpu = &svm->vcpu;
+@@ -2991,14 +3017,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+-
+-	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) &&
+-	    (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) ||
+-	     guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) {
+-		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1);
+-		if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
+-			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+-	}
+ }
+ 
+ void sev_init_vmcb(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index e3acccc126166..e3d92670c1115 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4217,7 +4217,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
+ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+-	struct kvm_cpuid_entry2 *best;
+ 
+ 	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ 				    boot_cpu_has(X86_FEATURE_XSAVE) &&
+@@ -4252,12 +4251,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
+ 				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+ 
+-	/* For sev guests, the memory encryption bit is not reserved in CR3.  */
+-	if (sev_guest(vcpu->kvm)) {
+-		best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
+-		if (best)
+-			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
+-	}
++	if (sev_guest(vcpu->kvm))
++		sev_vcpu_after_set_cpuid(svm);
+ 
+ 	init_vmcb_after_set_cpuid(vcpu);
+ }
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 8239c8de45acf..a96d80465b83b 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -733,6 +733,7 @@ void __init sev_hardware_setup(void);
+ void sev_hardware_unsetup(void);
+ int sev_cpu_init(struct svm_cpu_data *sd);
+ void sev_init_vmcb(struct vcpu_svm *svm);
++void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
+ void sev_free_vcpu(struct kvm_vcpu *vcpu);
+ int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f2fb67a9dc050..bc6f0fea48b43 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -41,7 +41,7 @@
+ #include <asm/idtentry.h>
+ #include <asm/io.h>
+ #include <asm/irq_remapping.h>
+-#include <asm/kexec.h>
++#include <asm/reboot.h>
+ #include <asm/perf_event.h>
+ #include <asm/mmu_context.h>
+ #include <asm/mshyperv.h>
+@@ -754,7 +754,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ static void crash_vmclear_local_loaded_vmcss(void)
+ {
+ 	int cpu = raw_smp_processor_id();
+@@ -764,7 +763,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
+ 			    loaded_vmcss_on_cpu_link)
+ 		vmcs_clear(v->vmcs);
+ }
+-#endif /* CONFIG_KEXEC_CORE */
+ 
+ static void __loaded_vmcs_clear(void *arg)
+ {
+@@ -8623,10 +8621,9 @@ static void __vmx_exit(void)
+ {
+ 	allow_smaller_maxphyaddr = false;
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ 	synchronize_rcu();
+-#endif
++
+ 	vmx_cleanup_l1d_flush();
+ }
+ 
+@@ -8675,10 +8672,9 @@ static int __init vmx_init(void)
+ 		pi_init_cpu(cpu);
+ 	}
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+ 			   crash_vmclear_local_loaded_vmcss);
+-#endif
++
+ 	vmx_check_vmcs12_offsets();
+ 
+ 	/*
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c381770bcbf13..e24bbc8d1fc19 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12302,9 +12302,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = kvm_mmu_init_vm(kvm);
+-	if (ret)
+-		goto out_page_track;
++	kvm_mmu_init_vm(kvm);
+ 
+ 	ret = static_call(kvm_x86_vm_init)(kvm);
+ 	if (ret)
+@@ -12349,7 +12347,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 
+ out_uninit_mmu:
+ 	kvm_mmu_uninit_vm(kvm);
+-out_page_track:
+ 	kvm_page_track_cleanup(kvm);
+ out:
+ 	return ret;
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 8f95fb267caa7..76697df8dfd5b 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -40,7 +40,7 @@ SYM_TYPED_FUNC_START(__memcpy)
+ SYM_FUNC_END(__memcpy)
+ EXPORT_SYMBOL(__memcpy)
+ 
+-SYM_FUNC_ALIAS(memcpy, __memcpy)
++SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
+ EXPORT_SYMBOL(memcpy)
+ 
+ SYM_FUNC_START_LOCAL(memcpy_orig)
+diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
+index 0559b206fb110..ccdf3a597045e 100644
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -212,5 +212,5 @@ SYM_FUNC_START(__memmove)
+ SYM_FUNC_END(__memmove)
+ EXPORT_SYMBOL(__memmove)
+ 
+-SYM_FUNC_ALIAS(memmove, __memmove)
++SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
+ EXPORT_SYMBOL(memmove)
+diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
+index 7c59a704c4584..3d818b849ec64 100644
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
+ SYM_FUNC_END(__memset)
+ EXPORT_SYMBOL(__memset)
+ 
+-SYM_FUNC_ALIAS(memset, __memset)
++SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
+ EXPORT_SYMBOL(memset)
+ 
+ SYM_FUNC_START_LOCAL(memset_orig)
+diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
+index a65b7a9ebff28..d8b0fadf429a9 100644
+--- a/arch/xtensa/boot/Makefile
++++ b/arch/xtensa/boot/Makefile
+@@ -9,8 +9,7 @@
+ 
+ 
+ # KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
+-KBUILD_CFLAGS	+= -fno-builtin -Iarch/$(ARCH)/boot/include
+-HOSTFLAGS	+= -Iarch/$(ARCH)/boot/include
++KBUILD_CFLAGS	+= -fno-builtin
+ 
+ subdir-y	:= lib
+ targets		+= vmlinux.bin vmlinux.bin.gz
+diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
+index e3ecd743c5153..b89189355122a 100644
+--- a/arch/xtensa/boot/lib/zmem.c
++++ b/arch/xtensa/boot/lib/zmem.c
+@@ -4,13 +4,14 @@
+ /* bits taken from ppc */
+ 
+ extern void *avail_ram, *end_avail;
++void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
+ 
+-void exit (void)
++static void exit(void)
+ {
+   for (;;);
+ }
+ 
+-void *zalloc(unsigned size)
++static void *zalloc(unsigned int size)
+ {
+         void *p = avail_ram;
+ 
+diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
+index 3f5ffae89b580..6f02f6f21890f 100644
+--- a/arch/xtensa/include/asm/core.h
++++ b/arch/xtensa/include/asm/core.h
+@@ -6,6 +6,10 @@
+ 
+ #include <variant/core.h>
+ 
++#ifndef XCHAL_HAVE_DIV32
++#define XCHAL_HAVE_DIV32 0
++#endif
++
+ #ifndef XCHAL_HAVE_EXCLUSIVE
+ #define XCHAL_HAVE_EXCLUSIVE 0
+ #endif
+diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S
+index 8c7a94a0c5d07..5da501b578136 100644
+--- a/arch/xtensa/lib/umulsidi3.S
++++ b/arch/xtensa/lib/umulsidi3.S
+@@ -3,7 +3,9 @@
+ #include <asm/asmmacro.h>
+ #include <asm/core.h>
+ 
+-#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
++#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
++#define XCHAL_NO_MUL 0
++#else
+ #define XCHAL_NO_MUL 1
+ #endif
+ 
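
Both xtensa changes above follow the same convention: a feature macro should always evaluate to 0 or 1 and never be left undefined, since assembler .if directives (unlike the C preprocessor) reject undefined symbols, and -Wundef flags them in C. A minimal sketch of the pattern (macro names are illustrative):

	#include <stdio.h>

	/* Supply a default when the variant header omits the macro, as the
	 * core.h hunk does for XCHAL_HAVE_DIV32. */
	#ifndef HAVE_DIV32_DEMO
	#define HAVE_DIV32_DEMO 0
	#endif

	/* Define the derived macro in *both* branches, as the umulsidi3.S
	 * hunk does for XCHAL_NO_MUL, so it is 0 or 1, never undefined. */
	#if defined(HAVE_MUL16_DEMO) || defined(HAVE_MUL32_DEMO)
	#define NO_MUL_DEMO 0
	#else
	#define NO_MUL_DEMO 1
	#endif

	int main(void)
	{
		printf("div32=%d no_mul=%d\n", HAVE_DIV32_DEMO, NO_MUL_DEMO);
		return 0;
	}
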
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index 85c82cd42188a..e89f27f2bb18d 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -201,7 +201,7 @@ static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
+ 	return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
+ }
+ 
+-unsigned short tuntap_protocol(struct sk_buff *skb)
++static unsigned short tuntap_protocol(struct sk_buff *skb)
+ {
+ 	return eth_type_trans(skb, skb->dev);
+ }
+@@ -441,7 +441,7 @@ static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
+ 	return -EINVAL;
+ }
+ 
+-void iss_net_user_timer_expire(struct timer_list *unused)
++static void iss_net_user_timer_expire(struct timer_list *unused)
+ {
+ }
+ 
+diff --git a/crypto/sm2.c b/crypto/sm2.c
+index 285b3cb7c0bc7..5ab120d74c592 100644
+--- a/crypto/sm2.c
++++ b/crypto/sm2.c
+@@ -278,10 +278,14 @@ int sm2_compute_z_digest(struct shash_desc *desc,
+ 	if (!ec)
+ 		return -ENOMEM;
+ 
+-	err = __sm2_set_pub_key(ec, key, keylen);
++	err = sm2_ec_ctx_init(ec);
+ 	if (err)
+ 		goto out_free_ec;
+ 
++	err = __sm2_set_pub_key(ec, key, keylen);
++	if (err)
++		goto out_deinit_ec;
++
+ 	bits_len = SM2_DEFAULT_USERID_LEN * 8;
+ 	entl[0] = bits_len >> 8;
+ 	entl[1] = bits_len & 0xff;
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index f58951a0d81b1..93c69aaa6218d 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -195,7 +195,8 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
++	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
++					 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+ 	if (!fw->mem) {
+ 		ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ 		return -ENOMEM;
+@@ -272,7 +273,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
+ 		memset(start, 0, size);
+ 	}
+ 
+-	wmb(); /* Flush WC buffers after writing fw->mem */
++	clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+ 
+ 	return 0;
+ }
+@@ -374,6 +375,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 	if (!ivpu_fw_is_cold_boot(vdev)) {
+ 		boot_params->save_restore_ret_address = 0;
+ 		vdev->pm->is_warmboot = true;
++		clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ 		return;
+ 	}
+ 
+@@ -428,7 +430,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 	boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
+ 	boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+ 
+-	wmb(); /* Flush WC buffers after writing bootparams */
++	clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ 
+ 	ivpu_fw_boot_params_print(vdev, boot_params);
+ }
+diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
+index 6b0ceda5f2537..f4130586ff1b2 100644
+--- a/drivers/accel/ivpu/ivpu_gem.h
++++ b/drivers/accel/ivpu/ivpu_gem.h
+@@ -8,6 +8,8 @@
+ #include <drm/drm_gem.h>
+ #include <drm/drm_mm.h>
+ 
++#define DRM_IVPU_BO_NOSNOOP       0x10000000
++
+ struct dma_buf;
+ struct ivpu_bo_ops;
+ struct ivpu_file_priv;
+@@ -83,6 +85,9 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+ 
+ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+ {
++	if (bo->flags & DRM_IVPU_BO_NOSNOOP)
++		return false;
++
+ 	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+ }
+ 
+diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
+index fa0af59e39ab6..295c0d7b50398 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.c
++++ b/drivers/accel/ivpu/ivpu_ipc.c
+@@ -209,10 +209,10 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ 	struct ivpu_ipc_rx_msg *rx_msg;
+ 	int wait_ret, ret = 0;
+ 
+-	wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
+-						    (IS_KTHREAD() && kthread_should_stop()) ||
+-						    !list_empty(&cons->rx_msg_list),
+-						    msecs_to_jiffies(timeout_ms));
++	wait_ret = wait_event_timeout(cons->rx_msg_wq,
++				      (IS_KTHREAD() && kthread_should_stop()) ||
++				      !list_empty(&cons->rx_msg_list),
++				      msecs_to_jiffies(timeout_ms));
+ 
+ 	if (IS_KTHREAD() && kthread_should_stop())
+ 		return -EINTR;
+@@ -220,9 +220,6 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ 	if (wait_ret == 0)
+ 		return -ETIMEDOUT;
+ 
+-	if (wait_ret < 0)
+-		return -ERESTARTSYS;
+-
+ 	spin_lock_irq(&cons->rx_msg_lock);
+ 	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
+ 	if (!rx_msg) {
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 07204d4829684..305f590c54a8c 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -855,7 +855,7 @@ static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
+ {
+ 	if (idt->header.length < sizeof(*idt))
+ 		return 0;
+-	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
++	return sizeof(*idt) + sizeof(u32) * idt->line_count;
+ }
+ 
+ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
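
The nfit one-liner above corrects a trailing-array size computation: the total is the header plus line_count 32-bit entries, with no "- 1". The off-by-one temptation comes from the old "u32 line_offset[1]" declaration style, where one element is already counted inside sizeof(*idt). With a C99 flexible array member the arithmetic is direct, as in this small sketch (the struct is a stand-in, not the ACPI layout):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* A header followed by line_count 32-bit entries. */
	struct idt_like {
		uint16_t type;
		uint16_t length;
		uint32_t line_count;
		uint32_t line_offset[];	/* line_count entries follow */
	};

	static size_t sizeof_idt_like(const struct idt_like *idt)
	{
		/* Header plus one u32 per line: no "- 1" needed with a
		 * true flexible array member. */
		return sizeof(*idt) + sizeof(uint32_t) * idt->line_count;
	}

	int main(void)
	{
		struct idt_like *idt = calloc(1, sizeof(*idt) + 4 * sizeof(uint32_t));

		if (!idt)
			return 1;
		idt->line_count = 4;
		printf("total size: %zu bytes\n", sizeof_idt_like(idt));
		free(idt);
		return 0;
	}
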
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 79d02eb4e4797..76bf185a73c65 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5204,17 +5204,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ 	struct ata_link *link;
+ 	unsigned long flags;
+ 
+-	/* Previous resume operation might still be in
+-	 * progress.  Wait for PM_PENDING to clear.
++	spin_lock_irqsave(ap->lock, flags);
++
++	/*
++	 * A previous PM operation might still be in progress. Wait for
++	 * ATA_PFLAG_PM_PENDING to clear.
+ 	 */
+ 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
++		spin_unlock_irqrestore(ap->lock, flags);
+ 		ata_port_wait_eh(ap);
+-		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
++		spin_lock_irqsave(ap->lock, flags);
+ 	}
+ 
+-	/* request PM ops to EH */
+-	spin_lock_irqsave(ap->lock, flags);
+-
++	/* Queue the PM operation for EH */
+ 	ap->pm_mesg = mesg;
+ 	ap->pflags |= ATA_PFLAG_PM_PENDING;
+ 	ata_for_each_link(link, ap, HOST_FIRST) {
+@@ -5226,10 +5228,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ 
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 
+-	if (!async) {
++	if (!async)
+ 		ata_port_wait_eh(ap);
+-		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+-	}
+ }
+ 
+ /*
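
The ata_port_request_pm() hunk above shows the standard shape for code that must wait but holds a spinlock: take the lock, notice the pending flag, drop the lock around the sleeping ata_port_wait_eh(), then re-take it and continue. A pthread analogue of the same drop/wait/retake dance (all names invented for the sketch):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool pm_pending;

	static void wait_for_eh(void)
	{
		usleep(1000);	/* stand-in for a sleeping wait */
	}

	static void request_pm(void)
	{
		pthread_mutex_lock(&lock);

		/* Cannot sleep with the lock held: drop it around the
		 * wait, then re-take it. State must be re-checked after
		 * re-acquiring, since it may have changed meanwhile. */
		while (pm_pending) {
			pthread_mutex_unlock(&lock);
			wait_for_eh();
			pthread_mutex_lock(&lock);
		}

		pm_pending = true;	/* now it is our operation */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		request_pm();
		puts("PM request queued");
		return 0;
	}
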
+@@ -5396,7 +5396,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
+ #endif
+ 
+ const struct device_type ata_port_type = {
+-	.name = "ata_port",
++	.name = ATA_PORT_TYPE_NAME,
+ #ifdef CONFIG_PM
+ 	.pm = &ata_port_pm_ops,
+ #endif
+@@ -6130,11 +6130,30 @@ static void ata_port_detach(struct ata_port *ap)
+ 	if (!ap->ops->error_handler)
+ 		goto skip_eh;
+ 
+-	/* tell EH we're leaving & flush EH */
++	/* Wait for any ongoing EH */
++	ata_port_wait_eh(ap);
++
++	mutex_lock(&ap->scsi_scan_mutex);
+ 	spin_lock_irqsave(ap->lock, flags);
++
++	/* Remove scsi devices */
++	ata_for_each_link(link, ap, HOST_FIRST) {
++		ata_for_each_dev(dev, link, ALL) {
++			if (dev->sdev) {
++				spin_unlock_irqrestore(ap->lock, flags);
++				scsi_remove_device(dev->sdev);
++				spin_lock_irqsave(ap->lock, flags);
++				dev->sdev = NULL;
++			}
++		}
++	}
++
++	/* Tell EH to disable all devices */
+ 	ap->pflags |= ATA_PFLAG_UNLOADING;
+ 	ata_port_schedule_eh(ap);
++
+ 	spin_unlock_irqrestore(ap->lock, flags);
++	mutex_unlock(&ap->scsi_scan_mutex);
+ 
+ 	/* wait till EH commits suicide */
+ 	ata_port_wait_eh(ap);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 35e03679b0bfe..960ef5c6f2c10 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2822,23 +2822,13 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 		}
+ 	}
+ 
+-	/*
+-	 * Some controllers can't be frozen very well and may set spurious
+-	 * error conditions during reset.  Clear accumulated error
+-	 * information and re-thaw the port if frozen.  As reset is the
+-	 * final recovery action and we cross check link onlineness against
+-	 * device classification later, no hotplug event is lost by this.
+-	 */
++	/* clear cached SError */
+ 	spin_lock_irqsave(link->ap->lock, flags);
+-	memset(&link->eh_info, 0, sizeof(link->eh_info));
++	link->eh_info.serror = 0;
+ 	if (slave)
+-		memset(&slave->eh_info, 0, sizeof(link->eh_info));
+-	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
++		slave->eh_info.serror = 0;
+ 	spin_unlock_irqrestore(link->ap->lock, flags);
+ 
+-	if (ata_port_is_frozen(ap))
+-		ata_eh_thaw_port(ap);
+-
+ 	/*
+ 	 * Make sure onlineness and classification result correspond.
+ 	 * Hotplug could have happened during reset and some
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index c6ece32de8e31..702812285d8f0 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1106,7 +1106,8 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 		 * will be woken up by ata_port_pm_resume() with a port reset
+ 		 * and device revalidation.
+ 		 */
+-		sdev->manage_start_stop = 1;
++		sdev->manage_system_start_stop = true;
++		sdev->manage_runtime_start_stop = true;
+ 		sdev->no_start_on_resume = 1;
+ 	}
+ 
+@@ -1139,6 +1140,42 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 	return 0;
+ }
+ 
++/**
++ *	ata_scsi_slave_alloc - Early setup of SCSI device
++ *	@sdev: SCSI device to examine
++ *
++ *	This is called from scsi_alloc_sdev() when the scsi device
++ *	associated with an ATA device is scanned on a port.
++ *
++ *	LOCKING:
++ *	Defined by SCSI layer.  We don't really care.
++ */
++
++int ata_scsi_slave_alloc(struct scsi_device *sdev)
++{
++	struct ata_port *ap = ata_shost_to_port(sdev->host);
++	struct device_link *link;
++
++	ata_scsi_sdev_config(sdev);
++
++	/*
++	 * Create a link from the ata_port device to the scsi device to ensure
++	 * that PM does suspend/resume in the correct order: the scsi device is
++	 * consumer (child) and the ata port the supplier (parent).
++	 */
++	link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
++			       DL_FLAG_STATELESS |
++			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
++	if (!link) {
++		ata_port_err(ap, "Failed to create link to scsi device %s\n",
++			     dev_name(&sdev->sdev_gendev));
++		return -ENODEV;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
++
+ /**
+  *	ata_scsi_slave_config - Set SCSI device attributes
+  *	@sdev: SCSI device to examine
+@@ -1155,14 +1192,11 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
+ {
+ 	struct ata_port *ap = ata_shost_to_port(sdev->host);
+ 	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+-	int rc = 0;
+-
+-	ata_scsi_sdev_config(sdev);
+ 
+ 	if (dev)
+-		rc = ata_scsi_dev_config(sdev, dev);
++		return ata_scsi_dev_config(sdev, dev);
+ 
+-	return rc;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+ 
+@@ -1189,6 +1223,8 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
+ 	if (!ap->ops->error_handler)
+ 		return;
+ 
++	device_link_remove(&sdev->sdev_gendev, &ap->tdev);
++
+ 	spin_lock_irqsave(ap->lock, flags);
+ 	dev = __ata_scsi_find_dev(ap, sdev);
+ 	if (dev && dev->sdev) {
+@@ -1892,6 +1928,9 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+ 		hdr[2] = 0x7; /* claim SPC-5 version compatibility */
+ 	}
+ 
++	if (args->dev->flags & ATA_DFLAG_CDL)
++		hdr[2] = 0xd; /* claim SPC-6 version compatibility */
++
+ 	memcpy(rbuf, hdr, sizeof(hdr));
+ 	memcpy(&rbuf[8], "ATA     ", 8);
+ 	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+@@ -4448,7 +4487,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ 		break;
+ 
+ 	case MAINTENANCE_IN:
+-		if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
++		if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ 			ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ 		else
+ 			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index e4fb9d1b9b398..3e49a877500e1 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
+ 	put_device(dev);
+ }
+ 
++static const struct device_type ata_port_sas_type = {
++	.name = ATA_PORT_TYPE_NAME,
++};
++
+ /** ata_tport_add - initialize a transport ATA port structure
+  *
+  * @parent:	parent device
+@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
+ 	struct device *dev = &ap->tdev;
+ 
+ 	device_initialize(dev);
+-	dev->type = &ata_port_type;
++	if (ap->flags & ATA_FLAG_SAS_HOST)
++		dev->type = &ata_port_sas_type;
++	else
++		dev->type = &ata_port_type;
+ 
+ 	dev->parent = parent;
+ 	ata_host_get(ap->host);
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index cf993885d2b25..76d0a5937b66a 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -30,6 +30,8 @@ enum {
+ 	ATA_DNXFER_QUIET	= (1 << 31),
+ };
+ 
++#define ATA_PORT_TYPE_NAME	"ata_port"
++
+ extern atomic_t ata_print_id;
+ extern int atapi_passthru16;
+ extern int libata_fua;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index d404e631d1527..68e660e4cc410 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
+ 
+ 	for (b = 0; b < bytes; ) {
+ 		for (w = 0, o = 0; b < bytes && w < 4; w++) {
+-			o += snprintf(linebuf + o, sizeof(linebuf) - o,
+-				      "%08x ", readl(start + b));
++			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
++				       "%08x ", readl(start + b));
+ 			b += sizeof(u32);
+ 		}
+ 		dev_dbg(dev, "%s: %p: %s\n",
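
The sata_mv change above matters because snprintf() returns the length the output *would* have had, so accumulating "o += snprintf(...)" can push o past the buffer and make the next "sizeof(linebuf) - o" wrap around; the kernel's scnprintf() instead returns the number of bytes actually stored. A userspace demonstration with a minimal scnprintf() stand-in:

	#include <stdarg.h>
	#include <stdio.h>

	/* Stand-in for the kernel's scnprintf(): returns the characters
	 * actually written (excluding the NUL), never more than size - 1,
	 * so "o += my_scnprintf(...)" can never run past the buffer. */
	static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
	{
		va_list ap;
		int ret;

		if (size == 0)
			return 0;
		va_start(ap, fmt);
		ret = vsnprintf(buf, size, fmt, ap);
		va_end(ap);
		if (ret < 0)
			return 0;
		return (size_t)ret >= size ? (int)(size - 1) : ret;
	}

	int main(void)
	{
		char linebuf[16];
		size_t o = 0;
		int i;

		/* With plain snprintf() here, o could exceed the buffer
		 * size and "sizeof(linebuf) - o" would wrap around. */
		for (i = 0; i < 8; i++)
			o += my_scnprintf(linebuf + o, sizeof(linebuf) - o,
					  "%08x ", i);

		printf("o=%zu buf=\"%s\"\n", o, linebuf);
		return 0;
	}
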
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 2328cc05be36e..58d3c4e647d78 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+ 
+ static int rbd_dev_refresh(struct rbd_device *rbd_dev);
+-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
+-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
++				     struct rbd_image_header *header);
+ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ 					u64 snap_id);
+ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
+ 	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
+ }
+ 
++static void rbd_image_header_cleanup(struct rbd_image_header *header)
++{
++	kfree(header->object_prefix);
++	ceph_put_snap_context(header->snapc);
++	kfree(header->snap_sizes);
++	kfree(header->snap_names);
++
++	memset(header, 0, sizeof(*header));
++}
++
+ /*
+  * Fill an rbd image header with information from the given format 1
+  * on-disk header.
+  */
+-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
+-				 struct rbd_image_header_ondisk *ondisk)
++static int rbd_header_from_disk(struct rbd_image_header *header,
++				struct rbd_image_header_ondisk *ondisk,
++				bool first_time)
+ {
+-	struct rbd_image_header *header = &rbd_dev->header;
+-	bool first_time = header->object_prefix == NULL;
+ 	struct ceph_snap_context *snapc;
+ 	char *object_prefix = NULL;
+ 	char *snap_names = NULL;
+@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
+ 	if (first_time) {
+ 		header->object_prefix = object_prefix;
+ 		header->obj_order = ondisk->options.order;
+-		rbd_init_layout(rbd_dev);
+-	} else {
+-		ceph_put_snap_context(header->snapc);
+-		kfree(header->snap_names);
+-		kfree(header->snap_sizes);
+ 	}
+ 
+ 	/* The remaining fields always get updated (when we refresh) */
+@@ -4859,7 +4862,9 @@ out_req:
+  * return, the rbd_dev->header field will contain up-to-date
+  * information about the image.
+  */
+-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header,
++				  bool first_time)
+ {
+ 	struct rbd_image_header_ondisk *ondisk = NULL;
+ 	u32 snap_count = 0;
+@@ -4907,7 +4912,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+ 		snap_count = le32_to_cpu(ondisk->snap_count);
+ 	} while (snap_count != want_count);
+ 
+-	ret = rbd_header_from_disk(rbd_dev, ondisk);
++	ret = rbd_header_from_disk(header, ondisk, first_time);
+ out:
+ 	kfree(ondisk);
+ 
+@@ -4931,39 +4936,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+ 	}
+ }
+ 
+-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+-{
+-	u64 mapping_size;
+-	int ret;
+-
+-	down_write(&rbd_dev->header_rwsem);
+-	mapping_size = rbd_dev->mapping.size;
+-
+-	ret = rbd_dev_header_info(rbd_dev);
+-	if (ret)
+-		goto out;
+-
+-	/*
+-	 * If there is a parent, see if it has disappeared due to the
+-	 * mapped image getting flattened.
+-	 */
+-	if (rbd_dev->parent) {
+-		ret = rbd_dev_v2_parent_info(rbd_dev);
+-		if (ret)
+-			goto out;
+-	}
+-
+-	rbd_assert(!rbd_is_snap(rbd_dev));
+-	rbd_dev->mapping.size = rbd_dev->header.image_size;
+-
+-out:
+-	up_write(&rbd_dev->header_rwsem);
+-	if (!ret && mapping_size != rbd_dev->mapping.size)
+-		rbd_dev_update_size(rbd_dev);
+-
+-	return ret;
+-}
+-
+ static const struct blk_mq_ops rbd_mq_ops = {
+ 	.queue_rq	= rbd_queue_rq,
+ };
+@@ -5503,17 +5475,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
+-{
+-	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+-					&rbd_dev->header.obj_order,
+-					&rbd_dev->header.image_size);
+-}
+-
+-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
++				    char **pobject_prefix)
+ {
+ 	size_t size;
+ 	void *reply_buf;
++	char *object_prefix;
+ 	int ret;
+ 	void *p;
+ 
+@@ -5531,16 +5498,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+ 		goto out;
+ 
+ 	p = reply_buf;
+-	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
+-						p + ret, NULL, GFP_NOIO);
++	object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
++						    GFP_NOIO);
++	if (IS_ERR(object_prefix)) {
++		ret = PTR_ERR(object_prefix);
++		goto out;
++	}
+ 	ret = 0;
+ 
+-	if (IS_ERR(rbd_dev->header.object_prefix)) {
+-		ret = PTR_ERR(rbd_dev->header.object_prefix);
+-		rbd_dev->header.object_prefix = NULL;
+-	} else {
+-		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
+-	}
++	*pobject_prefix = object_prefix;
++	dout("  object_prefix = %s\n", object_prefix);
+ out:
+ 	kfree(reply_buf);
+ 
+@@ -5591,13 +5558,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
+-{
+-	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+-					 rbd_is_ro(rbd_dev),
+-					 &rbd_dev->header.features);
+-}
+-
+ /*
+  * These are generic image flags, but since they are used only for
+  * object map, store them in rbd_dev->object_map_flags.
+@@ -5634,6 +5594,14 @@ struct parent_image_info {
+ 	u64		overlap;
+ };
+ 
++static void rbd_parent_info_cleanup(struct parent_image_info *pii)
++{
++	kfree(pii->pool_ns);
++	kfree(pii->image_id);
++
++	memset(pii, 0, sizeof(*pii));
++}
++
+ /*
+  * The caller is responsible for @pii.
+  */
+@@ -5703,6 +5671,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
+ 	if (pii->has_overlap)
+ 		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ 
++	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
++	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
++	     pii->has_overlap, pii->overlap);
+ 	return 0;
+ 
+ e_inval:
+@@ -5741,14 +5712,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+ 	pii->has_overlap = true;
+ 	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ 
++	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
++	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
++	     pii->has_overlap, pii->overlap);
+ 	return 0;
+ 
+ e_inval:
+ 	return -EINVAL;
+ }
+ 
+-static int get_parent_info(struct rbd_device *rbd_dev,
+-			   struct parent_image_info *pii)
++static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
++				  struct parent_image_info *pii)
+ {
+ 	struct page *req_page, *reply_page;
+ 	void *p;
+@@ -5776,7 +5750,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
++static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
+ {
+ 	struct rbd_spec *parent_spec;
+ 	struct parent_image_info pii = { 0 };
+@@ -5786,37 +5760,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 	if (!parent_spec)
+ 		return -ENOMEM;
+ 
+-	ret = get_parent_info(rbd_dev, &pii);
++	ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ 	if (ret)
+ 		goto out_err;
+ 
+-	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+-	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+-	     pii.has_overlap, pii.overlap);
+-
+-	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
+-		/*
+-		 * Either the parent never existed, or we have
+-		 * record of it but the image got flattened so it no
+-		 * longer has a parent.  When the parent of a
+-		 * layered image disappears we immediately set the
+-		 * overlap to 0.  The effect of this is that all new
+-		 * requests will be treated as if the image had no
+-		 * parent.
+-		 *
+-		 * If !pii.has_overlap, the parent image spec is not
+-		 * applicable.  It's there to avoid duplication in each
+-		 * snapshot record.
+-		 */
+-		if (rbd_dev->parent_overlap) {
+-			rbd_dev->parent_overlap = 0;
+-			rbd_dev_parent_put(rbd_dev);
+-			pr_info("%s: clone image has been flattened\n",
+-				rbd_dev->disk->disk_name);
+-		}
+-
++	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
+ 		goto out;	/* No parent?  No problem. */
+-	}
+ 
+ 	/* The ceph file layout needs to fit pool id in 32 bits */
+ 
+@@ -5828,58 +5777,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 	}
+ 
+ 	/*
+-	 * The parent won't change (except when the clone is
+-	 * flattened, already handled that).  So we only need to
+-	 * record the parent spec we have not already done so.
++	 * The parent won't change except when the clone is flattened,
++	 * so we only need to record the parent image spec once.
+ 	 */
+-	if (!rbd_dev->parent_spec) {
+-		parent_spec->pool_id = pii.pool_id;
+-		if (pii.pool_ns && *pii.pool_ns) {
+-			parent_spec->pool_ns = pii.pool_ns;
+-			pii.pool_ns = NULL;
+-		}
+-		parent_spec->image_id = pii.image_id;
+-		pii.image_id = NULL;
+-		parent_spec->snap_id = pii.snap_id;
+-
+-		rbd_dev->parent_spec = parent_spec;
+-		parent_spec = NULL;	/* rbd_dev now owns this */
++	parent_spec->pool_id = pii.pool_id;
++	if (pii.pool_ns && *pii.pool_ns) {
++		parent_spec->pool_ns = pii.pool_ns;
++		pii.pool_ns = NULL;
+ 	}
++	parent_spec->image_id = pii.image_id;
++	pii.image_id = NULL;
++	parent_spec->snap_id = pii.snap_id;
++
++	rbd_assert(!rbd_dev->parent_spec);
++	rbd_dev->parent_spec = parent_spec;
++	parent_spec = NULL;	/* rbd_dev now owns this */
+ 
+ 	/*
+-	 * We always update the parent overlap.  If it's zero we issue
+-	 * a warning, as we will proceed as if there was no parent.
++	 * Record the parent overlap.  If it's zero, issue a warning as
++	 * we will proceed as if there is no parent.
+ 	 */
+-	if (!pii.overlap) {
+-		if (parent_spec) {
+-			/* refresh, careful to warn just once */
+-			if (rbd_dev->parent_overlap)
+-				rbd_warn(rbd_dev,
+-				    "clone now standalone (overlap became 0)");
+-		} else {
+-			/* initial probe */
+-			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
+-		}
+-	}
++	if (!pii.overlap)
++		rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
+ 	rbd_dev->parent_overlap = pii.overlap;
+ 
+ out:
+ 	ret = 0;
+ out_err:
+-	kfree(pii.pool_ns);
+-	kfree(pii.image_id);
++	rbd_parent_info_cleanup(&pii);
+ 	rbd_spec_put(parent_spec);
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
++				    u64 *stripe_unit, u64 *stripe_count)
+ {
+ 	struct {
+ 		__le64 stripe_unit;
+ 		__le64 stripe_count;
+ 	} __attribute__ ((packed)) striping_info_buf = { 0 };
+ 	size_t size = sizeof (striping_info_buf);
+-	void *p;
+ 	int ret;
+ 
+ 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
+@@ -5891,27 +5828,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+ 	if (ret < size)
+ 		return -ERANGE;
+ 
+-	p = &striping_info_buf;
+-	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
+-	rbd_dev->header.stripe_count = ceph_decode_64(&p);
++	*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
++	*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
++	dout("  stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
++	     *stripe_count);
++
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
+ {
+-	__le64 data_pool_id;
++	__le64 data_pool_buf;
+ 	int ret;
+ 
+ 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
+ 				  &rbd_dev->header_oloc, "get_data_pool",
+-				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
++				  NULL, 0, &data_pool_buf,
++				  sizeof(data_pool_buf));
++	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
+ 	if (ret < 0)
+ 		return ret;
+-	if (ret < sizeof(data_pool_id))
++	if (ret < sizeof(data_pool_buf))
+ 		return -EBADMSG;
+ 
+-	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
+-	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
++	*data_pool_id = le64_to_cpu(data_pool_buf);
++	dout("  data_pool_id = %lld\n", *data_pool_id);
++	WARN_ON(*data_pool_id == CEPH_NOPOOL);
++
+ 	return 0;
+ }
+ 
+@@ -6103,7 +6046,8 @@ out_err:
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
++				   struct ceph_snap_context **psnapc)
+ {
+ 	size_t size;
+ 	int ret;
+@@ -6164,9 +6108,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+ 	for (i = 0; i < snap_count; i++)
+ 		snapc->snaps[i] = ceph_decode_64(&p);
+ 
+-	ceph_put_snap_context(rbd_dev->header.snapc);
+-	rbd_dev->header.snapc = snapc;
+-
++	*psnapc = snapc;
+ 	dout("  snap context seq = %llu, snap_count = %u\n",
+ 		(unsigned long long)seq, (unsigned int)snap_count);
+ out:
+@@ -6215,38 +6157,42 @@ out:
+ 	return snap_name;
+ }
+ 
+-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header,
++				  bool first_time)
+ {
+-	bool first_time = rbd_dev->header.object_prefix == NULL;
+ 	int ret;
+ 
+-	ret = rbd_dev_v2_image_size(rbd_dev);
++	ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
++				    first_time ? &header->obj_order : NULL,
++				    &header->image_size);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (first_time) {
+-		ret = rbd_dev_v2_header_onetime(rbd_dev);
++		ret = rbd_dev_v2_header_onetime(rbd_dev, header);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	ret = rbd_dev_v2_snap_context(rbd_dev);
+-	if (ret && first_time) {
+-		kfree(rbd_dev->header.object_prefix);
+-		rbd_dev->header.object_prefix = NULL;
+-	}
++	ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
++	if (ret)
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_header_info(struct rbd_device *rbd_dev,
++			       struct rbd_image_header *header,
++			       bool first_time)
+ {
+ 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
++	rbd_assert(!header->object_prefix && !header->snapc);
+ 
+ 	if (rbd_dev->image_format == 1)
+-		return rbd_dev_v1_header_info(rbd_dev);
++		return rbd_dev_v1_header_info(rbd_dev, header, first_time);
+ 
+-	return rbd_dev_v2_header_info(rbd_dev);
++	return rbd_dev_v2_header_info(rbd_dev, header, first_time);
+ }
+ 
+ /*
+@@ -6734,60 +6680,49 @@ out:
+  */
+ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
+ {
+-	struct rbd_image_header	*header;
+-
+ 	rbd_dev_parent_put(rbd_dev);
+ 	rbd_object_map_free(rbd_dev);
+ 	rbd_dev_mapping_clear(rbd_dev);
+ 
+ 	/* Free dynamic fields from the header, then zero it out */
+ 
+-	header = &rbd_dev->header;
+-	ceph_put_snap_context(header->snapc);
+-	kfree(header->snap_sizes);
+-	kfree(header->snap_names);
+-	kfree(header->object_prefix);
+-	memset(header, 0, sizeof (*header));
++	rbd_image_header_cleanup(&rbd_dev->header);
+ }
+ 
+-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
++				     struct rbd_image_header *header)
+ {
+ 	int ret;
+ 
+-	ret = rbd_dev_v2_object_prefix(rbd_dev);
++	ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/*
+ 	 * Get and check the features for the image.  Currently the
+ 	 * features are assumed to never change.
+ 	 */
+-	ret = rbd_dev_v2_features(rbd_dev);
++	ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
++					rbd_is_ro(rbd_dev), &header->features);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/* If the image supports fancy striping, get its parameters */
+ 
+-	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
+-		ret = rbd_dev_v2_striping_info(rbd_dev);
+-		if (ret < 0)
+-			goto out_err;
++	if (header->features & RBD_FEATURE_STRIPINGV2) {
++		ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
++					       &header->stripe_count);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
+-		ret = rbd_dev_v2_data_pool(rbd_dev);
++	if (header->features & RBD_FEATURE_DATA_POOL) {
++		ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
+ 		if (ret)
+-			goto out_err;
++			return ret;
+ 	}
+ 
+-	rbd_init_layout(rbd_dev);
+ 	return 0;
+-
+-out_err:
+-	rbd_dev->header.features = 0;
+-	kfree(rbd_dev->header.object_prefix);
+-	rbd_dev->header.object_prefix = NULL;
+-	return ret;
+ }
+ 
+ /*
+@@ -6982,13 +6917,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ 	if (!depth)
+ 		down_write(&rbd_dev->header_rwsem);
+ 
+-	ret = rbd_dev_header_info(rbd_dev);
++	ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
+ 	if (ret) {
+ 		if (ret == -ENOENT && !need_watch)
+ 			rbd_print_dne(rbd_dev, false);
+ 		goto err_out_probe;
+ 	}
+ 
++	rbd_init_layout(rbd_dev);
++
+ 	/*
+ 	 * If this image is the one being mapped, we have pool name and
+ 	 * id, image name and id, and snap name - need to fill snap id.
+@@ -7017,7 +6954,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ 	}
+ 
+ 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+-		ret = rbd_dev_v2_parent_info(rbd_dev);
++		ret = rbd_dev_setup_parent(rbd_dev);
+ 		if (ret)
+ 			goto err_out_probe;
+ 	}
+@@ -7043,6 +6980,107 @@ err_out_format:
+ 	return ret;
+ }
+ 
++static void rbd_dev_update_header(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header)
++{
++	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
++	rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
++
++	if (rbd_dev->header.image_size != header->image_size) {
++		rbd_dev->header.image_size = header->image_size;
++
++		if (!rbd_is_snap(rbd_dev)) {
++			rbd_dev->mapping.size = header->image_size;
++			rbd_dev_update_size(rbd_dev);
++		}
++	}
++
++	ceph_put_snap_context(rbd_dev->header.snapc);
++	rbd_dev->header.snapc = header->snapc;
++	header->snapc = NULL;
++
++	if (rbd_dev->image_format == 1) {
++		kfree(rbd_dev->header.snap_names);
++		rbd_dev->header.snap_names = header->snap_names;
++		header->snap_names = NULL;
++
++		kfree(rbd_dev->header.snap_sizes);
++		rbd_dev->header.snap_sizes = header->snap_sizes;
++		header->snap_sizes = NULL;
++	}
++}
++
++static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
++				  struct parent_image_info *pii)
++{
++	if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
++		/*
++		 * Either the parent never existed, or we have
++		 * record of it but the image got flattened so it no
++		 * longer has a parent.  When the parent of a
++		 * layered image disappears we immediately set the
++		 * overlap to 0.  The effect of this is that all new
++		 * requests will be treated as if the image had no
++		 * parent.
++		 *
++		 * If !pii.has_overlap, the parent image spec is not
++		 * applicable.  It's there to avoid duplication in each
++		 * snapshot record.
++		 */
++		if (rbd_dev->parent_overlap) {
++			rbd_dev->parent_overlap = 0;
++			rbd_dev_parent_put(rbd_dev);
++			pr_info("%s: clone has been flattened\n",
++				rbd_dev->disk->disk_name);
++		}
++	} else {
++		rbd_assert(rbd_dev->parent_spec);
++
++		/*
++		 * Update the parent overlap.  If it became zero, issue
++		 * a warning as we will proceed as if there is no parent.
++		 */
++		if (!pii->overlap && rbd_dev->parent_overlap)
++			rbd_warn(rbd_dev,
++				 "clone has become standalone (overlap 0)");
++		rbd_dev->parent_overlap = pii->overlap;
++	}
++}
++
++static int rbd_dev_refresh(struct rbd_device *rbd_dev)
++{
++	struct rbd_image_header	header = { 0 };
++	struct parent_image_info pii = { 0 };
++	int ret;
++
++	dout("%s rbd_dev %p\n", __func__, rbd_dev);
++
++	ret = rbd_dev_header_info(rbd_dev, &header, false);
++	if (ret)
++		goto out;
++
++	/*
++	 * If there is a parent, see if it has disappeared due to the
++	 * mapped image getting flattened.
++	 */
++	if (rbd_dev->parent) {
++		ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
++		if (ret)
++			goto out;
++	}
++
++	down_write(&rbd_dev->header_rwsem);
++	rbd_dev_update_header(rbd_dev, &header);
++	if (rbd_dev->parent)
++		rbd_dev_update_parent(rbd_dev, &pii);
++	up_write(&rbd_dev->header_rwsem);
++
++out:
++	rbd_parent_info_cleanup(&pii);
++	rbd_image_header_cleanup(&header);
++	return ret;
++}
++
+ static ssize_t do_rbd_add(const char *buf, size_t count)
+ {
+ 	struct rbd_device *rbd_dev = NULL;
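
The restructured rbd_dev_refresh() above follows a prepare-then-commit pattern: fetch the new header and parent info into local structures with no lock held, then take header_rwsem for write only for a short pointer-stealing swap. A compact sketch of the same idea with a pthread rwlock (types and names are invented for the example):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct header {
		size_t image_size;
		char *object_prefix;
	};

	static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;
	static struct header current_hdr;

	/* Build the new state unlocked (may block on I/O in the real
	 * driver), then publish it under a short write-side critical
	 * section, stealing pointers instead of copying. */
	static int refresh(size_t new_size, const char *new_prefix)
	{
		struct header hdr = { 0 };

		hdr.image_size = new_size;
		hdr.object_prefix = strdup(new_prefix);	/* slow work, unlocked */
		if (!hdr.object_prefix)
			return -1;

		pthread_rwlock_wrlock(&header_rwsem);
		current_hdr.image_size = hdr.image_size;
		free(current_hdr.object_prefix);
		current_hdr.object_prefix = hdr.object_prefix;
		hdr.object_prefix = NULL;		/* ownership moved */
		pthread_rwlock_unlock(&header_rwsem);

		return 0;
	}

	int main(void)
	{
		refresh(1024, "rbd_data.demo");
		printf("size=%zu prefix=%s\n", current_hdr.image_size,
		       current_hdr.object_prefix);
		free(current_hdr.object_prefix);
		return 0;
	}
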
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9766dbf607f97..27c5bae85adc2 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -38,6 +38,7 @@ enum sysc_soc {
+ 	SOC_2420,
+ 	SOC_2430,
+ 	SOC_3430,
++	SOC_AM35,
+ 	SOC_3630,
+ 	SOC_4430,
+ 	SOC_4460,
+@@ -1096,6 +1097,11 @@ static int sysc_enable_module(struct device *dev)
+ 	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
+ 				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
+ 		best_mode = SYSC_IDLE_NO;
++
++		/* Clear WAKEUP */
++		if (regbits->enwkup_shift >= 0 &&
++		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
++			reg &= ~BIT(regbits->enwkup_shift);
+ 	} else {
+ 		best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ 		if (best_mode > SYSC_IDLE_MASK) {
+@@ -1223,6 +1229,13 @@ set_sidle:
+ 		}
+ 	}
+ 
++	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
++		/* Set WAKEUP */
++		if (regbits->enwkup_shift >= 0 &&
++		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
++			reg |= BIT(regbits->enwkup_shift);
++	}
++
+ 	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ 	reg |= best_mode << regbits->sidle_shift;
+ 	if (regbits->autoidle_shift >= 0 &&
+@@ -1517,16 +1530,16 @@ struct sysc_revision_quirk {
+ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	/* Uarts on omap4 and later */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1861,7 +1874,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+ 		dev_warn(ddata->dev, "%s: timed out %08x !& %08x\n",
+ 			 __func__, val, irq_mask);
+ 
+-	if (sysc_soc->soc == SOC_3430) {
++	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
+ 		/* Clear DSS_SDI_CONTROL */
+ 		sysc_write(ddata, 0x44, 0);
+ 
+@@ -2149,8 +2162,7 @@ static int sysc_reset(struct sysc *ddata)
+ 	}
+ 
+ 	if (ddata->cfg.srst_udelay)
+-		usleep_range(ddata->cfg.srst_udelay,
+-			     ddata->cfg.srst_udelay * 2);
++		fsleep(ddata->cfg.srst_udelay);
+ 
+ 	if (ddata->post_reset_quirk)
+ 		ddata->post_reset_quirk(ddata);
+@@ -3024,6 +3036,7 @@ static void ti_sysc_idle(struct work_struct *work)
+ static const struct soc_device_attribute sysc_soc_match[] = {
+ 	SOC_FLAG("OMAP242*", SOC_2420),
+ 	SOC_FLAG("OMAP243*", SOC_2430),
++	SOC_FLAG("AM35*", SOC_AM35),
+ 	SOC_FLAG("OMAP3[45]*", SOC_3430),
+ 	SOC_FLAG("OMAP3[67]*", SOC_3630),
+ 	SOC_FLAG("OMAP443*", SOC_4430),
+@@ -3228,7 +3241,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
+ 	 * can be dropped if we stop supporting old beagleboard revisions
+ 	 * A to B4 at some point.
+ 	 */
+-	if (sysc_soc->soc == SOC_3430)
++	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ 		error = -ENXIO;
+ 	else
+ 		error = -EBUSY;
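
Two details in the ti-sysc hunks above are worth calling out. First, the SWSUP_SIDLE_ACT quirk now clears the wakeup-enable bit while the module is active and sets it again when programming the idle modes, which is what the paired enwkup_shift hunks implement. Second, usleep_range(d, d * 2) becomes fsleep(d), the kernel's "flexible sleep" helper that picks a delay primitive appropriate to the requested duration. Roughly, per include/linux/delay.h in kernels of this vintage:

	static inline void fsleep(unsigned long usecs)
	{
		if (usecs <= 10)		/* too short to schedule: busy-wait */
			udelay(usecs);
		else if (usecs <= 20000)	/* hrtimer-backed range sleep */
			usleep_range(usecs, 2 * usecs);
		else				/* long delays: ms resolution is fine */
			msleep(DIV_ROUND_UP(usecs, 1000));
	}
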
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index 514f9f287a781..c6f181702b9a7 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
+ static int __init
+ parisc_agp_init(void)
+ {
+-	extern struct sba_device *sba_list;
+-
+ 	int err = -1;
+ 	struct parisc_device *sba = NULL, *lba = NULL;
+ 	struct lba_device *lbadev = NULL;
+diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
+index 4eaf1b53f06bd..ef4ba467e747b 100644
+--- a/drivers/clk/clk-si521xx.c
++++ b/drivers/clk/clk-si521xx.c
+@@ -96,7 +96,7 @@ static int si521xx_regmap_i2c_write(void *context, unsigned int reg,
+ 				    unsigned int val)
+ {
+ 	struct i2c_client *i2c = context;
+-	const u8 data[3] = { reg, 1, val };
++	const u8 data[2] = { reg, val };
+ 	const int count = ARRAY_SIZE(data);
+ 	int ret;
+ 
+@@ -146,7 +146,7 @@ static int si521xx_regmap_i2c_read(void *context, unsigned int reg,
+ static const struct regmap_config si521xx_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+-	.cache_type = REGCACHE_NONE,
++	.cache_type = REGCACHE_FLAT,
+ 	.max_register = SI521XX_REG_DA,
+ 	.rd_table = &si521xx_readable_table,
+ 	.wr_table = &si521xx_writeable_table,
+@@ -281,9 +281,10 @@ static int si521xx_probe(struct i2c_client *client)
+ {
+ 	const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
+ 	const struct clk_parent_data clk_parent_data = { .index = 0 };
+-	struct si521xx *si;
++	const u8 data[3] = { SI521XX_REG_BC, 1, 1 };
+ 	unsigned char name[6] = "DIFF0";
+ 	struct clk_init_data init = {};
++	struct si521xx *si;
+ 	int i, ret;
+ 
+ 	if (!chip_info)
+@@ -308,7 +309,7 @@ static int si521xx_probe(struct i2c_client *client)
+ 				     "Failed to allocate register map\n");
+ 
+ 	/* Always read back 1 Byte via I2C */
+-	ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
++	ret = i2c_master_send(client, data, ARRAY_SIZE(data));
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
+index fc25bdd85e4ea..f43bb10bd5ae2 100644
+--- a/drivers/clk/sprd/ums512-clk.c
++++ b/drivers/clk/sprd/ums512-clk.c
+@@ -800,7 +800,7 @@ static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
+ 			 0x250, 0, 3, UMS512_MUX_FLAG);
+ 
+ static const struct clk_parent_data thm_parents[] = {
+-	{ .fw_name = "ext-32m" },
++	{ .fw_name = "ext-32k" },
+ 	{ .hw = &clk_250k.hw  },
+ };
+ static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
+diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
+index a9f3fb448de62..7bfba0afd7783 100644
+--- a/drivers/clk/tegra/clk-bpmp.c
++++ b/drivers/clk/tegra/clk-bpmp.c
+@@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
+ 
+ 	err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
+ 	if (err < 0)
+-		return err;
++		return 0;
+ 
+ 	return response.rate;
+ }
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index ca60bb8114f22..4df4f614f490e 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -715,24 +715,25 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
+ 	for (i = 0; i < cel_entries; i++) {
+ 		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
+ 		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
++		int enabled = 0;
+ 
+-		if (!cmd && (!cxl_is_poison_command(opcode) ||
+-			     !cxl_is_security_command(opcode))) {
+-			dev_dbg(dev,
+-				"Opcode 0x%04x unsupported by driver\n", opcode);
+-			continue;
+-		}
+-
+-		if (cmd)
++		if (cmd) {
+ 			set_bit(cmd->info.id, mds->enabled_cmds);
++			enabled++;
++		}
+ 
+-		if (cxl_is_poison_command(opcode))
++		if (cxl_is_poison_command(opcode)) {
+ 			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
++			enabled++;
++		}
+ 
+-		if (cxl_is_security_command(opcode))
++		if (cxl_is_security_command(opcode)) {
+ 			cxl_set_security_cmd_enabled(&mds->security, opcode);
++			enabled++;
++		}
+ 
+-		dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
++		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
++			enabled ? "enabled" : "unsupported by driver");
+ 	}
+ }
+ 
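
The restructured CEL walk above fixes a boolean-logic bug. The old guard was meant to skip opcodes no handler claims, but because an opcode is never both a poison and a security command, its compound condition collapsed to plain !cmd, so poison- and security-only opcodes with no generic command entry were skipped before their enable bits could be set. Counting claims sidesteps the De Morgan trap. Sketch with shortened, hypothetical names:

	/* Old, buggy guard: (!is_poison(op) || !is_security(op)) is
	 * effectively always true, so this reduces to (!cmd) alone.
	 */
	if (!cmd && (!is_poison(op) || !is_security(op)))
		continue;

	/* The compound form would have needed &&, not ||: */
	if (!cmd && !is_poison(op) && !is_security(op))
		continue;
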
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 724be8448eb4a..7ca01a834e188 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
++#include <linux/platform_device.h>
+ #include <linux/memregion.h>
+ #include <linux/workqueue.h>
+ #include <linux/debugfs.h>
+@@ -706,16 +707,20 @@ static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
+ 	return cxl_setup_regs(map);
+ }
+ 
+-static inline int cxl_port_setup_regs(struct cxl_port *port,
+-				      resource_size_t component_reg_phys)
++static int cxl_port_setup_regs(struct cxl_port *port,
++			resource_size_t component_reg_phys)
+ {
++	if (dev_is_platform(port->uport_dev))
++		return 0;
+ 	return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+ 				   component_reg_phys);
+ }
+ 
+-static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
+-				       resource_size_t component_reg_phys)
++static int cxl_dport_setup_regs(struct cxl_dport *dport,
++				resource_size_t component_reg_phys)
+ {
++	if (dev_is_platform(dport->dport_dev))
++		return 0;
+ 	return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+ 				   component_reg_phys);
+ }
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index e115ba382e044..b4c6a749406f1 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -717,13 +717,35 @@ static int match_free_decoder(struct device *dev, void *data)
+ 	return 0;
+ }
+ 
++static int match_auto_decoder(struct device *dev, void *data)
++{
++	struct cxl_region_params *p = data;
++	struct cxl_decoder *cxld;
++	struct range *r;
++
++	if (!is_switch_decoder(dev))
++		return 0;
++
++	cxld = to_cxl_decoder(dev);
++	r = &cxld->hpa_range;
++
++	if (p->res && p->res->start == r->start && p->res->end == r->end)
++		return 1;
++
++	return 0;
++}
++
+ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ 						   struct cxl_region *cxlr)
+ {
+ 	struct device *dev;
+ 	int id = 0;
+ 
+-	dev = device_find_child(&port->dev, &id, match_free_decoder);
++	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
++		dev = device_find_child(&port->dev, &cxlr->params,
++					match_auto_decoder);
++	else
++		dev = device_find_child(&port->dev, &id, match_free_decoder);
+ 	if (!dev)
+ 		return NULL;
+ 	/*
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 1cb1494c28fe8..2323169b6e5fe 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -541,9 +541,9 @@ static int cxl_pci_ras_unmask(struct pci_dev *pdev)
+ 		return 0;
+ 	}
+ 
+-	/* BIOS has CXL error control */
+-	if (!host_bridge->native_cxl_error)
+-		return -ENXIO;
++	/* BIOS has PCIe AER error control */
++	if (!host_bridge->native_aer)
++		return 0;
+ 
+ 	rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
+ 	if (rc)
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 26db5b8dfc1ef..749868b9e80d6 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
+  *
+  * - power condition
+  *   Set the power condition field in the START STOP UNIT commands sent by
+- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
++ *   sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
++ *   manage_runtime_start_stop is on).
+  *   Some disks need this to spin down or to resume properly.
+  *
+  * - override internal blacklist
+@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ 
+ 	sdev->use_10_for_rw = 1;
+ 
+-	if (sbp2_param_exclusive_login)
+-		sdev->manage_start_stop = 1;
++	if (sbp2_param_exclusive_login) {
++		sdev->manage_system_start_stop = true;
++		sdev->manage_runtime_start_stop = true;
++	}
+ 
+ 	if (sdev->type == TYPE_ROM)
+ 		sdev->use_10_for_ms = 1;
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 2109cd178ff70..121f4fc903cd5 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -397,6 +397,19 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+ 	return num_pages;
+ }
+ 
++static u8 ffa_memory_attributes_get(u32 func_id)
++{
++	/*
++	 * For the memory lend or donate operation, if the receiver is a PE or
++	 * a proxy endpoint, the owner/sender must not specify the attributes
++	 */
++	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
++	    func_id == FFA_MEM_LEND)
++		return 0;
++
++	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
++}
++
+ static int
+ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 		       struct ffa_mem_ops_args *args)
+@@ -413,8 +426,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 	mem_region->tag = args->tag;
+ 	mem_region->flags = args->flags;
+ 	mem_region->sender_id = drv_info->vm_id;
+-	mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
+-				 FFA_MEM_INNER_SHAREABLE;
++	mem_region->attributes = ffa_memory_attributes_get(func_id);
+ 	ep_mem_access = &mem_region->ep_mem_access[0];
+ 
+ 	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
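
The new ffa_memory_attributes_get() helper above encodes an FF-A rule: for a lend (and, per its comment, a donate) the owner must leave the attributes field zero so the receiver or relayer assigns them, while a share keeps the usual encoding. The caller-side effect, sketched:

	mem_region->attributes = ffa_memory_attributes_get(func_id);
	/*
	 * FFA_MEM_SHARE          -> FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
	 *                           FFA_MEM_INNER_SHAREABLE
	 * FFA_MEM_LEND (any ABI) -> 0, attributes left to the receiver
	 */
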
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index ecf5c4de851b7..431bda9165c3d 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -139,7 +139,7 @@ struct perf_dom_info {
+ 
+ struct scmi_perf_info {
+ 	u32 version;
+-	int num_domains;
++	u16 num_domains;
+ 	enum scmi_power_scale power_scale;
+ 	u64 stats_addr;
+ 	u32 stats_size;
+@@ -356,11 +356,26 @@ static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
+ 	return ret;
+ }
+ 
++static inline struct perf_dom_info *
++scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
++{
++	struct scmi_perf_info *pi = ph->get_priv(ph);
++
++	if (domain >= pi->num_domains)
++		return ERR_PTR(-EINVAL);
++
++	return pi->dom_info + domain;
++}
++
+ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ 				u32 domain, u32 max_perf, u32 min_perf)
+ {
+ 	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+ 		return -EINVAL;
+@@ -408,8 +423,11 @@ static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ 				u32 domain, u32 *max_perf, u32 *min_perf)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ 		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+@@ -449,8 +467,11 @@ static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ 			       u32 domain, u32 level, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ 		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+@@ -490,8 +511,11 @@ static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ 			       u32 domain, u32 *level, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ 		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+@@ -574,13 +598,14 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ 	unsigned long freq;
+ 	struct scmi_opp *opp;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 
+ 	domain = scmi_dev_domain_id(dev);
+ 	if (domain < 0)
+-		return domain;
++		return -EINVAL;
+ 
+-	dom = pi->dom_info + domain;
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ 		freq = opp->perf * dom->mult_factor;
+@@ -603,14 +628,17 @@ static int
+ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ 				 struct device *dev)
+ {
++	int domain;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	int domain = scmi_dev_domain_id(dev);
+ 
++	domain = scmi_dev_domain_id(dev);
+ 	if (domain < 0)
+-		return domain;
++		return -EINVAL;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+-	dom = pi->dom_info + domain;
+ 	/* uS to nS */
+ 	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+ }
+@@ -618,8 +646,11 @@ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
+ 			      unsigned long freq, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
+ }
+@@ -630,11 +661,14 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
+ 	int ret;
+ 	u32 level;
+ 	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
+ 
+ 	ret = scmi_perf_level_get(ph, domain, &level, poll);
+-	if (!ret)
++	if (!ret) {
++		struct perf_dom_info *dom = pi->dom_info + domain;
++
++		/* Note domain is validated implicitly by scmi_perf_level_get */
+ 		*freq = level * dom->mult_factor;
++	}
+ 
+ 	return ret;
+ }
+@@ -643,15 +677,14 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ 				   u32 domain, unsigned long *freq,
+ 				   unsigned long *power)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 	struct perf_dom_info *dom;
+ 	unsigned long opp_freq;
+ 	int idx, ret = -EINVAL;
+ 	struct scmi_opp *opp;
+ 
+-	dom = pi->dom_info + domain;
+-	if (!dom)
+-		return -EIO;
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ 		opp_freq = opp->perf * dom->mult_factor;
+@@ -670,10 +703,16 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
+ 				      struct device *dev)
+ {
++	int domain;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 
+-	dom = pi->dom_info + scmi_dev_domain_id(dev);
++	domain = scmi_dev_domain_id(dev);
++	if (domain < 0)
++		return false;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return false;
+ 
+ 	return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
+ }
+@@ -819,6 +858,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+ 	if (!pinfo)
+ 		return -ENOMEM;
+ 
++	pinfo->version = version;
++
+ 	ret = scmi_perf_attributes_get(ph, pinfo);
+ 	if (ret)
+ 		return ret;
+@@ -838,8 +879,6 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+ 			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
+ 	}
+ 
+-	pinfo->version = version;
+-
+ 	return ph->set_priv(ph, pinfo);
+ }
+ 
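
The recurring fix across the SCMI perf hunks above replaces unchecked pi->dom_info + domain pointer arithmetic with one bounds-checked lookup helper, using the kernel's ERR_PTR convention to return either a valid domain or an errno through a single pointer. A minimal sketch of the idiom, with hypothetical names:

	#include <linux/err.h>

	static struct thing *thing_lookup(struct thing_table *t, u32 idx)
	{
		if (idx >= t->count)
			return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */
		return &t->entries[idx];
	}

	/* Caller side: */
	struct thing *th = thing_lookup(tbl, idx);
	if (IS_ERR(th))
		return PTR_ERR(th);		/* decode back to -EINVAL etc. */
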
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 49b70c70dc696..79d4254d1f9bc 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1863,15 +1863,15 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
+ 		return PTR_ERR(adsp2_alg);
+ 
+ 	for (i = 0; i < n_algs; i++) {
+-		cs_dsp_info(dsp,
+-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+-			    i, be32_to_cpu(adsp2_alg[i].alg.id),
+-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+-			    be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+-			    be32_to_cpu(adsp2_alg[i].xm),
+-			    be32_to_cpu(adsp2_alg[i].ym),
+-			    be32_to_cpu(adsp2_alg[i].zm));
++		cs_dsp_dbg(dsp,
++			   "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
++			   i, be32_to_cpu(adsp2_alg[i].alg.id),
++			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
++			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
++			   be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
++			   be32_to_cpu(adsp2_alg[i].xm),
++			   be32_to_cpu(adsp2_alg[i].ym),
++			   be32_to_cpu(adsp2_alg[i].zm));
+ 
+ 		alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ 						  adsp2_alg[i].alg.id,
+@@ -1996,14 +1996,14 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
+ 		return PTR_ERR(halo_alg);
+ 
+ 	for (i = 0; i < n_algs; i++) {
+-		cs_dsp_info(dsp,
+-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+-			    i, be32_to_cpu(halo_alg[i].alg.id),
+-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+-			    be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+-			    be32_to_cpu(halo_alg[i].xm_base),
+-			    be32_to_cpu(halo_alg[i].ym_base));
++		cs_dsp_dbg(dsp,
++			   "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
++			   i, be32_to_cpu(halo_alg[i].alg.id),
++			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
++			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
++			   be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
++			   be32_to_cpu(halo_alg[i].xm_base),
++			   be32_to_cpu(halo_alg[i].ym_base));
+ 
+ 		ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ 						 halo_alg[i].alg.ver,
+diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
+index a6c06d7476c32..1f410809d3ee4 100644
+--- a/drivers/firmware/imx/imx-dsp.c
++++ b/drivers/firmware/imx/imx-dsp.c
+@@ -115,6 +115,7 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+ 		dsp_chan->idx = i % 2;
+ 		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ 		if (IS_ERR(dsp_chan->ch)) {
++			kfree(dsp_chan->name);
+ 			ret = PTR_ERR(dsp_chan->ch);
+ 			if (ret != -EPROBE_DEFER)
+ 				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
+index c3e4d90f6b183..36f6cfc224c2d 100644
+--- a/drivers/gpio/gpio-pmic-eic-sprd.c
++++ b/drivers/gpio/gpio-pmic-eic-sprd.c
+@@ -352,6 +352,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
+ 	pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
+ 	pmic_eic->chip.set = sprd_pmic_eic_set;
+ 	pmic_eic->chip.get = sprd_pmic_eic_get;
++	pmic_eic->chip.can_sleep = true;
+ 
+ 	irq = &pmic_eic->chip.irq;
+ 	gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
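
Setting chip.can_sleep above tells gpiolib that this PMIC EIC is reached over a slow bus and its accessors may sleep; gpiolib will then warn if a consumer calls the atomic accessors on it. Consumers are expected to use the sleeping variants instead, for example:

	int val = gpiod_get_value_cansleep(desc);

	gpiod_set_value_cansleep(desc, 1);
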
+diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
+index 78f8790168ae1..f96d260a4a19d 100644
+--- a/drivers/gpio/gpio-tb10x.c
++++ b/drivers/gpio/gpio-tb10x.c
+@@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
+ 				handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
+ 				IRQ_GC_INIT_MASK_CACHE);
+ 		if (ret)
+-			return ret;
++			goto err_remove_domain;
+ 
+ 		gc = tb10x_gpio->domain->gc->gc[0];
+ 		gc->reg_base                         = tb10x_gpio->base;
+@@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	return 0;
++
++err_remove_domain:
++	irq_domain_remove(tb10x_gpio->domain);
++	return ret;
+ }
+ 
+ static int tb10x_gpio_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index b4fcad0e62f7e..a7c8beff1647c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -492,7 +492,7 @@ void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *c
+ 	cu_info->cu_active_number = acu_info.number;
+ 	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
+ 	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
+-	       sizeof(acu_info.bitmap));
++	       sizeof(cu_info->cu_bitmap));
+ 	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
+ 	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+ 	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
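
The memcpy fix above pairs with the amdgpu_gfx.h change below, which widens the CU bitmap to one plane per GC instance: once the source and destination types can differ in size, bounding the copy by the destination keeps future growth of the source from overflowing the destination. An illustration of the principle, assuming the source stays at least destination-sized:

	memcpy(&dst->cu_bitmap, &src->bitmap, sizeof(dst->cu_bitmap));
	/* If either side could also shrink independently, clamp instead:
	 * min(sizeof(dst->cu_bitmap), sizeof(src->bitmap))
	 */
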
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index a4ff515ce8966..59ba03d387fcc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -43,6 +43,7 @@
+ #define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L
+ 
+ #define AMDGPU_MAX_GC_INSTANCES		8
++#define KGD_MAX_QUEUES			128
+ 
+ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
+ #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+@@ -254,7 +255,7 @@ struct amdgpu_cu_info {
+ 	uint32_t number;
+ 	uint32_t ao_cu_mask;
+ 	uint32_t ao_cu_bitmap[4][4];
+-	uint32_t bitmap[4][4];
++	uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
+ };
+ 
+ struct amdgpu_gfx_ras {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index d4ca19ba5a289..b9fc7e2db5e59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -839,7 +839,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
+ 		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
+ 		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
+-		       sizeof(adev->gfx.cu_info.bitmap));
++		       sizeof(dev_info->cu_bitmap));
+ 		dev_info->vram_type = adev->gmc.vram_type;
+ 		dev_info->vram_bit_width = adev->gmc.vram_width;
+ 		dev_info->vce_harvest_config = adev->vce.harvest_config;
+@@ -940,12 +940,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			struct atom_context *atom_context;
+ 
+ 			atom_context = adev->mode_info.atom_context;
+-			memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
+-			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
+-			vbios_info.version = atom_context->version;
+-			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+-						sizeof(atom_context->vbios_ver_str));
+-			memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
++			if (atom_context) {
++				memcpy(vbios_info.name, atom_context->name,
++				       sizeof(atom_context->name));
++				memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
++				       sizeof(atom_context->vbios_pn));
++				vbios_info.version = atom_context->version;
++				memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
++				       sizeof(atom_context->vbios_ver_str));
++				memcpy(vbios_info.date, atom_context->date,
++				       sizeof(atom_context->date));
++			}
+ 
+ 			return copy_to_user(out, &vbios_info,
+ 						min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 8aaa427f8c0f6..7d5019a884024 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1061,7 +1061,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ 	info->ce_count = obj->err_data.ce_count;
+ 
+ 	if (err_data.ce_count) {
+-		if (adev->smuio.funcs &&
++		if (!adev->aid_mask &&
++		    adev->smuio.funcs &&
+ 		    adev->smuio.funcs->get_socket_id &&
+ 		    adev->smuio.funcs->get_die_id) {
+ 			dev_info(adev->dev, "socket: %d, die: %d "
+@@ -1081,7 +1082,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ 		}
+ 	}
+ 	if (err_data.ue_count) {
+-		if (adev->smuio.funcs &&
++		if (!adev->aid_mask &&
++		    adev->smuio.funcs &&
+ 		    adev->smuio.funcs->get_socket_id &&
+ 		    adev->smuio.funcs->get_die_id) {
+ 			dev_info(adev->dev, "socket: %d, die: %d "
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
+index b22d4fb2a8470..d3186b570b82e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
+@@ -56,6 +56,15 @@ enum amdgpu_ring_mux_offset_type {
+ 	AMDGPU_MUX_OFFSET_TYPE_CE,
+ };
+ 
++enum ib_complete_status {
++	/* IB not started/reset value, default value. */
++	IB_COMPLETION_STATUS_DEFAULT = 0,
++	/* IB preempted, started but not completed. */
++	IB_COMPLETION_STATUS_PREEMPTED = 1,
++	/* IB completed. */
++	IB_COMPLETION_STATUS_COMPLETED = 2,
++};
++
+ struct amdgpu_ring_mux {
+ 	struct amdgpu_ring      *real_ring;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 44af8022b89fa..f743bf2c92877 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -9448,7 +9448,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
+ 				gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
+ 					adev, disable_masks[i * 2 + j]);
+ 			bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
+-			cu_info->bitmap[i][j] = bitmap;
++			cu_info->bitmap[0][i][j] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ 				if (bitmap & mask) {
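
The gfx_v6/v7/v8/v10/v11 hunks in this series all make the same mechanical change shown above: with cu_info->bitmap now three-dimensional, single-XCC ASICs populate plane 0 only, while GFX 9.4.3 (further below) fills one plane per XCC. Schematically:

	cu_info->bitmap[0][se][sh]      = bitmap;	/* single-XCC parts */
	cu_info->bitmap[xcc_id][se][sh] = bitmap;	/* GFX 9.4.3, per XCC */
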
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 0451533ddde41..a82cba884c48f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -6394,7 +6394,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
+ 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
+ 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
+ 			 */
+-			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
++			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ 				if (bitmap & mask)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index da6caff78c22b..34f9211b26793 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -3577,7 +3577,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+ 				gfx_v6_0_set_user_cu_inactive_bitmap(
+ 					adev, disable_masks[i * 2 + j]);
+ 			bitmap = gfx_v6_0_get_cu_enabled(adev);
+-			cu_info->bitmap[i][j] = bitmap;
++			cu_info->bitmap[0][i][j] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ 				if (bitmap & mask) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 8c174c11eaee0..6feae2548e8ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -5122,7 +5122,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
+ 				gfx_v7_0_set_user_cu_inactive_bitmap(
+ 					adev, disable_masks[i * 2 + j]);
+ 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
+-			cu_info->bitmap[i][j] = bitmap;
++			cu_info->bitmap[0][i][j] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ 				if (bitmap & mask) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 51c1745c83697..885ebd703260f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -7121,7 +7121,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
+ 				gfx_v8_0_set_user_cu_inactive_bitmap(
+ 					adev, disable_masks[i * 2 + j]);
+ 			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
+-			cu_info->bitmap[i][j] = bitmap;
++			cu_info->bitmap[0][i][j] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ 				if (bitmap & mask) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 65577eca58f1c..602d74023b0b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1497,7 +1497,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
+ 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+-				if (cu_info->bitmap[i][j] & mask) {
++				if (cu_info->bitmap[0][i][j] & mask) {
+ 					if (counter == pg_always_on_cu_num)
+ 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
+ 					if (counter < always_on_cu_num)
+@@ -5230,6 +5230,9 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
+ 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+ 	}
+ 
++	((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
++		IB_COMPLETION_STATUS_PREEMPTED;
++
+ 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+ 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
+ 	} else {
+@@ -7234,7 +7237,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
+ 			 *    SE6,SH0 --> bitmap[2][1]
+ 			 *    SE7,SH0 --> bitmap[3][1]
+ 			 */
+-			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
++			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
+ 
+ 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ 				if (bitmap & mask) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index 4f883b94f98ef..84a74a6c6b2de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -4228,7 +4228,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
+ }
+ 
+ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+-						 u32 bitmap)
++						 u32 bitmap, int xcc_id)
+ {
+ 	u32 data;
+ 
+@@ -4238,15 +4238,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ 
+-	WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
++	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
+ }
+ 
+-static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
++static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
+ {
+ 	u32 data, mask;
+ 
+-	data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
+-	data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
++	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
++	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
+ 
+ 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+@@ -4259,7 +4259,7 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ 				 struct amdgpu_cu_info *cu_info)
+ {
+-	int i, j, k, counter, active_cu_number = 0;
++	int i, j, k, counter, xcc_id, active_cu_number = 0;
+ 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ 	unsigned disable_masks[4 * 4];
+ 
+@@ -4278,46 +4278,38 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ 				    adev->gfx.config.max_sh_per_se);
+ 
+ 	mutex_lock(&adev->grbm_idx_mutex);
+-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+-			mask = 1;
+-			ao_bitmap = 0;
+-			counter = 0;
+-			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
+-			gfx_v9_4_3_set_user_cu_inactive_bitmap(
+-				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
+-			bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
+-
+-			/*
+-			 * The bitmap(and ao_cu_bitmap) in cu_info structure is
+-			 * 4x4 size array, and it's usually suitable for Vega
+-			 * ASICs which has 4*2 SE/SH layout.
+-			 * But for Arcturus, SE/SH layout is changed to 8*1.
+-			 * To mostly reduce the impact, we make it compatible
+-			 * with current bitmap array as below:
+-			 *    SE4,SH0 --> bitmap[0][1]
+-			 *    SE5,SH0 --> bitmap[1][1]
+-			 *    SE6,SH0 --> bitmap[2][1]
+-			 *    SE7,SH0 --> bitmap[3][1]
+-			 */
+-			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+-
+-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+-				if (bitmap & mask) {
+-					if (counter < adev->gfx.config.max_cu_per_sh)
+-						ao_bitmap |= mask;
+-					counter++;
++	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
++		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
++			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++				mask = 1;
++				ao_bitmap = 0;
++				counter = 0;
++				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
++				gfx_v9_4_3_set_user_cu_inactive_bitmap(
++					adev,
++					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
++					xcc_id);
++				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
++
++				cu_info->bitmap[xcc_id][i][j] = bitmap;
++
++				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
++					if (bitmap & mask) {
++						if (counter < adev->gfx.config.max_cu_per_sh)
++							ao_bitmap |= mask;
++						counter++;
++					}
++					mask <<= 1;
+ 				}
+-				mask <<= 1;
++				active_cu_number += counter;
++				if (i < 2 && j < 2)
++					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
++				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
+ 			}
+-			active_cu_number += counter;
+-			if (i < 2 && j < 2)
+-				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+-			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
+ 		}
++		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
++					    xcc_id);
+ 	}
+-	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+-				    0);
+ 	mutex_unlock(&adev->grbm_idx_mutex);
+ 
+ 	cu_info->number = active_cu_number;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+index d5ed9e0e1a5f1..e5b5b0f4940f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+@@ -345,6 +345,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
+ 		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ 		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ 	}
++	if (amdgpu_sriov_vf(adev))
++		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
++			regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+ }
+ 
+ static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index e5e5d68a4d702..1a5ffbf884891 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -786,7 +786,7 @@ static int soc21_common_hw_init(void *handle)
+ 	 * for the purpose of expose those registers
+ 	 * to process space
+ 	 */
+-	if (adev->nbio.funcs->remap_hdp_registers)
++	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
+ 		adev->nbio.funcs->remap_hdp_registers(adev);
+ 	/* enable the doorbell aperture */
+ 	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index f5a6f562e2a80..11b9837292536 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -2154,7 +2154,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ 
+ 	amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
+ 	cu->num_simd_per_cu = cu_info.simd_per_cu;
+-	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
++	cu->num_simd_cores = cu_info.simd_per_cu *
++			(cu_info.cu_active_number / kdev->kfd->num_nodes);
+ 	cu->max_waves_simd = cu_info.max_waves_per_simd;
+ 
+ 	cu->wave_front_size = cu_info.wave_front_size;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index fc719389b5d65..4684711aa695a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -79,6 +79,10 @@ struct crat_header {
+ #define CRAT_SUBTYPE_IOLINK_AFFINITY		5
+ #define CRAT_SUBTYPE_MAX			6
+ 
++/*
++ * Do not change the value of CRAT_SIBLINGMAP_SIZE from 32
++ * as it breaks the ABI.
++ */
+ #define CRAT_SIBLINGMAP_SIZE	32
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 01192f5abe462..a61334592c87a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -216,7 +216,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ 
+ 	if (q->wptr_bo) {
+ 		wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+-		queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
++		queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
+ 	}
+ 
+ 	queue_input.is_kfd_process = 1;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index 863cf060af484..254f343f967a3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -97,18 +97,22 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
+ 
+ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ 		const uint32_t *cu_mask, uint32_t cu_mask_count,
+-		uint32_t *se_mask)
++		uint32_t *se_mask, uint32_t inst)
+ {
+ 	struct kfd_cu_info cu_info;
+ 	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
+ 	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
+ 	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
+-	int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
++	int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
++	uint32_t cu_active_per_node;
++	int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
++	int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
+ 
+ 	amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
+ 
+-	if (cu_mask_count > cu_info.cu_active_number)
+-		cu_mask_count = cu_info.cu_active_number;
++	cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
++	if (cu_mask_count > cu_active_per_node)
++		cu_mask_count = cu_active_per_node;
+ 
+ 	/* Exceeding these bounds corrupts the stack and indicates a coding error.
+ 	 * Returning with no CU's enabled will hang the queue, which should be
+@@ -141,7 +145,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ 	for (se = 0; se < cu_info.num_shader_engines; se++)
+ 		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
+ 			cu_per_sh[se][sh] = hweight32(
+-				cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
++				cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
++				cu_bitmap_sh_mul]);
+ 
+ 	/* Symmetrically map cu_mask to all SEs & SHs:
+ 	 * se_mask programs up to 2 SH in the upper and lower 16 bits.
+@@ -164,20 +169,33 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ 	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
+ 	 * ...
+ 	 *
++	 * For GFX 9.4.3, the following code only looks at a
++	 * subset of the cu_mask corresponding to the inst parameter.
++	 * If we have n XCCs under one GPU node
++	 * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
++	 * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
++	 * ..
++	 * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
++	 * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
++	 *
++	 * For example, if there are 6 XCCs under 1 KFD node, this code
++	 * running for each inst, will look at the bits as:
++	 * inst, inst + 6, inst + 12...
++	 *
+ 	 * First ensure all CUs are disabled, then enable user specified CUs.
+ 	 */
+ 	for (i = 0; i < cu_info.num_shader_engines; i++)
+ 		se_mask[i] = 0;
+ 
+-	i = 0;
+-	for (cu = 0; cu < 16; cu += inc) {
++	i = inst;
++	for (cu = 0; cu < 16; cu += cu_inc) {
+ 		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
+ 			for (se = 0; se < cu_info.num_shader_engines; se++) {
+ 				if (cu_per_sh[se][sh] > cu) {
+ 					if (cu_mask[i / 32] & (en_mask << (i % 32)))
+ 						se_mask[se] |= en_mask << (cu + sh * 16);
+ 					i += inc;
+-					if (i == cu_mask_count)
++					if (i >= cu_mask_count)
+ 						return;
+ 				}
+ 			}
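
The comment block above describes round-robin striping of cu_mask bits across XCC instances; a short worked example makes the stride concrete. Assuming 6 XCCs under one KFD node and CU mode (cu_inc = 1), inc becomes 6 and each instance walks its own residue class, schematically:

	/* inst = 0 consumes cu_mask bits 0, 6, 12, 18, ...
	 * inst = 2 consumes cu_mask bits 2, 8, 14, 20, ...
	 */
	for (i = inst; i < cu_mask_count; i += cu_inc * num_xcc)
		/* bit i selects a CU slot in this instance's se_mask */;
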
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+index 23158db7da035..57bf5e513f4d1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+@@ -138,7 +138,7 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
+ 
+ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ 		const uint32_t *cu_mask, uint32_t cu_mask_count,
+-		uint32_t *se_mask);
++		uint32_t *se_mask, uint32_t inst);
+ 
+ int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ 		uint32_t pipe_id, uint32_t queue_id,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 65c9f01a1f86c..faa01ee0d1655 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ 		return;
+ 
+ 	mqd_symmetrically_map_cu_mask(mm,
+-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
++		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
+ 
+ 	m = get_mqd(mqd);
+ 	m->compute_static_thread_mgmt_se0 = se_mask[0];
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 94c0fc2e57b7f..0fcb176601295 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ 		return;
+ 
+ 	mqd_symmetrically_map_cu_mask(mm,
+-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
++		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
+ 
+ 	m = get_mqd(mqd);
+ 	m->compute_static_thread_mgmt_se0 = se_mask[0];
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 23b30783dce31..352757f2d3202 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -71,7 +71,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ 	}
+ 
+ 	mqd_symmetrically_map_cu_mask(mm,
+-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
++		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
+ 
+ 	m->compute_static_thread_mgmt_se0 = se_mask[0];
+ 	m->compute_static_thread_mgmt_se1 = se_mask[1];
+@@ -321,6 +321,43 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ 	return 0;
+ }
+ 
++static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
++{
++	struct v11_compute_mqd *m;
++
++	m = get_mqd(mqd);
++
++	memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
++}
++
++static void restore_mqd(struct mqd_manager *mm, void **mqd,
++			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
++			struct queue_properties *qp,
++			const void *mqd_src,
++			const void *ctl_stack_src, const u32 ctl_stack_size)
++{
++	uint64_t addr;
++	struct v11_compute_mqd *m;
++
++	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
++	addr = mqd_mem_obj->gpu_addr;
++
++	memcpy(m, mqd_src, sizeof(*m));
++
++	*mqd = m;
++	if (gart_addr)
++		*gart_addr = addr;
++
++	m->cp_hqd_pq_doorbell_control =
++		qp->doorbell_off <<
++			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
++	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
++			m->cp_hqd_pq_doorbell_control);
++
++	qp->is_active = 0;
++}
++
++
+ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ 			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ 			struct queue_properties *q)
+@@ -438,6 +475,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
+ 		mqd->mqd_size = sizeof(struct v11_compute_mqd);
+ 		mqd->get_wave_state = get_wave_state;
+ 		mqd->mqd_stride = kfd_mqd_stride;
++		mqd->checkpoint_mqd = checkpoint_mqd;
++		mqd->restore_mqd = restore_mqd;
+ #if defined(CONFIG_DEBUG_FS)
+ 		mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -482,6 +521,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
+ 		mqd->update_mqd = update_mqd_sdma;
+ 		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+ 		mqd->is_occupied = kfd_is_occupied_sdma;
++		mqd->checkpoint_mqd = checkpoint_mqd;
++		mqd->restore_mqd = restore_mqd;
+ 		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
+ 		mqd->mqd_stride = kfd_mqd_stride;
+ #if defined(CONFIG_DEBUG_FS)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 601bb9f68048c..a76ae27c8a919 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -60,7 +60,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+ }
+ 
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+-			struct mqd_update_info *minfo)
++			struct mqd_update_info *minfo, uint32_t inst)
+ {
+ 	struct v9_mqd *m;
+ 	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
+@@ -69,27 +69,36 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ 		return;
+ 
+ 	mqd_symmetrically_map_cu_mask(mm,
+-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
++		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
+ 
+ 	m = get_mqd(mqd);
++
+ 	m->compute_static_thread_mgmt_se0 = se_mask[0];
+ 	m->compute_static_thread_mgmt_se1 = se_mask[1];
+ 	m->compute_static_thread_mgmt_se2 = se_mask[2];
+ 	m->compute_static_thread_mgmt_se3 = se_mask[3];
+-	m->compute_static_thread_mgmt_se4 = se_mask[4];
+-	m->compute_static_thread_mgmt_se5 = se_mask[5];
+-	m->compute_static_thread_mgmt_se6 = se_mask[6];
+-	m->compute_static_thread_mgmt_se7 = se_mask[7];
+-
+-	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
+-		m->compute_static_thread_mgmt_se0,
+-		m->compute_static_thread_mgmt_se1,
+-		m->compute_static_thread_mgmt_se2,
+-		m->compute_static_thread_mgmt_se3,
+-		m->compute_static_thread_mgmt_se4,
+-		m->compute_static_thread_mgmt_se5,
+-		m->compute_static_thread_mgmt_se6,
+-		m->compute_static_thread_mgmt_se7);
++	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
++		m->compute_static_thread_mgmt_se4 = se_mask[4];
++		m->compute_static_thread_mgmt_se5 = se_mask[5];
++		m->compute_static_thread_mgmt_se6 = se_mask[6];
++		m->compute_static_thread_mgmt_se7 = se_mask[7];
++
++		pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
++			m->compute_static_thread_mgmt_se0,
++			m->compute_static_thread_mgmt_se1,
++			m->compute_static_thread_mgmt_se2,
++			m->compute_static_thread_mgmt_se3,
++			m->compute_static_thread_mgmt_se4,
++			m->compute_static_thread_mgmt_se5,
++			m->compute_static_thread_mgmt_se6,
++			m->compute_static_thread_mgmt_se7);
++	} else {
++		pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
++			inst, m->compute_static_thread_mgmt_se0,
++			m->compute_static_thread_mgmt_se1,
++			m->compute_static_thread_mgmt_se2,
++			m->compute_static_thread_mgmt_se3);
++	}
+ }
+ 
+ static void set_priority(struct v9_mqd *m, struct queue_properties *q)
+@@ -290,7 +299,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
+ 		m->cp_hqd_ctx_save_control = 0;
+ 
+-	update_cu_mask(mm, mqd, minfo);
++	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
++		update_cu_mask(mm, mqd, minfo, 0);
+ 	set_priority(m, q);
+ 
+ 	q->is_active = QUEUE_IS_ACTIVE(*q);
+@@ -654,6 +664,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ 		m = get_mqd(mqd + size * xcc);
+ 		update_mqd(mm, m, q, minfo);
+ 
++		update_cu_mask(mm, mqd, minfo, xcc);
++
+ 		if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ 			switch (xcc) {
+ 			case 0:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index d1e962da51dd3..2551a7529b5e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -55,7 +55,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ 		return;
+ 
+ 	mqd_symmetrically_map_cu_mask(mm,
+-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
++		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
+ 
+ 	m = get_mqd(mqd);
+ 	m->compute_static_thread_mgmt_se0 = se_mask[0];
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 4a17bb7c7b27d..5582191022106 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -450,8 +450,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ 	sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
+ 			      dev->node_props.cpu_cores_count);
+ 	sysfs_show_32bit_prop(buffer, offs, "simd_count",
+-			      dev->gpu ? (dev->node_props.simd_count *
+-					  NUM_XCC(dev->gpu->xcc_mask)) : 0);
++			      dev->gpu ? dev->node_props.simd_count : 0);
+ 	sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
+ 			      dev->node_props.mem_banks_count);
+ 	sysfs_show_32bit_prop(buffer, offs, "caches_count",
+@@ -1651,14 +1650,17 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
+ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
+ 				struct kfd_gpu_cache_info *pcache_info,
+ 				struct kfd_cu_info *cu_info,
+-				int cache_type, unsigned int cu_processor_id)
++				int cache_type, unsigned int cu_processor_id,
++				struct kfd_node *knode)
+ {
+ 	unsigned int cu_sibling_map_mask;
+ 	int first_active_cu;
+-	int i, j, k;
++	int i, j, k, xcc, start, end;
+ 	struct kfd_cache_properties *pcache = NULL;
+ 
+-	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
++	start = ffs(knode->xcc_mask) - 1;
++	end = start + NUM_XCC(knode->xcc_mask);
++	cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
+ 	cu_sibling_map_mask &=
+ 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ 	first_active_cu = ffs(cu_sibling_map_mask);
+@@ -1693,16 +1695,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
+ 		cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
+ 		k = 0;
+ 
+-		for (i = 0; i < cu_info->num_shader_engines; i++) {
+-			for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
+-				pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+-				pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+-				pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+-				pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+-				k += 4;
+-
+-				cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
+-				cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
++		for (xcc = start; xcc < end; xcc++) {
++			for (i = 0; i < cu_info->num_shader_engines; i++) {
++				for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
++					pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
++					pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
++					pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
++					pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
++					k += 4;
++
++					cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
++					cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
++				}
+ 			}
+ 		}
+ 		pcache->sibling_map_size = k;
+@@ -1720,7 +1724,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
+ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
+ {
+ 	struct kfd_gpu_cache_info *pcache_info = NULL;
+-	int i, j, k;
++	int i, j, k, xcc, start, end;
+ 	int ct = 0;
+ 	unsigned int cu_processor_id;
+ 	int ret;
+@@ -1754,37 +1758,42 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
+ 	 *			then it will consider only one CU from
+ 	 *			the shared unit
+ 	 */
++	start = ffs(kdev->xcc_mask) - 1;
++	end = start + NUM_XCC(kdev->xcc_mask);
++
+ 	for (ct = 0; ct < num_of_cache_types; ct++) {
+ 		cu_processor_id = gpu_processor_id;
+ 		if (pcache_info[ct].cache_level == 1) {
+-			for (i = 0; i < pcu_info->num_shader_engines; i++) {
+-				for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
+-					for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
++			for (xcc = start; xcc < end; xcc++) {
++				for (i = 0; i < pcu_info->num_shader_engines; i++) {
++					for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
++						for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
+ 
+-						ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
+-										pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
++							ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
++										pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
+ 										cu_processor_id, k);
+ 
+-						if (ret < 0)
+-							break;
++							if (ret < 0)
++								break;
+ 
+-						if (!ret) {
+-							num_of_entries++;
+-							list_add_tail(&props_ext->list, &dev->cache_props);
+-						}
++							if (!ret) {
++								num_of_entries++;
++								list_add_tail(&props_ext->list, &dev->cache_props);
++							}
+ 
+-						/* Move to next CU block */
+-						num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
+-							pcu_info->num_cu_per_sh) ?
+-							pcache_info[ct].num_cu_shared :
+-							(pcu_info->num_cu_per_sh - k);
+-						cu_processor_id += num_cu_shared;
++							/* Move to next CU block */
++							num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
++								pcu_info->num_cu_per_sh) ?
++								pcache_info[ct].num_cu_shared :
++								(pcu_info->num_cu_per_sh - k);
++							cu_processor_id += num_cu_shared;
++						}
+ 					}
+ 				}
+ 			}
+ 		} else {
+ 			ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
+-								pcu_info, ct, cu_processor_id);
++					pcu_info, ct, cu_processor_id, kdev);
+ 
+ 			if (ret < 0)
+ 				break;
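The amdkfd hunks above wrap the cache-topology walk in an outer loop over XCC (accelerator die) instances, deriving the active index range from a bitmask with ffs() and NUM_XCC(). A minimal user-space sketch of that index derivation, assuming a contiguous mask (as the kernel code also does); the popcount stand-in for NUM_XCC() is an assumption for illustration:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Count of set bits stands in for the kernel's NUM_XCC() helper. */
#define NUM_XCC(mask) __builtin_popcount(mask)

int main(void)
{
	unsigned int xcc_mask = 0x0c;	/* example: XCC instances 2 and 3 */
	int start = ffs(xcc_mask) - 1;	/* index of the first set bit */
	int end = start + NUM_XCC(xcc_mask);

	/* Mirrors the patch: iterate only the populated XCC instances. */
	for (int xcc = start; xcc < end; xcc++)
		printf("fill cache info for xcc %d\n", xcc);
	return 0;
}

With xcc_mask = 0x0c this visits instances 2 and 3, which is why CACHE_SIBLINGMAP_SIZE also doubles in the header hunk below.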
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index cba2cd5ed9d19..46927263e014d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -86,7 +86,7 @@ struct kfd_mem_properties {
+ 	struct attribute	attr;
+ };
+ 
+-#define CACHE_SIBLINGMAP_SIZE 64
++#define CACHE_SIBLINGMAP_SIZE 128
+ 
+ struct kfd_cache_properties {
+ 	struct list_head	list;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e0d556cf919f7..c9959bd8147db 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6062,8 +6062,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 
+ 	if (recalculate_timing)
+ 		drm_mode_set_crtcinfo(&saved_mode, 0);
+-	else if (!old_stream)
+-		drm_mode_set_crtcinfo(&mode, 0);
+ 
+ 	/*
+ 	 * If scaling is enabled and refresh rate didn't change
+@@ -6625,6 +6623,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ 		goto fail;
+ 	}
+ 
++	drm_mode_set_crtcinfo(mode, 0);
++
+ 	stream = create_validate_stream_for_sink(aconnector, mode,
+ 						 to_dm_connector_state(connector->state),
+ 						 NULL);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 6966420dfbac3..15fa19ee748cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -964,7 +964,9 @@ void dce110_edp_backlight_control(
+ 		return;
+ 	}
+ 
+-	if (link->panel_cntl) {
++	if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
++		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
++		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ 		bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
+ 
+ 		if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
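The dce110 hunk gates the transmitter-side backlight toggle on the sink's DPCD extended capabilities: when the panel reports OLED or AUX-based HDR/SDR backlight control, the panel_cntl path is skipped. A compilable model of just that predicate — the struct here is a stand-in, though the bitfield names mirror the diff:

#include <stdio.h>

struct dpcd_sink_ext_caps {
	unsigned oled : 1;
	unsigned hdr_aux_backlight_control : 1;
	unsigned sdr_aux_backlight_control : 1;
};

static int uses_aux_backlight(const struct dpcd_sink_ext_caps *c)
{
	return c->oled || c->hdr_aux_backlight_control ||
	       c->sdr_aux_backlight_control;
}

int main(void)
{
	struct dpcd_sink_ext_caps oled_panel = { .oled = 1 };

	if (!uses_aux_backlight(&oled_panel))
		puts("toggle backlight via panel_cntl");
	else
		puts("leave backlight to AUX control");	/* taken here */
	return 0;
}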
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 7c344132a0072..62a077adcdbfa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data(
+ 	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ 	struct pipe_ctx *odm_pipe;
+ 	int odm_cnt = 1;
+-
+-	int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+-	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
++	int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
++	int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
++	int odm_slice_width, last_odm_slice_width, offset = 0;
+ 
+ 	if (stream->link->test_pattern_enabled)
+ 		return;
+@@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data(
+ 
+ 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ 		odm_cnt++;
+-
+-	width = width / odm_cnt;
++	odm_slice_width = h_active / odm_cnt;
++	last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+ 
+ 	if (blank) {
+ 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
+@@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data(
+ 		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ 	}
+ 
+-	dc->hwss.set_disp_pattern_generator(dc,
+-			pipe_ctx,
+-			test_pattern,
+-			test_pattern_color_space,
+-			stream->timing.display_color_depth,
+-			&black_color,
+-			width,
+-			height,
+-			0);
++	odm_pipe = pipe_ctx;
+ 
+-	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
++	while (odm_pipe->next_odm_pipe) {
+ 		dc->hwss.set_disp_pattern_generator(dc,
+ 				odm_pipe,
+-				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
+-						CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
++				test_pattern,
+ 				test_pattern_color_space,
+ 				stream->timing.display_color_depth,
+ 				&black_color,
+-				width,
+-				height,
+-				0);
++				odm_slice_width,
++				v_active,
++				offset);
++		offset += odm_slice_width;
++		odm_pipe = odm_pipe->next_odm_pipe;
+ 	}
+ 
++	dc->hwss.set_disp_pattern_generator(dc,
++			odm_pipe,
++			test_pattern,
++			test_pattern_color_space,
++			stream->timing.display_color_depth,
++			&black_color,
++			last_odm_slice_width,
++			v_active,
++			offset);
++
+ 	if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
+ 		/* when exiting dynamic ODM need to reinit DPG state for unused pipes */
+ 		struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
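The dcn20 hunk replaces a uniform width/odm_cnt per pipe with an explicit split: each ODM slice gets h_active / odm_cnt pixels, the last slice absorbs the rounding remainder, and a running offset makes the slices tile the active width exactly. The same slicing is applied in link_dp_cts.c below. A standalone sketch of the arithmetic (variable names mirror the patch; the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	int h_active = 2560;	/* addressable width plus borders */
	int odm_cnt = 3;	/* e.g. three pipes combined via ODM */

	int odm_slice_width = h_active / odm_cnt;		/* 853 */
	int last_odm_slice_width =
		h_active - odm_slice_width * (odm_cnt - 1);	/* 854 */
	int offset = 0;

	/* All slices but the last use the truncated width... */
	for (int i = 0; i < odm_cnt - 1; i++) {
		printf("slice %d: width %d at offset %d\n",
		       i, odm_slice_width, offset);
		offset += odm_slice_width;
	}
	/* ...and the last takes the remainder, so 853+853+854 = 2560. */
	printf("slice %d: width %d at offset %d\n",
	       odm_cnt - 1, last_odm_slice_width, offset);
	return 0;
}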
+diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+index db9f1baa27e5e..9fd68a11fad23 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
++++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+@@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 		stream->timing.display_color_depth;
+ 	struct bit_depth_reduction_params params;
+ 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+-	int width = pipe_ctx->stream->timing.h_addressable +
++	struct pipe_ctx *odm_pipe;
++	int odm_cnt = 1;
++	int h_active = pipe_ctx->stream->timing.h_addressable +
+ 		pipe_ctx->stream->timing.h_border_left +
+ 		pipe_ctx->stream->timing.h_border_right;
+-	int height = pipe_ctx->stream->timing.v_addressable +
++	int v_active = pipe_ctx->stream->timing.v_addressable +
+ 		pipe_ctx->stream->timing.v_border_bottom +
+ 		pipe_ctx->stream->timing.v_border_top;
++	int odm_slice_width, last_odm_slice_width, offset = 0;
+ 
+ 	memset(&params, 0, sizeof(params));
+ 
++	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
++		odm_cnt++;
++
++	odm_slice_width = h_active / odm_cnt;
++	last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
++
+ 	switch (test_pattern) {
+ 	case DP_TEST_PATTERN_COLOR_SQUARES:
+ 		controller_test_pattern =
+@@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 	{
+ 		/* disable bit depth reduction */
+ 		pipe_ctx->stream->bit_depth_params = params;
+-		opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+-		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
++			opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ 			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ 				controller_test_pattern, color_depth);
+-		else if (link->dc->hwss.set_disp_pattern_generator) {
+-			struct pipe_ctx *odm_pipe;
++		} else if (link->dc->hwss.set_disp_pattern_generator) {
+ 			enum controller_dp_color_space controller_color_space;
+-			int opp_cnt = 1;
+-			int offset = 0;
+-			int dpg_width = width;
++			struct output_pixel_processor *odm_opp;
+ 
+ 			switch (test_pattern_color_space) {
+ 			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+@@ -502,24 +508,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 				break;
+ 			}
+ 
+-			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+-				opp_cnt++;
+-			dpg_width = width / opp_cnt;
+-			offset = dpg_width;
+-
+-			link->dc->hwss.set_disp_pattern_generator(link->dc,
+-					pipe_ctx,
+-					controller_test_pattern,
+-					controller_color_space,
+-					color_depth,
+-					NULL,
+-					dpg_width,
+-					height,
+-					0);
+-
+-			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+-				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+-
++			odm_pipe = pipe_ctx;
++			while (odm_pipe->next_odm_pipe) {
++				odm_opp = odm_pipe->stream_res.opp;
+ 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ 				link->dc->hwss.set_disp_pattern_generator(link->dc,
+ 						odm_pipe,
+@@ -527,11 +518,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 						controller_color_space,
+ 						color_depth,
+ 						NULL,
+-						dpg_width,
+-						height,
++						odm_slice_width,
++						v_active,
+ 						offset);
+-				offset += offset;
++				offset += odm_slice_width;
++				odm_pipe = odm_pipe->next_odm_pipe;
+ 			}
++			odm_opp = odm_pipe->stream_res.opp;
++			odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
++			link->dc->hwss.set_disp_pattern_generator(link->dc,
++					odm_pipe,
++					controller_test_pattern,
++					controller_color_space,
++					color_depth,
++					NULL,
++					last_odm_slice_width,
++					v_active,
++					offset);
+ 		}
+ 	}
+ 	break;
+@@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 		/* restore bitdepth reduction */
+ 		resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
+ 		pipe_ctx->stream->bit_depth_params = params;
+-		opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+-		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
++			opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ 			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+-				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+-				color_depth);
+-		else if (link->dc->hwss.set_disp_pattern_generator) {
+-			struct pipe_ctx *odm_pipe;
+-			int opp_cnt = 1;
+-			int dpg_width;
+-
+-			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+-				opp_cnt++;
+-
+-			dpg_width = width / opp_cnt;
+-			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+-				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
++					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
++					color_depth);
++		} else if (link->dc->hwss.set_disp_pattern_generator) {
++			struct output_pixel_processor *odm_opp;
+ 
++			odm_pipe = pipe_ctx;
++			while (odm_pipe->next_odm_pipe) {
++				odm_opp = odm_pipe->stream_res.opp;
+ 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ 				link->dc->hwss.set_disp_pattern_generator(link->dc,
+ 						odm_pipe,
+@@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ 						CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ 						color_depth,
+ 						NULL,
+-						dpg_width,
+-						height,
+-						0);
++						odm_slice_width,
++						v_active,
++						offset);
++				offset += odm_slice_width;
++				odm_pipe = odm_pipe->next_odm_pipe;
+ 			}
++			odm_opp = odm_pipe->stream_res.opp;
++			odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ 			link->dc->hwss.set_disp_pattern_generator(link->dc,
+-					pipe_ctx,
++					odm_pipe,
+ 					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ 					CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ 					color_depth,
+ 					NULL,
+-					dpg_width,
+-					height,
+-					0);
++					last_odm_slice_width,
++					v_active,
++					offset);
+ 		}
+ 	}
+ 	break;
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index d0df3381539f0..74cc545085a02 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -31,12 +31,12 @@
+ #include <linux/types.h>
+ #include <linux/bitmap.h>
+ #include <linux/dma-fence.h>
++#include "amdgpu_irq.h"
++#include "amdgpu_gfx.h"
+ 
+ struct pci_dev;
+ struct amdgpu_device;
+ 
+-#define KGD_MAX_QUEUES 128
+-
+ struct kfd_dev;
+ struct kgd_mem;
+ 
+@@ -68,7 +68,7 @@ struct kfd_cu_info {
+ 	uint32_t wave_front_size;
+ 	uint32_t max_scratch_slots_per_cu;
+ 	uint32_t lds_size;
+-	uint32_t cu_bitmap[4][4];
++	uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
+ };
+ 
+ /* For getting GPU local memory information from KGD */
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+index 8f1633c3fb935..73a4a4eb29e08 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+@@ -100,6 +100,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ 	st->nents = 0;
+ 	for (i = 0; i < page_count; i++) {
+ 		struct folio *folio;
++		unsigned long nr_pages;
+ 		const unsigned int shrink[] = {
+ 			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
+ 			0,
+@@ -150,6 +151,8 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ 			}
+ 		} while (1);
+ 
++		nr_pages = min_t(unsigned long,
++				folio_nr_pages(folio), page_count - i);
+ 		if (!i ||
+ 		    sg->length >= max_segment ||
+ 		    folio_pfn(folio) != next_pfn) {
+@@ -157,13 +160,13 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ 				sg = sg_next(sg);
+ 
+ 			st->nents++;
+-			sg_set_folio(sg, folio, folio_size(folio), 0);
++			sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
+ 		} else {
+ 			/* XXX: could overflow? */
+-			sg->length += folio_size(folio);
++			sg->length += nr_pages * PAGE_SIZE;
+ 		}
+-		next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+-		i += folio_nr_pages(folio) - 1;
++		next_pfn = folio_pfn(folio) + nr_pages;
++		i += nr_pages - 1;
+ 
+ 		/* Check that the i965g/gm workaround works. */
+ 		GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
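The i915 shmem hunk clamps each folio's contribution to the scatterlist to min(folio_nr_pages(folio), page_count - i), so a large folio straddling the end of the object no longer inflates sg->length past the requested size. A tiny sketch of the clamp with made-up numbers; min_ul() stands in for the kernel's min_t():

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long page_count = 10;	/* pages the object needs */
	unsigned long i = 8;		/* pages already mapped */
	unsigned long folio_pages = 4;	/* folio_nr_pages(folio) */

	/* Only 2 of this folio's 4 pages belong to the object. */
	unsigned long nr_pages = min_ul(folio_pages, page_count - i);
	printf("use %lu of %lu folio pages\n", nr_pages, folio_pages);
	return 0;
}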
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 0aff5bb13c538..0e81ea6191c64 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -558,7 +558,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ 		DRIVER_CAPS(i915)->has_logical_contexts = true;
+ 
+ 	ewma__engine_latency_init(&engine->latency);
+-	seqcount_init(&engine->stats.execlists.lock);
+ 
+ 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index 2ebd937f3b4cb..082c973370824 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -3550,6 +3550,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+ 	logical_ring_default_vfuncs(engine);
+ 	logical_ring_default_irqs(engine);
+ 
++	seqcount_init(&engine->stats.execlists.lock);
++
+ 	if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
+ 		rcs_submission_override(engine);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index dd0ed941441aa..da21f2786b5d7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -511,20 +511,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ 	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ }
+ 
++/*
++ * Reserve the top of the GuC address space for firmware images. Addresses
++ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
++ * which makes for a suitable range to hold GuC/HuC firmware images if the
++ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
++ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
++ * of the same size anyway, which is far more than needed, to keep the logic
++ * in uc_fw_ggtt_offset() simple.
++ */
++#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
++
+ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+ {
+-	u64 size;
++	u64 offset;
+ 	int ret;
+ 
+ 	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
+ 		return 0;
+ 
+-	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+-	size = ggtt->vm.total - GUC_GGTT_TOP;
++	GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
++	offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
+ 
+-	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
+-				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+-				   PIN_NOEVICT);
++	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
++				   GUC_TOP_RESERVE_SIZE, offset,
++				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
+ 	if (ret)
+ 		drm_dbg(&ggtt->vm.i915->drm,
+ 			"Failed to reserve top of GGTT for GuC\n");
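As the new comment explains, the GuC reservation is now a fixed SZ_4G - GUC_GGTT_TOP chunk at the top of the GGTT rather than ggtt->vm.total - GUC_GGTT_TOP, which went wrong on a 2G GGTT. A quick check of the arithmetic, assuming GUC_GGTT_TOP's in-tree value of 0xFEE00000:

#include <stdio.h>
#include <stdint.h>

#define SZ_4G		(1ULL << 32)
#define GUC_GGTT_TOP	0xFEE00000ULL	/* value from the i915 sources */

int main(void)
{
	uint64_t reserve = SZ_4G - GUC_GGTT_TOP;	/* 18 MiB chunk */

	/* Old code: size = total - GUC_GGTT_TOP, nonsensical when the
	 * GGTT is only 2G. New code reserves the same fixed size at the
	 * top of whatever GGTT exists. */
	for (uint64_t total = SZ_4G; total >= SZ_4G / 2; total /= 2)
		printf("GGTT %lluM: reserve %lluM at offset 0x%llx\n",
		       (unsigned long long)(total >> 20),
		       (unsigned long long)(reserve >> 20),
		       (unsigned long long)(total - reserve));
	return 0;
}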
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index b5b7f2fe8c78e..dc7b40e06e38a 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1432,6 +1432,36 @@ static void guc_timestamp_ping(struct work_struct *wrk)
+ 	unsigned long index;
+ 	int srcu, ret;
+ 
++	/*
++	 * Ideally the busyness worker should take a gt pm wakeref because the
++	 * worker only needs to be active while gt is awake. However, the
++	 * gt_park path cancels the worker synchronously and this complicates
++	 * the flow if the worker is also running at the same time. The cancel
++	 * waits for the worker and when the worker releases the wakeref, that
++	 * would call gt_park and would lead to a deadlock.
++	 *
++	 * The resolution is to take the global pm wakeref if runtime pm is
++	 * already active. If not, we don't need to update the busyness stats as
++	 * the stats would already be updated when the gt was parked.
++	 *
++	 * Note:
++	 * - We do not requeue the worker if we cannot take a reference to runtime
++	 *   pm since intel_guc_busyness_unpark would requeue the worker in the
++	 *   resume path.
++	 *
++	 * - If the gt was parked longer than time taken for GT timestamp to roll
++	 *   over, we ignore those rollovers since we don't care about tracking
++	 *   the exact GT time. We only care about roll overs when the gt is
++	 *   active and running workloads.
++	 *
++	 * - There is a window of time between gt_park and runtime suspend,
++	 *   where the worker may run. This is acceptable since the worker will
++	 *   not find any new data to update busyness.
++	 */
++	wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
++	if (!wakeref)
++		return;
++
+ 	/*
+ 	 * Synchronize with gt reset to make sure the worker does not
+ 	 * corrupt the engine/guc stats. NB: can't actually block waiting
+@@ -1440,10 +1470,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
+ 	 */
+ 	ret = intel_gt_reset_trylock(gt, &srcu);
+ 	if (ret)
+-		return;
++		goto err_trylock;
+ 
+-	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+-		__update_guc_busyness_stats(guc);
++	__update_guc_busyness_stats(guc);
+ 
+ 	/* adjust context stats for overflow */
+ 	xa_for_each(&guc->context_lookup, index, ce)
+@@ -1452,6 +1481,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
+ 	intel_gt_reset_unlock(gt, srcu);
+ 
+ 	guc_enable_busyness_worker(guc);
++
++err_trylock:
++	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ }
+ 
+ static int guc_action_enable_usage_stats(struct intel_guc *guc)
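The long comment in this hunk spells out the deadlock: gt_park cancels the busyness worker synchronously, so the worker must not take a wakeref that parking would then wait on. The fix uses intel_runtime_pm_get_if_active() and simply returns when the device is already parked. A user-space model of that "get-if-active or skip" shape — the three helpers below are stubs, not the i915 API:

#include <stdbool.h>
#include <stdio.h>

static bool pm_get_if_active(void) { return false; /* device parked */ }
static void pm_put(void)           { puts("wakeref released"); }
static void update_busyness_stats(void) { puts("stats updated"); }

static void busyness_worker(void)
{
	/* Take a wakeref only if the device is already awake; if it is
	 * parked, stats were updated at park time and unpark requeues
	 * the worker, so just return. */
	if (!pm_get_if_active())
		return;

	update_busyness_stats();
	pm_put();
}

int main(void)
{
	busyness_worker();	/* prints nothing: device was parked */
	return 0;
}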
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index 53231bfdf7e24..b14e6e507c61b 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -332,6 +332,8 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
+ 			return;
+ 
+ 		cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
++
++		kfree(edid);
+ 	} else
+ 		cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
+ }
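The meson hunk plugs a leak: the EDID consumed here is a heap copy returned by a DRM EDID helper earlier in the function (not shown in this hunk), and the caller owns it. A minimal ownership sketch under that assumption; get_edid_copy() is a placeholder, not the DRM function:

#include <stdlib.h>

/* Stub for a DRM EDID fetch: returns a heap copy the caller must free. */
static void *get_edid_copy(void)
{
	return calloc(1, 128);
}

static void hpd_notify(void)
{
	void *edid = get_edid_copy();

	if (edid) {
		/* ... consume the EDID (CEC physical address etc.) ... */
		free(edid);	/* the fix: release the caller-owned copy */
	}
}

int main(void) { hpd_notify(); return 0; }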
+diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
+index 186b28dc70380..05d5e7af6d250 100644
+--- a/drivers/gpu/drm/tests/drm_mm_test.c
++++ b/drivers/gpu/drm/tests/drm_mm_test.c
+@@ -939,7 +939,7 @@ static void drm_test_mm_insert_range(struct kunit *test)
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+-								    max / 2, max / 2));
++								    max / 2, max));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ 								    max / 4 + 1, 3 * max / 4 - 1));
+ 
+diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
+index 1d010c66910d8..aa61e7993e21b 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
++++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
+@@ -147,7 +147,6 @@ static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
+ 	submit->buf = NULL;
+ 	submit->buflist = NULL;
+ 	submit->sync_file = NULL;
+-	submit->out_fence = NULL;
+ 	submit->out_fence_fd = -1;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index cdd8c67d91298..affcfb243f0f5 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -441,8 +441,25 @@ err_release_lock:
+ 
+ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+ {
++	unsigned int raw_intr_stats;
++	unsigned int enable;
+ 	int timeout = 100;
++	bool abort_needed;
+ 	unsigned int status;
++	int ret;
++
++	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
++	regmap_read(dev->map, DW_IC_ENABLE, &enable);
++
++	abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
++	if (abort_needed) {
++		regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
++		ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
++					       !(enable & DW_IC_ENABLE_ABORT), 10,
++					       100);
++		if (ret)
++			dev_err(dev->dev, "timeout while trying to abort current transfer\n");
++	}
+ 
+ 	do {
+ 		__i2c_dw_disable_nowait(dev);
+diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
+index cf4f684f53566..a7f6f3eafad7d 100644
+--- a/drivers/i2c/busses/i2c-designware-core.h
++++ b/drivers/i2c/busses/i2c-designware-core.h
+@@ -98,6 +98,7 @@
+ #define DW_IC_INTR_START_DET			BIT(10)
+ #define DW_IC_INTR_GEN_CALL			BIT(11)
+ #define DW_IC_INTR_RESTART_DET			BIT(12)
++#define DW_IC_INTR_MST_ON_HOLD			BIT(13)
+ 
+ #define DW_IC_INTR_DEFAULT_MASK			(DW_IC_INTR_RX_FULL | \
+ 						 DW_IC_INTR_TX_ABRT | \
+@@ -108,6 +109,8 @@
+ 						 DW_IC_INTR_RX_UNDER | \
+ 						 DW_IC_INTR_RD_REQ)
+ 
++#define DW_IC_ENABLE_ABORT			BIT(1)
++
+ #define DW_IC_STATUS_ACTIVITY			BIT(0)
+ #define DW_IC_STATUS_TFE			BIT(2)
+ #define DW_IC_STATUS_RFNE			BIT(3)
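The designware fix checks RAW_INTR_STAT for MST_ON_HOLD (the newly defined bit 13) and, if the controller is holding SCL mid-transfer, sets the self-clearing ABORT bit in IC_ENABLE and polls for it to clear before the normal disable loop runs. A compilable model of that abort-then-poll flow — the fake register file stands in for regmap, while the two bit defines come straight from the diff:

#include <stdio.h>

#define DW_IC_INTR_MST_ON_HOLD	(1u << 13)	/* new define */
#define DW_IC_ENABLE_ABORT	(1u << 1)	/* self-clearing abort */

static unsigned int raw_intr_stat = DW_IC_INTR_MST_ON_HOLD;
static unsigned int ic_enable = 0x1;
static int abort_countdown = 3;	/* "hardware" clears ABORT eventually */

static unsigned int read_enable(void)
{
	if (abort_countdown && (ic_enable & DW_IC_ENABLE_ABORT))
		if (--abort_countdown == 0)
			ic_enable &= ~DW_IC_ENABLE_ABORT;
	return ic_enable;
}

int main(void)
{
	if (raw_intr_stat & DW_IC_INTR_MST_ON_HOLD) {
		int tries = 10;

		ic_enable |= DW_IC_ENABLE_ABORT;	/* request abort */
		while ((read_enable() & DW_IC_ENABLE_ABORT) && --tries)
			;	/* kernel: regmap_read_poll_timeout() */
		printf(tries ? "abort completed\n"
			     : "timeout while aborting transfer\n");
	}
	/* ...then the usual disable/poll loop proceeds as before. */
	return 0;
}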
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 943b8e6d026da..2a3215ac01b3a 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1754,6 +1754,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		"SMBus I801 adapter at %04lx", priv->smba);
+ 	err = i2c_add_adapter(&priv->adapter);
+ 	if (err) {
++		platform_device_unregister(priv->tco_pdev);
+ 		i801_acpi_remove(priv);
+ 		return err;
+ 	}
+diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
+index 53b65ffb6a647..bf9dbab52d228 100644
+--- a/drivers/i2c/busses/i2c-npcm7xx.c
++++ b/drivers/i2c/busses/i2c-npcm7xx.c
+@@ -695,6 +695,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ {
+ 	struct i2c_msg *msgs;
+ 	int msgs_num;
++	bool do_complete = false;
+ 
+ 	msgs = bus->msgs;
+ 	msgs_num = bus->msgs_num;
+@@ -723,23 +724,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ 				 msgs[1].flags & I2C_M_RD)
+ 				msgs[1].len = info;
+ 		}
+-		if (completion_done(&bus->cmd_complete) == false)
+-			complete(&bus->cmd_complete);
+-	break;
+-
++		do_complete = true;
++		break;
+ 	case I2C_NACK_IND:
+ 		/* MASTER transmit got a NACK before tx all bytes */
+ 		bus->cmd_err = -ENXIO;
+-		if (bus->master_or_slave == I2C_MASTER)
+-			complete(&bus->cmd_complete);
+-
++		do_complete = true;
+ 		break;
+ 	case I2C_BUS_ERR_IND:
+ 		/* Bus error */
+ 		bus->cmd_err = -EAGAIN;
+-		if (bus->master_or_slave == I2C_MASTER)
+-			complete(&bus->cmd_complete);
+-
++		do_complete = true;
+ 		break;
+ 	case I2C_WAKE_UP_IND:
+ 		/* I2C wake up */
+@@ -753,6 +748,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ 	if (bus->slave)
+ 		bus->master_or_slave = I2C_SLAVE;
+ #endif
++	if (do_complete)
++		complete(&bus->cmd_complete);
+ }
+ 
+ static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index b3bb97762c859..71391b590adae 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -710,7 +710,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
+ 		 * reset the IP instead of just flush fifos
+ 		 */
+ 		ret = xiic_reinit(i2c);
+-		if (!ret)
++		if (ret < 0)
+ 			dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
+ 
+ 		if (i2c->rx_msg) {
+diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+index a3a122fae71e0..22f2280eab7f7 100644
+--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+@@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
+ 
+ 		props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+ 		props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
++		if (!props[i].name || !props[i].value) {
++			err = -ENOMEM;
++			goto err_rollback;
++		}
+ 		props[i].length = 3;
+ 
+ 		of_changeset_init(&priv->chan[i].chgset);
+diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
+index 5d5cbe0130cdf..5ca03bd34c8d1 100644
+--- a/drivers/i2c/muxes/i2c-mux-gpio.c
++++ b/drivers/i2c/muxes/i2c-mux-gpio.c
+@@ -105,8 +105,10 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
+ 
+ 		} else if (is_acpi_node(child)) {
+ 			rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
+-			if (rc)
++			if (rc) {
++				fwnode_handle_put(child);
+ 				return dev_err_probe(dev, rc, "Cannot get address\n");
++			}
+ 		}
+ 
+ 		i++;
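In the i2c-mux-gpio probe loop, an early return from inside the child-fwnode walk leaked the reference the iterator holds on the current child; the fix adds fwnode_handle_put() before returning. A stub model of the rule "drop the handle on every early exit" — the refcounted node type here is an analogy, not the fwnode API:

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

static int probe_children(struct node *kids, int n)
{
	for (int i = 0; i < n; i++) {
		node_get(&kids[i]);	/* iterator holds a reference */
		if (i == 1) {
			/* early exit: drop the reference first, which
			 * is what the fwnode_handle_put() fix adds */
			node_put(&kids[i]);
			return -1;
		}
		node_put(&kids[i]);	/* normal loop advance */
	}
	return 0;
}

int main(void)
{
	struct node kids[3] = { {0}, {0}, {0} };

	probe_children(kids, 3);
	for (int i = 0; i < 3; i++)
		printf("node %d refs %d\n", i, kids[i].refs);	/* all 0 */
	return 0;
}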
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+index a5a63b1c947eb..98d3ba7f94873 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
+ 	}
+ }
+ 
++/*
++ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
++ * is used as a threshold to replace per-page TLBI commands to issue in the
++ * command queue with an address-space TLBI command, when SMMU w/o a range
++ * invalidation feature handles too many per-page TLBI commands, which will
++ * otherwise result in a soft lockup.
++ */
++#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
++
+ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
+ 					 struct mm_struct *mm,
+ 					 unsigned long start, unsigned long end)
+@@ -200,10 +209,22 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
+ 	 * range. So do a simple translation here by calculating size correctly.
+ 	 */
+ 	size = end - start;
++	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
++		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
++			size = 0;
++	}
++
++	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
++		if (!size)
++			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
++					      smmu_mn->cd->asid);
++		else
++			arm_smmu_tlb_inv_range_asid(start, size,
++						    smmu_mn->cd->asid,
++						    PAGE_SIZE, false,
++						    smmu_domain);
++	}
+ 
+-	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
+-		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
+-					    PAGE_SIZE, false, smmu_domain);
+ 	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
+ }
+ 
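The new CMDQ_MAX_TLBI_OPS threshold (1 << (PAGE_SHIFT - 3)) caps how many per-page invalidations the MMU notifier will queue on SMMUs without range invalidation; at or above it, size is zeroed and the whole ASID is invalidated instead of each page, avoiding the soft lockup the comment describes. A quick check of the numbers for 4K pages:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4K pages */
	unsigned long page_size = 1UL << page_shift;
	unsigned long max_ops = 1UL << (page_shift - 3);	/* 512 */

	/* Ranges of 2M (512 x 4K pages) or more fall back to a single
	 * invalidate-by-ASID command instead of per-page TLBIs. */
	unsigned long size = 4UL << 20;			/* 4M range */
	if (size >= max_ops * page_size)
		size = 0;	/* 0 means "invalidate the whole ASID" */
	printf("threshold %lu pages, size now %lu\n", max_ops, size);
	return 0;
}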
+diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
+index 0f430ddc1f670..fd87747be9b17 100644
+--- a/drivers/media/common/videobuf2/frame_vector.c
++++ b/drivers/media/common/videobuf2/frame_vector.c
+@@ -31,6 +31,10 @@
+  * different type underlying the specified range of virtual addresses.
+  * When the function isn't able to map a single page, it returns error.
+  *
++ * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used
++ * to be able to do that, but that could (racily) return non-refcounted
++ * pfns.
++ *
+  * This function takes care of grabbing mmap_lock as necessary.
+  */
+ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
+@@ -59,8 +63,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
+ 	if (likely(ret > 0))
+ 		return ret;
+ 
+-	/* This used to (racily) return non-refcounted pfns. Let people know */
+-	WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
+ 	vec->nr_frames = 0;
+ 	return ret ? ret : -EFAULT;
+ }
+diff --git a/drivers/media/platform/marvell/Kconfig b/drivers/media/platform/marvell/Kconfig
+index ec1a16734a280..d6499ffe30e8b 100644
+--- a/drivers/media/platform/marvell/Kconfig
++++ b/drivers/media/platform/marvell/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_CAFE_CCIC
+ 	depends on V4L_PLATFORM_DRIVERS
+ 	depends on PCI && I2C && VIDEO_DEV
+ 	depends on COMMON_CLK
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	select VIDEOBUF2_VMALLOC
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select VIDEOBUF2_DMA_SG
+@@ -22,7 +22,7 @@ config VIDEO_MMP_CAMERA
+ 	depends on I2C && VIDEO_DEV
+ 	depends on ARCH_MMP || COMPILE_TEST
+ 	depends on COMMON_CLK
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	select I2C_GPIO
+ 	select VIDEOBUF2_VMALLOC
+ 	select VIDEOBUF2_DMA_CONTIG
+diff --git a/drivers/media/platform/via/Kconfig b/drivers/media/platform/via/Kconfig
+index 8926eb0803b27..6e603c0382487 100644
+--- a/drivers/media/platform/via/Kconfig
++++ b/drivers/media/platform/via/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_VIA_CAMERA
+ 	depends on V4L_PLATFORM_DRIVERS
+ 	depends on FB_VIA && VIDEO_DEV
+ 	select VIDEOBUF2_DMA_SG
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if VIDEO_CAMERA_SENSOR
+ 	help
+ 	   Driver support for the integrated camera controller in VIA
+ 	   Chrome9 chipsets.  Currently only tested on OLPC xo-1.5 systems
+diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
+index b3c472b8c5a96..cb61fd6cc6c61 100644
+--- a/drivers/media/usb/em28xx/Kconfig
++++ b/drivers/media/usb/em28xx/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_EM28XX_V4L2
+ 	select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+-	select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
++	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
++	select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	help
+ 	  This is a video4linux driver for Empia 28xx based TV cards.
+ 
+diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
+index 4ff79940ad8d4..b2a15d9fb1f33 100644
+--- a/drivers/media/usb/go7007/Kconfig
++++ b/drivers/media/usb/go7007/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_GO7007
+ 	select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ 	select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
++	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	help
+ 	  This is a video4linux driver for the WIS GO7007 MPEG
+ 	  encoder chip.
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 5e9d3da862dd8..e59a463c27618 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1402,6 +1402,9 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
+ 	query_menu->id = id;
+ 	query_menu->index = index;
+ 
++	if (index >= BITS_PER_TYPE(mapping->menu_mask))
++		return -EINVAL;
++
+ 	ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ 	if (ret < 0)
+ 		return -ERESTARTSYS;
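The uvc fix validates the menu index against the bit width of menu_mask before taking the mutex, so an out-of-range index from userspace fails fast with -EINVAL instead of indexing past the mask. BITS_PER_TYPE(x) is 8 * sizeof(x); a minimal model (a 32-bit mask here, purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_TYPE(t) (8 * (int)sizeof(t))

static int query_menu(uint32_t menu_mask, unsigned int index)
{
	/* Reject indices that cannot correspond to any mask bit before
	 * doing any locking or lookups. */
	if (index >= BITS_PER_TYPE(menu_mask))
		return -22;	/* -EINVAL */
	if (!(menu_mask & (1u << index)))
		return -22;	/* entry not implemented */
	return 0;
}

int main(void)
{
	printf("%d %d\n", query_menu(0x5, 2), query_menu(0x5, 40));
	return 0;	/* prints "0 -22" */
}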
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 3dae5e3a16976..cd512284bfb39 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -83,63 +83,20 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
+ 
+ static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+ 	if (CHK_PCI_PID(pcr, 0x522A)) {
+-		if (0 == (lval & 0x0F))
+-			rtsx_pci_enable_oobs_polling(pcr);
+-		else
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 			rtsx_pci_disable_oobs_polling(pcr);
++		else
++			rtsx_pci_enable_oobs_polling(pcr);
+ 	}
+ 
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+-
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+-
+ }
+ 
+ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+@@ -195,7 +152,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
+-	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
++	if (option->force_clkreq_0)
+ 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ 				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
+index f4ab09439da70..0c7f10bcf6f12 100644
+--- a/drivers/misc/cardreader/rts5228.c
++++ b/drivers/misc/cardreader/rts5228.c
+@@ -386,59 +386,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr)
+ 
+ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (0 == (lval & 0x0F))
+-		rtsx_pci_enable_oobs_polling(pcr);
+-	else
++	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 		rtsx_pci_disable_oobs_polling(pcr);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ 	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
++		rtsx_pci_enable_oobs_polling(pcr);
+ 
+ 	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+-	if (option->ltr_en) {
+-		u16 val;
+ 
+-		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++	if (option->ltr_en) {
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ 			CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+@@ -469,6 +435,17 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
+index 47ab72a43256b..6c81040e18bef 100644
+--- a/drivers/misc/cardreader/rts5249.c
++++ b/drivers/misc/cardreader/rts5249.c
+@@ -86,64 +86,22 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+ 
+ static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+ 	struct rtsx_cr_option *option = &(pcr->option);
+-	u32 lval;
+-
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+ 
+ 	if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+-		if (0 == (lval & 0x0F))
+-			rtsx_pci_enable_oobs_polling(pcr);
+-		else
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 			rtsx_pci_disable_oobs_polling(pcr);
++		else
++			rtsx_pci_enable_oobs_polling(pcr);
+ 	}
+ 
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+-{
+-	struct rtsx_cr_option *option = &(pcr->option);
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+-
+-	return 0;
+-}
+-
+ static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+ {
+ 	/* Set relink_time to 0 */
+@@ -276,7 +234,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+ 	struct rtsx_cr_option *option = &(pcr->option);
+ 
+ 	rts5249_init_from_cfg(pcr);
+-	rts5249_init_from_hw(pcr);
+ 
+ 	rtsx_pci_init_cmd(pcr);
+ 
+@@ -327,11 +284,12 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
++
+ 	/*
+ 	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ 	 * to drive low, and we forcibly request clock.
+ 	 */
+-	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
++	if (option->force_clkreq_0)
+ 		rtsx_pci_write_register(pcr, PETXCFG,
+ 			FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
+index 79b18f6f73a8a..d2d3a6ccb8f7d 100644
+--- a/drivers/misc/cardreader/rts5260.c
++++ b/drivers/misc/cardreader/rts5260.c
+@@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+ 
+ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+ 	struct rtsx_cr_option *option = &pcr->option;
+-	u32 lval;
+-
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ 
+ 	rts5260_pwr_saving_setting(pcr);
+ 
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ 	rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+@@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ 
+ 	rts5260_init_hw(pcr);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ 
+ 	return 0;
+diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
+index 94af6bf8a25a6..67252512a1329 100644
+--- a/drivers/misc/cardreader/rts5261.c
++++ b/drivers/misc/cardreader/rts5261.c
+@@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
+ 
+ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+-
+-	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 	u32 val;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+@@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index a3f4b52bb159f..a30751ad37330 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1326,11 +1326,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 			return err;
+ 	}
+ 
+-	if (pcr->aspm_mode == ASPM_MODE_REG) {
++	if (pcr->aspm_mode == ASPM_MODE_REG)
+ 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-	}
+ 
+ 	/* No CD interrupt if probing driver with card inserted.
+ 	 * So we need to initialize pcr->card_exist here.
+@@ -1345,7 +1342,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 
+ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ {
+-	int err;
++	struct rtsx_cr_option *option = &(pcr->option);
++	int err, l1ss;
++	u32 lval;
+ 	u16 cfg_val;
+ 	u8 val;
+ 
+@@ -1430,6 +1429,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ 			pcr->aspm_enabled = true;
+ 	}
+ 
++	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
++	if (l1ss) {
++		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
++
++		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
++			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
++		else
++			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
++
++		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
++			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
++		else
++			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
++
++		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
++			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
++		else
++			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
++
++		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
++			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
++		else
++			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
++
++		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
++		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
++			option->ltr_enabled = true;
++			option->ltr_active = true;
++		} else {
++			option->ltr_enabled = false;
++		}
++
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
++			option->force_clkreq_0 = false;
++		else
++			option->force_clkreq_0 = true;
++	} else {
++		option->ltr_enabled = false;
++		option->force_clkreq_0 = true;
++	}
++
+ 	if (pcr->ops->fetch_vendor_settings)
+ 		pcr->ops->fetch_vendor_settings(pcr);
+ 
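The rtsx hunks delete five per-chip copies of the L1SS/LTR probing and do it once in rtsx_pci_init_chip(): read PCI_L1SS_CTL1, mirror the four enable bits into driver flags, and derive force_clkreq_0 from whether any L1 substate is enabled. A condensed model of that bit-to-flag mapping; the bit positions below are illustrative (the real PCI_L1SS_CTL1_* macros live in include/uapi/linux/pci_regs.h):

#include <stdio.h>
#include <stdbool.h>

#define ASPM_L1_1	(1u << 3)
#define ASPM_L1_2	(1u << 2)
#define PM_L1_1		(1u << 1)
#define PM_L1_2		(1u << 0)

int main(void)
{
	unsigned int lval = ASPM_L1_1 | PM_L1_2;	/* read from CTL1 */
	unsigned int flags = 0;

	/* Mirror each capability bit into a device flag, as the diff
	 * does with rtsx_set_dev_flag()/rtsx_clear_dev_flag(). */
	if (lval & ASPM_L1_1) flags |= ASPM_L1_1;
	if (lval & ASPM_L1_2) flags |= ASPM_L1_2;
	if (lval & PM_L1_1)   flags |= PM_L1_1;
	if (lval & PM_L1_2)   flags |= PM_L1_2;

	/* force_clkreq_0 only when no L1 substate is enabled. */
	bool force_clkreq_0 = !(flags & (ASPM_L1_1 | ASPM_L1_2 |
					 PM_L1_1 | PM_L1_2));
	printf("flags 0x%x force_clkreq_0=%d\n", flags, force_clkreq_0);
	return 0;
}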
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index d19593fae2265..dcfda0e8e1b78 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1833,6 +1833,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	return work_done;
+ 
+ error:
++	if (xdp_flags & ENA_XDP_REDIRECT)
++		xdp_do_flush();
++
+ 	adapter = netdev_priv(rx_ring->netdev);
+ 
+ 	if (rc == -ENOSPC) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 1eb490c48c52e..3325e7021745f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2626,6 +2626,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 	struct rx_cmp_ext *rxcmp1;
+ 	u32 cp_cons, tmp_raw_cons;
+ 	u32 raw_cons = cpr->cp_raw_cons;
++	bool flush_xdp = false;
+ 	u32 rx_pkts = 0;
+ 	u8 event = 0;
+ 
+@@ -2660,6 +2661,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 				rx_pkts++;
+ 			else if (rc == -EBUSY)	/* partial completion */
+ 				break;
++			if (event & BNXT_REDIRECT_EVENT)
++				flush_xdp = true;
+ 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
+ 				    CMPL_BASE_TYPE_HWRM_DONE)) {
+ 			bnxt_hwrm_handler(bp, txcmp);
+@@ -2679,6 +2682,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 
+ 	if (event & BNXT_AGG_EVENT)
+ 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
++	if (flush_xdp)
++		xdp_do_flush();
+ 
+ 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+ 		napi_complete_done(napi, rx_pkts);
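Both the ena and bnxt hunks make sure xdp_do_flush() runs on every exit path of a NAPI poll that may have queued XDP_REDIRECT frames — the newly covered error path in ena and the nitro-A0 poll in bnxt — since redirected frames are only batched until the flush. A stub model of the pattern (xdp_flush() is a placeholder for the kernel call):

#include <stdbool.h>
#include <stdio.h>

static void xdp_flush(void) { puts("flush redirected frames"); }

static int poll_ring(int budget, bool inject_error)
{
	bool redirected = false;
	int done = 0;

	while (done < budget) {
		redirected = true;	/* pretend a packet was redirected */
		done++;
		if (inject_error)
			goto error;
	}
	if (redirected)
		xdp_flush();
	return done;

error:
	/* the fix: flush on the error path too, or batched frames
	 * linger until the next poll */
	if (redirected)
		xdp_flush();
	return done;
}

int main(void)
{
	poll_ring(4, false);	/* flush on the normal path */
	poll_ring(4, true);	/* flush on the error path */
	return 0;
}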
+diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
+index 716815dad7d21..65ec1abc94421 100644
+--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
++++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
+@@ -300,10 +300,8 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
+ {
+ 	struct tsnep_adapter *adapter = netdev_priv(netdev);
+ 
+-	ch->max_rx = adapter->num_rx_queues;
+-	ch->max_tx = adapter->num_tx_queues;
+-	ch->rx_count = adapter->num_rx_queues;
+-	ch->tx_count = adapter->num_tx_queues;
++	ch->max_combined = adapter->num_queues;
++	ch->combined_count = adapter->num_queues;
+ }
+ 
+ static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 84751bb303a68..479156576bc8a 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -86,8 +86,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
+ 
+ 	/* handle TX/RX queue 0 interrupt */
+ 	if ((active & adapter->queue[0].irq_mask) != 0) {
+-		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+-		napi_schedule(&adapter->queue[0].napi);
++		if (napi_schedule_prep(&adapter->queue[0].napi)) {
++			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
++			/* schedule after masking to avoid races */
++			__napi_schedule(&adapter->queue[0].napi);
++		}
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -98,8 +101,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+ 	struct tsnep_queue *queue = arg;
+ 
+ 	/* handle TX/RX queue interrupt */
+-	tsnep_disable_irq(queue->adapter, queue->irq_mask);
+-	napi_schedule(&queue->napi);
++	if (napi_schedule_prep(&queue->napi)) {
++		tsnep_disable_irq(queue->adapter, queue->irq_mask);
++		/* schedule after masking to avoid races */
++		__napi_schedule(&queue->napi);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
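The tsnep interrupt handlers now call napi_schedule_prep() first, mask the queue interrupt only if NAPI was actually armed, and then __napi_schedule(); this closes the race where the IRQ could be masked while NAPI was already running and would then never be re-enabled. (The hunk that follows separately guards the zero-budget netpoll case.) A stub model of the ordering — napi_prep() is a stand-in for the kernel's atomic test-and-set:

#include <stdbool.h>
#include <stdio.h>

static bool napi_prep(bool *scheduled)
{
	/* models napi_schedule_prep(): claim the right to schedule */
	if (*scheduled)
		return false;
	*scheduled = true;
	return true;
}

static void irq_handler(bool *napi_scheduled)
{
	if (napi_prep(napi_scheduled)) {
		puts("mask irq");	/* disable only after claiming NAPI */
		puts("__napi_schedule");
	}
	/* else: NAPI already owns the queue; leave the irq mask alone */
}

int main(void)
{
	bool scheduled = false;

	irq_handler(&scheduled);	/* masks + schedules */
	irq_handler(&scheduled);	/* no-op: already scheduled */
	return 0;
}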
+@@ -1727,6 +1733,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
+ 	if (queue->tx)
+ 		complete = tsnep_tx_poll(queue->tx, budget);
+ 
++	/* handle case where we are called by netpoll with a budget of 0 */
++	if (unlikely(budget <= 0))
++		return budget;
++
+ 	if (queue->rx) {
+ 		done = queue->rx->xsk_pool ?
+ 		       tsnep_rx_poll_zc(queue->rx, napi, budget) :
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 613d0a779cef2..71a2ec03f2b38 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3352,6 +3352,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
+ 		  NETIF_F_HW_TC);
+ 
+ 	netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
++
++	/* The device_version V3 hardware can't offload the checksum for IP in
++	 * GRE packets, but can do it for NvGRE. So default to disable the
++	 * checksum and GSO offload for GRE.
++	 */
++	if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
++		netdev->features &= ~NETIF_F_GSO_GRE;
++		netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
++	}
+ }
+ 
+ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index ce6b658a930cc..ed6cf59853bf6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3564,9 +3564,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ 				    u32 regclr)
+ {
++#define HCLGE_IMP_RESET_DELAY		5
++
+ 	switch (event_type) {
+ 	case HCLGE_VECTOR0_EVENT_PTP:
+ 	case HCLGE_VECTOR0_EVENT_RST:
++		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
++			mdelay(HCLGE_IMP_RESET_DELAY);
++
+ 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ 		break;
+ 	case HCLGE_VECTOR0_EVENT_MBX:
+@@ -7348,6 +7353,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
+ 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+ 				   NULL, false);
+ 	if (ret) {
++		/* if tcam config fail, set rule state to TO_DEL,
++		 * so the rule will be deleted when periodic
++		 * task being scheduled.
++		 */
++		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
++		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ 		spin_unlock_bh(&hdev->fd_rule_lock);
+ 		return ret;
+ 	}
+@@ -8824,7 +8835,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ 	if (mac_type == HCLGE_MAC_ADDR_UC) {
+ 		if (is_all_added)
+ 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+-		else
++		else if (hclge_is_umv_space_full(vport, true))
+ 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ 	} else {
+ 		if (is_all_added)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 7a2f9233d6954..a4d68fb216fb9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1855,7 +1855,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
+ 	unsigned long delta = round_jiffies_relative(HZ);
+ 	struct hnae3_handle *handle = &hdev->nic;
+ 
+-	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
++	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
++	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ 		return;
+ 
+ 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
+index 9406237c461e0..f81a43d2cdfcd 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
+@@ -456,9 +456,6 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
+ 	u16 out_size = sizeof(vlan_filter);
+ 	int err;
+ 
+-	if (!hwdev)
+-		return -EINVAL;
+-
+ 	vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ 	vlan_filter.enable = en;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index be59ba3774e15..c1e1e8912350b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -4464,9 +4464,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ 		goto error_pvid;
+ 
+ 	i40e_vlan_stripping_enable(vsi);
+-	i40e_vc_reset_vf(vf, true);
+-	/* During reset the VF got a new VSI, so refresh a pointer. */
+-	vsi = pf->vsi[vf->lan_vsi_idx];
++
+ 	/* Locked once because multiple functions below iterate list */
+ 	spin_lock_bh(&vsi->mac_filter_hash_lock);
+ 
+@@ -4552,6 +4550,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ 	 */
+ 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ 
++	i40e_vc_reset_vf(vf, true);
++	/* During reset the VF got a new VSI, so refresh a pointer. */
++	vsi = pf->vsi[vf->lan_vsi_idx];
++
+ 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 8cbdebc5b6989..4d4508e04b1d2 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -521,7 +521,7 @@ void iavf_down(struct iavf_adapter *adapter);
+ int iavf_process_config(struct iavf_adapter *adapter);
+ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
+ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
+-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
++void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
+ void iavf_schedule_finish_config(struct iavf_adapter *adapter);
+ void iavf_reset(struct iavf_adapter *adapter);
+ void iavf_set_ethtool_ops(struct net_device *netdev);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index a34303ad057d0..90397293525f7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
+ 	unsigned int i;
+ 
+ 	/* Explicitly request stats refresh */
+-	iavf_schedule_request_stats(adapter);
++	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
+ 
+ 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 9610ca770349e..8ea5c0825c3c4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -314,15 +314,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
+ }
+ 
+ /**
+- * iavf_schedule_request_stats - Set the flags and schedule statistics request
++ * iavf_schedule_aq_request - Set the flags and schedule aq request
+  * @adapter: board private structure
+- *
+- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
+- * request and refresh ethtool stats
++ * @flags: requested aq flags
+  **/
+-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
++void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
+ {
+-	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
++	adapter->aq_required |= flags;
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+@@ -823,7 +821,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		list_add_tail(&f->list, &adapter->vlan_filter_list);
+ 		f->state = IAVF_VLAN_ADD;
+ 		adapter->num_vlan_filters++;
+-		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
++		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ 	}
+ 
+ clearout:
+@@ -845,7 +843,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+ 		f->state = IAVF_VLAN_REMOVE;
+-		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
++		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ 	}
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
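The iavf hunks generalize the old stats-only helper: merely setting a bit in
adapter->aq_required left the request waiting for the next periodic watchdog
pass, so a freshly added or deleted VLAN filter could sit unprocessed until
then. The renamed iavf_schedule_aq_request() records the flag and kicks the
watchdog in one step, e.g.:

    iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);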
+@@ -1421,7 +1419,8 @@ void iavf_down(struct iavf_adapter *adapter)
+ 	iavf_clear_fdir_filters(adapter);
+ 	iavf_clear_adv_rss_conf(adapter);
+ 
+-	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
++	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
++	    !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
+ 		/* cancel any current operation */
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		/* Schedule operations to close down the HW. Don't wait
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 93bce729be76a..7ab6dd58e4001 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -868,6 +868,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
+ 	spin_unlock(&adapter->stats64_lock);
+ }
+ 
++static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
++{
++	return (adapter->rx_itr_setting <= 3) ?
++		adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
++}
++
++static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
++{
++	return (adapter->tx_itr_setting <= 3) ?
++		adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
++}
++
+ static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ 				    struct ethtool_coalesce *ec,
+ 				    struct kernel_ethtool_coalesce *kernel_coal,
+@@ -875,17 +887,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ {
+ 	struct igc_adapter *adapter = netdev_priv(netdev);
+ 
+-	if (adapter->rx_itr_setting <= 3)
+-		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+-	else
+-		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+-
+-	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
+-		if (adapter->tx_itr_setting <= 3)
+-			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+-		else
+-			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+-	}
++	ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
++	ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
+ 
+ 	return 0;
+ }
+@@ -910,8 +913,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ 	    ec->tx_coalesce_usecs == 2)
+ 		return -EINVAL;
+ 
+-	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
++	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
++	    ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
+ 		return -EINVAL;
++	}
+ 
+ 	/* If ITR is disabled, disable DMAC */
+ 	if (ec->rx_coalesce_usecs == 0) {
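The igc hunks first factor the duplicated ITR decode into helpers: the driver
keeps rx/tx_itr_setting left-shifted by two, with raw values 0-3 reserved for
special modes, so converting back to ethtool microseconds is (condensed shape
of the new helpers):

    return itr_setting <= 3 ? itr_setting : itr_setting >> 2;

The set_coalesce change builds on that: ethtool userspace typically reads the
current coalesce values and echoes the tx value back even when only rx-usecs
is being changed, so in queue-pair mode (where one value drives both rings)
rejecting every non-zero tx value broke a plain "ethtool -C <dev> rx-usecs N".
Comparing against the decoded previous value rejects only genuine attempts to
set tx separately, now with an extack message explaining why.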
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 6f557e843e495..4e23b821c39ba 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6433,7 +6433,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 	struct igc_ring *ring;
+ 	int i, drops;
+ 
+-	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
++	if (unlikely(!netif_carrier_ok(dev)))
+ 		return -ENETDOWN;
+ 
+ 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 4424de2ffd70c..dbc518ff82768 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -734,13 +734,13 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ dma_map_sg_err:
+ 	if (si > 0) {
+ 		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+-				 sglist[0].len[0], DMA_TO_DEVICE);
+-		sglist[0].len[0] = 0;
++				 sglist[0].len[3], DMA_TO_DEVICE);
++		sglist[0].len[3] = 0;
+ 	}
+ 	while (si > 1) {
+ 		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+-			       sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
+-		sglist[si >> 2].len[si & 3] = 0;
++			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
++		sglist[si >> 2].len[3 - (si & 3)] = 0;
+ 		si--;
+ 	}
+ 	tx_buffer->gather = 0;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+index 5a520d37bea02..d0adb82d65c31 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+@@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+ 		compl_sg++;
+ 
+ 		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+-				 tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
++				 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+ 
+ 		i = 1; /* entry 0 is main skb, unmapped above */
+ 		while (frags--) {
+ 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
++				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ 			i++;
+ 		}
+ 
+@@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq)
+ 
+ 		dma_unmap_single(iq->dev,
+ 				 tx_buffer->sglist[0].dma_ptr[0],
+-				 tx_buffer->sglist[0].len[0],
++				 tx_buffer->sglist[0].len[3],
+ 				 DMA_TO_DEVICE);
+ 
+ 		i = 1; /* entry 0 is main skb, unmapped above */
+ 		while (frags--) {
+ 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
++				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ 			i++;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+index 2ef57980eb47b..21e75ff9f5e71 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+@@ -17,7 +17,21 @@
+ #define TX_BUFTYPE_NET_SG        2
+ #define NUM_TX_BUFTYPES          3
+ 
+-/* Hardware format for Scatter/Gather list */
++/* Hardware format for Scatter/Gather list
++ *
++ * 63      48|47     32|31     16|15       0
++ * -----------------------------------------
++ * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
++ * -----------------------------------------
++ * |                Ptr 0                  |
++ * -----------------------------------------
++ * |                Ptr 1                  |
++ * -----------------------------------------
++ * |                Ptr 2                  |
++ * -----------------------------------------
++ * |                Ptr 3                  |
++ * -----------------------------------------
++ */
+ struct octep_tx_sglist_desc {
+ 	u16 len[4];
+ 	dma_addr_t dma_ptr[4];
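The octeon_ep hunks fix which 16-bit length slot gets unmapped and cleared:
as the new comment block shows, the four lengths share one 64-bit word with
Len 0 in the top bits, so for scatter entry i the pointer keeps its natural
slot while the length index runs backwards. A hedged sketch of the index
math (helper names are illustrative, not from the driver):

    static inline u16 octep_sg_len(const struct octep_tx_sglist_desc *sg, int i)
    {
        return sg[i >> 2].len[3 - (i & 3)];   /* lengths packed in reverse */
    }

    static inline dma_addr_t octep_sg_ptr(const struct octep_tx_sglist_desc *sg, int i)
    {
        return sg[i >> 2].dma_ptr[i & 3];     /* pointers in natural order */
    }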
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index e77d438489557..53b2a4ef52985 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -29,7 +29,8 @@
+ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				     struct bpf_prog *prog,
+ 				     struct nix_cqe_rx_s *cqe,
+-				     struct otx2_cq_queue *cq);
++				     struct otx2_cq_queue *cq,
++				     bool *need_xdp_flush);
+ 
+ static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ 				 struct otx2_cq_queue *cq)
+@@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				 struct napi_struct *napi,
+ 				 struct otx2_cq_queue *cq,
+-				 struct nix_cqe_rx_s *cqe)
++				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
+ {
+ 	struct nix_rx_parse_s *parse = &cqe->parse;
+ 	struct nix_rx_sg_s *sg = &cqe->sg;
+@@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 	}
+ 
+ 	if (pfvf->xdp_prog)
+-		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
++		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ 			return;
+ 
+ 	skb = napi_get_frags(napi);
+@@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ 				struct napi_struct *napi,
+ 				struct otx2_cq_queue *cq, int budget)
+ {
++	bool need_xdp_flush = false;
+ 	struct nix_cqe_rx_s *cqe;
+ 	int processed_cqe = 0;
+ 
+@@ -409,13 +411,15 @@ process_cqe:
+ 		cq->cq_head++;
+ 		cq->cq_head &= (cq->cqe_cnt - 1);
+ 
+-		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
++		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
+ 
+ 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ 		cqe->sg.seg_addr = 0x00;
+ 		processed_cqe++;
+ 		cq->pend_cqe--;
+ 	}
++	if (need_xdp_flush)
++		xdp_do_flush();
+ 
+ 	/* Free CQEs to HW */
+ 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+@@ -1354,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				     struct bpf_prog *prog,
+ 				     struct nix_cqe_rx_s *cqe,
+-				     struct otx2_cq_queue *cq)
++				     struct otx2_cq_queue *cq,
++				     bool *need_xdp_flush)
+ {
+ 	unsigned char *hard_start, *data;
+ 	int qidx = cq->cq_idx;
+@@ -1391,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 
+ 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ 				    DMA_FROM_DEVICE);
+-		if (!err)
++		if (!err) {
++			*need_xdp_flush = true;
+ 			return true;
++		}
+ 		put_page(page);
+ 		break;
+ 	default:
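The otx2 hunks add the missing XDP flush: previously a successful
XDP_REDIRECT simply returned true and the driver never called
xdp_do_flush(), leaving redirected frames queued in the per-CPU bulk
buffers. The XDP core requires the flush before the NAPI poll completes,
but once per poll, not per packet. Condensed from the hunks above:

    if (!err) {
        *need_xdp_flush = true;   /* redirect queued; flush after the loop */
        return true;
    }
    ...
    if (need_xdp_flush)
        xdp_do_flush();           /* once per NAPI poll, not per packet */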
+diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+index c07f25e791c76..fe4e166de8a04 100644
+--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
++++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+@@ -243,10 +243,9 @@ static void vcap_test_api_init(struct vcap_admin *admin)
+ }
+ 
+ /* Helper function to create a rule of a specific size */
+-static struct vcap_rule *
+-test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
+-			  u16 priority,
+-			  int id, int size, int expected_addr)
++static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
++				      enum vcap_user user, u16 priority,
++				      int id, int size, int expected_addr)
+ {
+ 	struct vcap_rule *rule;
+ 	struct vcap_rule_internal *ri;
+@@ -311,7 +310,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
+ 	ret = vcap_add_rule(rule);
+ 	KUNIT_EXPECT_EQ(test, 0, ret);
+ 	KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
+-	return rule;
++	vcap_free_rule(rule);
+ }
+ 
+ /* Prepare testing rule deletion */
+@@ -995,6 +994,16 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
+ }
+ 
++static void vcap_free_ckf(struct vcap_rule *rule)
++{
++	struct vcap_client_keyfield *ckf, *next_ckf;
++
++	list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
++		list_del(&ckf->ctrl.list);
++		kfree(ckf);
++	}
++}
++
+ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ {
+ 	struct vcap_admin admin = {
+@@ -1027,6 +1036,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
++	vcap_free_ckf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->keyfields);
+ 	ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+@@ -1039,6 +1049,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
+ 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
++	vcap_free_ckf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->keyfields);
+ 	ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+@@ -1052,6 +1063,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
++	vcap_free_ckf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->keyfields);
+ 	ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
+@@ -1064,6 +1076,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
+ 	KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
++	vcap_free_ckf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->keyfields);
+ 	ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
+@@ -1078,6 +1091,18 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+ 		KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
+ 	for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
+ 		KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
++	vcap_free_ckf(rule);
++}
++
++static void vcap_free_caf(struct vcap_rule *rule)
++{
++	struct vcap_client_actionfield *caf, *next_caf;
++
++	list_for_each_entry_safe(caf, next_caf,
++				 &rule->actionfields, ctrl.list) {
++		list_del(&caf->ctrl.list);
++		kfree(caf);
++	}
+ }
+ 
+ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+@@ -1105,6 +1130,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
++	vcap_free_caf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->actionfields);
+ 	ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
+@@ -1116,6 +1142,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
++	vcap_free_caf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->actionfields);
+ 	ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
+@@ -1127,6 +1154,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
++	vcap_free_caf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->actionfields);
+ 	ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
+@@ -1138,6 +1166,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
++	vcap_free_caf(rule);
+ 
+ 	INIT_LIST_HEAD(&rule->actionfields);
+ 	ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
+@@ -1149,6 +1178,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
+ 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ 	KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
++	vcap_free_caf(rule);
+ }
+ 
+ static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
+@@ -1408,6 +1438,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
+ 	ret = list_empty(&is2_admin.rules);
+ 	KUNIT_EXPECT_EQ(test, false, ret);
+ 	KUNIT_EXPECT_EQ(test, 0, ret);
++
++	vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
++			    rule->cookie, false);
++
+ 	vcap_free_rule(rule);
+ 
+ 	/* Check that the rule has been freed: tricky to access since this
+@@ -1418,6 +1452,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, true, ret);
+ 	ret = list_empty(&rule->actionfields);
+ 	KUNIT_EXPECT_EQ(test, true, ret);
++
++	vcap_del_rule(&test_vctrl, &test_netdev, id);
+ }
+ 
+ static void vcap_api_set_rule_counter_test(struct kunit *test)
+@@ -1561,6 +1597,11 @@ static void vcap_api_rule_insert_in_order_test(struct kunit *test)
+ 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
++
++	vcap_del_rule(&test_vctrl, &test_netdev, 200);
++	vcap_del_rule(&test_vctrl, &test_netdev, 300);
++	vcap_del_rule(&test_vctrl, &test_netdev, 400);
++	vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ }
+ 
+ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
+@@ -1619,6 +1660,11 @@ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
+ 		++idx;
+ 	}
+ 	KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
++
++	vcap_del_rule(&test_vctrl, &test_netdev, 500);
++	vcap_del_rule(&test_vctrl, &test_netdev, 400);
++	vcap_del_rule(&test_vctrl, &test_netdev, 300);
++	vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ }
+ 
+ static void vcap_api_rule_remove_at_end_test(struct kunit *test)
+@@ -1819,6 +1865,9 @@ static void vcap_api_rule_remove_in_front_test(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, 786, test_init_start);
+ 	KUNIT_EXPECT_EQ(test, 8, test_init_count);
+ 	KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
++
++	vcap_del_rule(&test_vctrl, &test_netdev, 200);
++	vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ }
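The kunit changes plug leaks: rules and the key/action fields a test case
allocates are now freed (or removed through vcap_del_rule()) before the case
returns. The new free helpers need the _safe list walker because the loop
body frees the node the iterator would otherwise read to advance.
Illustrative contrast only:

    list_for_each_entry(ckf, &rule->keyfields, ctrl.list)
        kfree(ckf);               /* BUG: next pointer read from freed node */

    list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
        list_del(&ckf->ctrl.list);
        kfree(ckf);               /* safe: successor cached in next_ckf */
    }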
+ 
+ static struct kunit_case vcap_api_rule_remove_test_cases[] = {
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index 0bea208bfba2f..43ce0aac6a94c 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -187,6 +187,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
+ 			      struct ionic_desc_info *desc_info,
+ 			      struct ionic_cq_info *cq_info, void *cb_arg);
+ 
++#define IONIC_MAX_BUF_LEN			((u16)-1)
+ #define IONIC_PAGE_SIZE				PAGE_SIZE
+ #define IONIC_PAGE_SPLIT_SZ			(PAGE_SIZE / 2)
+ #define IONIC_PAGE_GFP_MASK			(GFP_ATOMIC | __GFP_NOWARN |\
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index 26798fc635dbd..44466e8c5d77b 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+ 			return NULL;
+ 		}
+ 
+-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
++		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
++						 IONIC_PAGE_SIZE - buf_info->page_offset));
+ 		len -= frag_len;
+ 
+ 		dma_sync_single_for_cpu(dev,
+@@ -452,7 +453,8 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 
+ 		/* fill main descriptor - buf[0] */
+ 		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
++		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
++						 IONIC_PAGE_SIZE - buf_info->page_offset));
+ 		desc->len = cpu_to_le16(frag_len);
+ 		remain_len -= frag_len;
+ 		buf_info++;
+@@ -471,7 +473,9 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 			}
+ 
+ 			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+-			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
++			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
++								IONIC_PAGE_SIZE -
++								buf_info->page_offset));
+ 			sg_elem->len = cpu_to_le16(frag_len);
+ 			remain_len -= frag_len;
+ 			buf_info++;
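The ionic hunks guard a 16-bit truncation: IONIC_PAGE_SIZE follows PAGE_SIZE,
so on a 64 KiB-page architecture the available space can be exactly 65536,
which the old min_t(u16, ...) chopped to a zero-length fragment. Worked
through with page_offset == 0:

    /* IONIC_PAGE_SIZE - page_offset   == 65536 on a 64 KiB page
     * (u16)65536                      == 0      <- old code: zero-length frag
     * min_t(u32, 65535, 65536)        == 65535  <- clamp first, then narrow */
    frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
                                     IONIC_PAGE_SIZE - buf_info->page_offset));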
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 382756c3fb837..1b0fc84b4d0cd 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2127,7 +2127,12 @@ static const struct ethtool_ops team_ethtool_ops = {
+ static void team_setup_by_port(struct net_device *dev,
+ 			       struct net_device *port_dev)
+ {
+-	dev->header_ops	= port_dev->header_ops;
++	struct team *team = netdev_priv(dev);
++
++	if (port_dev->type == ARPHRD_ETHER)
++		dev->header_ops	= team->header_ops_cache;
++	else
++		dev->header_ops	= port_dev->header_ops;
+ 	dev->type = port_dev->type;
+ 	dev->hard_header_len = port_dev->hard_header_len;
+ 	dev->needed_headroom = port_dev->needed_headroom;
+@@ -2174,8 +2179,11 @@ static int team_dev_type_check_change(struct net_device *dev,
+ 
+ static void team_setup(struct net_device *dev)
+ {
++	struct team *team = netdev_priv(dev);
++
+ 	ether_setup(dev);
+ 	dev->max_mtu = ETH_MAX_MTU;
++	team->header_ops_cache = dev->header_ops;
+ 
+ 	dev->netdev_ops = &team_netdev_ops;
+ 	dev->ethtool_ops = &team_ethtool_ops;
+diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
+index 0c1e8970ee589..0a53ec293d040 100644
+--- a/drivers/net/thunderbolt/main.c
++++ b/drivers/net/thunderbolt/main.c
+@@ -1049,12 +1049,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
+ 		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ 					    ip_hdr(skb)->daddr, 0,
+ 					    ip_hdr(skb)->protocol, 0);
+-	} else if (skb_is_gso_v6(skb)) {
++	} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
+ 		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
+ 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ 					  &ipv6_hdr(skb)->daddr, 0,
+ 					  IPPROTO_TCP, 0);
+-		return false;
+ 	} else if (protocol == htons(ETH_P_IPV6)) {
+ 		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
+ 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index c9a9373733c01..4b2db14472e6c 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -4296,6 +4296,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
+ 		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
+ 		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
+ 		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
++		nla_total_size(0) + /* IFLA_VXLAN_GBP */
++		nla_total_size(0) + /* IFLA_VXLAN_GPE */
++		nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
++		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
+ 		0;
+ }
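vxlan_get_size() must reserve netlink room for every attribute
vxlan_fill_info() may emit, and four reservations were missing. GBP, GPE and
REMCSUM_NOPARTIAL are flag attributes, emitted with nla_put_flag() and
costing only an attribute header, which is exactly what nla_total_size(0)
accounts for; VNIFILTER carries a one-byte payload. The fill side looks
roughly like this (sketch, condensed from the driver):

    if (vxlan->cfg.flags & VXLAN_F_GBP &&
        nla_put_flag(skb, IFLA_VXLAN_GBP))      /* header only, no payload */
        goto nla_put_failure;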
+ 
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 1cd2bf82319a9..a15b37750d6e9 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1924,7 +1924,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+ 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ 	struct request *rq = op->rq;
+ 
+-	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
++	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
+ 		return NULL;
+ 	return blkcg_get_fc_appid(rq->bio);
+ }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 2f57da12d9836..347cb5daebc3c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2916,9 +2916,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ 	struct nvme_dev *dev;
+ 	int ret = -ENOMEM;
+ 
+-	if (node == NUMA_NO_NODE)
+-		set_dev_node(&pdev->dev, first_memory_node);
+-
+ 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+ 	if (!dev)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index 10e846286f4ef..623707fc6ff1c 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -222,7 +222,7 @@ struct ioa_registers {
+ struct ioc {
+ 	struct ioa_registers __iomem *ioc_regs;  /* I/O MMU base address */
+ 	u8  *res_map;	                /* resource map, bit == pdir entry */
+-	u64 *pdir_base;	                /* physical base address */
++	__le64 *pdir_base;		/* physical base address */
+ 	u32 pdir_size;			/* bytes, function of IOV Space size */
+ 	u32 res_hint;			/* next available IOVP -
+ 					   circular search */
+@@ -347,7 +347,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
+ 	BUG_ON(pages_needed == 0);
+ 	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
+ 
+-	DBG_RES("%s() size: %d pages_needed %d\n",
++	DBG_RES("%s() size: %zu pages_needed %d\n",
+ 			__func__, size, pages_needed);
+ 
+ 	/*
+@@ -435,7 +435,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
+ 	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
+ 	BUG_ON(pages_mapped > BITS_PER_LONG);
+ 
+-	DBG_RES("%s():  res_idx: %d pages_mapped %d\n", 
++	DBG_RES("%s():  res_idx: %d pages_mapped %lu\n",
+ 		__func__, res_idx, pages_mapped);
+ 
+ #ifdef CCIO_COLLECT_STATS
+@@ -551,7 +551,7 @@ static u32 hint_lookup[] = {
+  * index are bits 12:19 of the value returned by LCI.
+  */ 
+ static void
+-ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
++ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ 		   unsigned long hints)
+ {
+ 	register unsigned long pa;
+@@ -727,7 +727,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
+ 	unsigned long flags;
+ 	dma_addr_t iovp;
+ 	dma_addr_t offset;
+-	u64 *pdir_start;
++	__le64 *pdir_start;
+ 	unsigned long hint = hint_lookup[(int)direction];
+ 
+ 	BUG_ON(!dev);
+@@ -754,8 +754,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
+ 
+ 	pdir_start = &(ioc->pdir_base[idx]);
+ 
+-	DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
+-		__func__, addr, (long)iovp | offset, size);
++	DBG_RUN("%s() %px -> %#lx size: %zu\n",
++		__func__, addr, (long)(iovp | offset), size);
+ 
+ 	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
+ 	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
+@@ -813,7 +813,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ 		return;
+ 	}
+ 
+-	DBG_RUN("%s() iovp 0x%lx/%x\n",
++	DBG_RUN("%s() iovp %#lx/%zx\n",
+ 		__func__, (long)iova, size);
+ 
+ 	iova ^= offset;        /* clear offset bits */
+@@ -1291,7 +1291,7 @@ ccio_ioc_init(struct ioc *ioc)
+ 			iova_space_size>>20,
+ 			iov_order + PAGE_SHIFT);
+ 
+-	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, 
++	ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
+ 						 get_order(ioc->pdir_size));
+ 	if(NULL == ioc->pdir_base) {
+ 		panic("%s() could not allocate I/O Page Table\n", __func__);
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index 0905be256de08..c43f1a212a5c8 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -14,13 +14,13 @@
+ static inline unsigned int
+ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, 
+ 		unsigned long hint,
+-		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
++		void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
+ 					    unsigned long))
+ {
+ 	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
+ 	unsigned int n_mappings = 0;
+ 	unsigned long dma_offset = 0, dma_len = 0;
+-	u64 *pdirp = NULL;
++	__le64 *pdirp = NULL;
+ 
+ 	/* Horrible hack.  For efficiency's sake, dma_sg starts one 
+ 	 * entry below the true start (it is immediately incremented
+@@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
+ 		unsigned long vaddr;
+ 		long size;
+ 
+-		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
+-			   (unsigned long)sg_dma_address(startsg), cnt,
++		DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
++			   (unsigned long)sg_dma_address(startsg),
+ 			   sg_virt(startsg), startsg->length
+ 		);
+ 
+diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
+index bcc1dae007803..890c3c0f3d140 100644
+--- a/drivers/parisc/iosapic.c
++++ b/drivers/parisc/iosapic.c
+@@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
+ 
+ static DEFINE_SPINLOCK(iosapic_lock);
+ 
+-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
++static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
+ {
+-	__raw_writel(data, addr);
++	__raw_writel((__force u32)data, addr);
+ }
+ 
+ /*
+diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
+index 73ecc657ad954..bd8ff40162b4b 100644
+--- a/drivers/parisc/iosapic_private.h
++++ b/drivers/parisc/iosapic_private.h
+@@ -118,8 +118,8 @@ struct iosapic_irt {
+ struct vector_info {
+ 	struct iosapic_info *iosapic;	/* I/O SAPIC this vector is on */
+ 	struct irt_entry *irte;		/* IRT entry */
+-	u32 __iomem *eoi_addr;		/* precalculate EOI reg address */
+-	u32	eoi_data;		/* IA64: ?       PA: swapped txn_data */
++	__le32 __iomem *eoi_addr;	/* precalculate EOI reg address */
++	__le32	eoi_data;		/* IA64: ?       PA: swapped txn_data */
+ 	int	txn_irq;		/* virtual IRQ number for processor */
+ 	ulong	txn_addr;		/* IA64: id_eid  PA: partial HPA */
+ 	u32	txn_data;		/* CPU interrupt bit */
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index b8e91cbb60567..780ea219cd8d4 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -202,7 +202,7 @@ static void
+ sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
+ {
+ 	/* start printing from lowest pde in rval */
+-	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
++	__le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
+ 	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
+ 	uint rcnt;
+ 
+@@ -569,7 +569,7 @@ typedef unsigned long space_t;
+  */
+ 
+ static void
+-sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
++sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ 		  unsigned long hint)
+ {
+ 	u64 pa; /* physical address */
+@@ -613,7 +613,7 @@ static void
+ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
+ {
+ 	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+-	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
++	__le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
+ 
+ #ifdef ASSERT_PDIR_SANITY
+ 	/* Assert first pdir entry is set.
+@@ -714,7 +714,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
+ 	unsigned long flags; 
+ 	dma_addr_t iovp;
+ 	dma_addr_t offset;
+-	u64 *pdir_start;
++	__le64 *pdir_start;
+ 	int pide;
+ 
+ 	ioc = GET_IOC(dev);
+@@ -1432,7 +1432,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
+ 
+ 	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
+ 
+-	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
++	DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
+ 			__func__,
+ 			ioc->ioc_hpa,
+ 			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
+@@ -1469,7 +1469,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
+ 	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
+ #endif
+ 
+-	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
++	DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
+ 		__func__, ioc->ibase, ioc->imask);
+ 
+ 	/*
+@@ -1581,7 +1581,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
+ 
+ 	if (!IS_PLUTO(sba_dev->dev)) {
+ 		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
+-		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
++		DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
+ 			__func__, sba_dev->sba_hpa, ioc_ctl);
+ 		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
+ 		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
+@@ -1666,14 +1666,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
+ 		/* flush out the last writes */
+ 		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
+ 
+-		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
++		DBG_INIT("	ioc[%d] ROPE_CFG %#lx  ROPE_DBG %lx\n",
+ 				i,
+-				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
+-				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
++				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
++				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
+ 			);
+-		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
+-				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
+-				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
++		DBG_INIT("	STATUS_CONTROL %#lx  FLUSH_CTRL %#lx\n",
++				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
++				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
+ 			);
+ 
+ 		if (IS_PLUTO(sba_dev->dev)) {
+@@ -1737,7 +1737,7 @@ sba_common_init(struct sba_device *sba_dev)
+ #ifdef ASSERT_PDIR_SANITY
+ 		/* Mark first bit busy - ie no IOVA 0 */
+ 		sba_dev->ioc[i].res_map[0] = 0x80;
+-		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
++		sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
+ #endif
+ 
+ 		/* Third (and last) part of PIRANHA BUG */
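Most of the parisc changes are endianness-annotation and printk-format fixes
rather than behaviour changes: IOMMU page-directory entries and the IOSAPIC
EOI register hold little-endian data, so their types become the sparse
"bitwise" __le64/__le32, and the DBG format strings gain %zu/%lu/%px to
match their argument types. The annotation discipline in one line each
(sketch, not driver code):

    __le64 entry = cpu_to_le64(pa);     /* CPU-order value -> LE storage */
    u64    back  = le64_to_cpu(entry);  /* LE storage -> CPU-order value */
    entry = (__force __le64)0xeeffc0addbba0080ULL; /* deliberate bypass,
                                                      as in the pdir poison */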
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 30b50920b278c..f7dfa0e785fd6 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -60,6 +60,7 @@ config MLXBF_BOOTCTL
+ 	tristate "Mellanox BlueField Firmware Boot Control driver"
+ 	depends on ARM64
+ 	depends on ACPI
++	depends on NET
+ 	help
+ 	  The Mellanox BlueField firmware implements functionality to
+ 	  request swapping the primary and alternate eMMC boot partition,
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index fdf7da06af306..d85d895fee894 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -478,6 +478,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_tablet_mode,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUS ROG FLOW X16",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
++		},
++		.driver_data = &quirk_asus_tablet_mode,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "ASUS VivoBook E410MA",
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 6851d10d65825..a68df41334035 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ 
+@@ -231,19 +232,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
+ /* Wait till scu status is busy */
+ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
+ {
+-	unsigned long end = jiffies + IPC_TIMEOUT;
+-
+-	do {
+-		u32 status;
+-
+-		status = ipc_read_status(scu);
+-		if (!(status & IPC_STATUS_BUSY))
+-			return (status & IPC_STATUS_ERR) ? -EIO : 0;
++	u8 status;
++	int err;
+ 
+-		usleep_range(50, 100);
+-	} while (time_before(jiffies, end));
++	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
++				 100, jiffies_to_usecs(IPC_TIMEOUT));
++	if (err)
++		return err;
+ 
+-	return -ETIMEDOUT;
++	return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ }
+ 
+ /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
+@@ -251,10 +248,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
+ {
+ 	int status;
+ 
+-	if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
+-		return -ETIMEDOUT;
++	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
+ 
+ 	status = ipc_read_status(scu);
++	if (status & IPC_STATUS_BUSY)
++		return -ETIMEDOUT;
++
+ 	if (status & IPC_STATUS_ERR)
+ 		return -EIO;
+ 
+@@ -266,6 +265,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
+ 	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
+ }
+ 
++static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
++{
++	u8 status;
++
++	if (!scu)
++		scu = ipcdev;
++	if (!scu)
++		return ERR_PTR(-ENODEV);
++
++	status = ipc_read_status(scu);
++	if (status & IPC_STATUS_BUSY) {
++		dev_dbg(&scu->dev, "device is busy\n");
++		return ERR_PTR(-EBUSY);
++	}
++
++	return scu;
++}
++
+ /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
+ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ 			u32 count, u32 op, u32 id)
+@@ -279,11 +296,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ 	memset(cbuf, 0, sizeof(cbuf));
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+ 
+ 	for (nc = 0; nc < count; nc++, offset += 2) {
+@@ -438,13 +454,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ 	int err;
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+-	scu = ipcdev;
++
+ 	cmdval = sub << 12 | cmd;
+ 	ipc_command(scu, cmdval);
+ 	err = intel_scu_ipc_check_status(scu);
+@@ -484,11 +499,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+ 
+ 	memcpy(inbuf, in, inlen);
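The intel_scu_ipc rework has three parts. busy_loop() becomes a
readx_poll_timeout() call; the helper's contract, from <linux/iopoll.h>:

    /* status = ipc_read_status(scu) is re-read every 100 us until the
     * condition holds (returns 0) or the timeout elapses (-ETIMEDOUT);
     * status keeps the last value read either way. */
    err = readx_poll_timeout(ipc_read_status, scu, status,
                             !(status & IPC_STATUS_BUSY),
                             100, jiffies_to_usecs(IPC_TIMEOUT));

ipc_wait_for_interrupt() now trusts the status register rather than the
completion: if the interrupt fires just after the wait times out, the command
still finished, so only a status that is still BUSY counts as -ETIMEDOUT.
Finally, intel_scu_ipc_get() centralizes the "which device, and is it free"
check, returning ERR_PTR(-ENODEV) or ERR_PTR(-EBUSY) so the three callers
share one error path.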
+diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
+index 6f83e99d2eb72..ce36d6ca34226 100644
+--- a/drivers/power/supply/ab8500_btemp.c
++++ b/drivers/power/supply/ab8500_btemp.c
+@@ -115,7 +115,6 @@ struct ab8500_btemp {
+ static enum power_supply_property ab8500_btemp_props[] = {
+ 	POWER_SUPPLY_PROP_PRESENT,
+ 	POWER_SUPPLY_PROP_ONLINE,
+-	POWER_SUPPLY_PROP_TECHNOLOGY,
+ 	POWER_SUPPLY_PROP_TEMP,
+ };
+ 
+@@ -532,12 +531,6 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
+ 		else
+ 			val->intval = 1;
+ 		break;
+-	case POWER_SUPPLY_PROP_TECHNOLOGY:
+-		if (di->bm->bi)
+-			val->intval = di->bm->bi->technology;
+-		else
+-			val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+-		break;
+ 	case POWER_SUPPLY_PROP_TEMP:
+ 		val->intval = ab8500_btemp_get_temp(di);
+ 		break;
+@@ -662,7 +655,7 @@ static char *supply_interface[] = {
+ 
+ static const struct power_supply_desc ab8500_btemp_desc = {
+ 	.name			= "ab8500_btemp",
+-	.type			= POWER_SUPPLY_TYPE_BATTERY,
++	.type			= POWER_SUPPLY_TYPE_UNKNOWN,
+ 	.properties		= ab8500_btemp_props,
+ 	.num_properties		= ARRAY_SIZE(ab8500_btemp_props),
+ 	.get_property		= ab8500_btemp_get_property,
+diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
+index ea4ad61d4c7e2..2205ea0834a61 100644
+--- a/drivers/power/supply/ab8500_chargalg.c
++++ b/drivers/power/supply/ab8500_chargalg.c
+@@ -1720,7 +1720,7 @@ static char *supply_interface[] = {
+ 
+ static const struct power_supply_desc ab8500_chargalg_desc = {
+ 	.name			= "ab8500_chargalg",
+-	.type			= POWER_SUPPLY_TYPE_BATTERY,
++	.type			= POWER_SUPPLY_TYPE_UNKNOWN,
+ 	.properties		= ab8500_chargalg_props,
+ 	.num_properties		= ARRAY_SIZE(ab8500_chargalg_props),
+ 	.get_property		= ab8500_chargalg_get_property,
+diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
+index f27dae5043f5b..a9641bd3d8cf8 100644
+--- a/drivers/power/supply/mt6370-charger.c
++++ b/drivers/power/supply/mt6370-charger.c
+@@ -324,7 +324,7 @@ static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
+ 
+ 	if (fl_strobe) {
+ 		dev_err(priv->dev, "Flash led is still in strobe mode\n");
+-		return ret;
++		return -EINVAL;
+ 	}
+ 
+ 	/* cfo off */
+diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
+index 06e5b6b0e255c..d483a81560ab0 100644
+--- a/drivers/power/supply/power_supply_sysfs.c
++++ b/drivers/power/supply/power_supply_sysfs.c
+@@ -482,6 +482,13 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * Kernel generates KOBJ_REMOVE uevent in device removal path, after
++	 * resources have been freed. Exit early to avoid use-after-free.
++	 */
++	if (psy->removing)
++		return 0;
++
+ 	prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ 	if (!prop_buf)
+ 		return -ENOMEM;
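The power-supply uevent hook can be invoked for the KOBJ_REMOVE event that
device teardown emits, at which point the property data it would format is
already gone. The guard pairs with the removal path raising the flag first;
that counterpart lives in the power-supply core and is not shown in this
hunk, so the following is only its assumed shape:

    psy->removing = true;         /* raised before teardown in unregister  */
    ...
    device_unregister(&psy->dev); /* emits KOBJ_REMOVE; uevent now bails   */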
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index 8328bcea1a299..f64daf5a41d93 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -1045,6 +1045,13 @@ static void rk817_charging_monitor(struct work_struct *work)
+ 	queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+ }
+ 
++static void rk817_cleanup_node(void *data)
++{
++	struct device_node *node = data;
++
++	of_node_put(node);
++}
++
+ static int rk817_charger_probe(struct platform_device *pdev)
+ {
+ 	struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+@@ -1061,11 +1068,13 @@ static int rk817_charger_probe(struct platform_device *pdev)
+ 	if (!node)
+ 		return -ENODEV;
+ 
++	ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
++	if (ret)
++		return ret;
++
+ 	charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+-	if (!charger) {
+-		of_node_put(node);
++	if (!charger)
+ 		return -ENOMEM;
+-	}
+ 
+ 	charger->rk808 = rk808;
+ 
+@@ -1211,3 +1220,4 @@ MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
+ MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
+ MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:rk817-charger");
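The rk817 probe previously had to remember of_node_put() on every error path
(and missed some). devm_add_action_or_reset() ties the cleanup to the device
lifetime instead; its contract:

    /* ret == 0: rk817_cleanup_node(node) runs automatically on unbind or
     *           on any later probe failure;
     * ret != 0: registration itself failed and the callback has ALREADY
     *           run, so the caller only returns the error. */
    ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
    if (ret)
        return ret;

The new MODULE_ALIAS lets the module autoload when the rk808 MFD core
instantiates the "rk817-charger" platform device.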
+diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
+index 683adb18253dd..fdfdc83ab0458 100644
+--- a/drivers/power/supply/rt9467-charger.c
++++ b/drivers/power/supply/rt9467-charger.c
+@@ -598,8 +598,8 @@ static int rt9467_run_aicl(struct rt9467_chg_data *data)
+ 
+ 	reinit_completion(&data->aicl_done);
+ 	ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500));
+-	if (ret)
+-		return ret;
++	if (ret == 0)
++		return -ETIMEDOUT;
+ 
+ 	ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get);
+ 	if (ret) {
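The rt9467 fix corrects a common API misreading: wait_for_completion_timeout()
never returns a negative errno. It returns 0 on timeout and the remaining
jiffies (> 0) on success, so the old "if (ret) return ret" bailed out early
with a positive jiffies count on success, while a real timeout fell through
as if the AICL measurement had completed. Correct shape:

    unsigned long time_left;

    time_left = wait_for_completion_timeout(&data->aicl_done,
                                            msecs_to_jiffies(3500));
    if (time_left == 0)
        return -ETIMEDOUT;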
+diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
+index 954feba6600b8..7970843a4f480 100644
+--- a/drivers/power/supply/ucs1002_power.c
++++ b/drivers/power/supply/ucs1002_power.c
+@@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy,
+ 	case POWER_SUPPLY_PROP_USB_TYPE:
+ 		return ucs1002_get_usb_type(info, val);
+ 	case POWER_SUPPLY_PROP_HEALTH:
+-		return val->intval = info->health;
++		val->intval = info->health;
++		return 0;
+ 	case POWER_SUPPLY_PROP_PRESENT:
+ 		val->intval = info->present;
+ 		return 0;
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 9ab8555180a3a..8e14cea15f980 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -724,6 +724,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ 		return -EEXIST;
+ 	}
+ 
++	err = -EINVAL;
++	if (!sk_is_tcp(sock->sk))
++		goto free_socket;
++
+ 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ 	if (err)
+ 		goto free_socket;
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 73cd25f30ca58..00f22058ccf4e 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -4180,7 +4180,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ 	payload.sas_identify.dev_type = SAS_END_DEVICE;
+ 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ 	memcpy(payload.sas_identify.sas_addr,
+-		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
++		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ 	payload.sas_identify.phy_id = phy_id;
+ 
+ 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 39a12ee94a72f..e543bc36c84df 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -3671,10 +3671,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+ 			(struct set_ctrl_cfg_resp *)(piomb + 4);
+ 	u32 status = le32_to_cpu(pPayload->status);
+ 	u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
++	u32 tag = le32_to_cpu(pPayload->tag);
+ 
+ 	pm8001_dbg(pm8001_ha, MSG,
+ 		   "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+ 		   status, err_qlfr_pgcd);
++	pm8001_tag_free(pm8001_ha, tag);
+ 
+ 	return 0;
+ }
+@@ -4676,7 +4678,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ 	payload.sas_identify.dev_type = SAS_END_DEVICE;
+ 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ 	memcpy(payload.sas_identify.sas_addr,
+-	  &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
++		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ 	payload.sas_identify.phy_id = phy_id;
+ 
+ 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 4750ec5789a80..10fe3383855c0 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
+ 		goto drop_rdata_kref;
+ 	}
+ 
++	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+ 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
+ 			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
+ 			 io_req->xid, io_req->sc_cmd);
+ 		rc = 1;
++		spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 		goto drop_rdata_kref;
+ 	}
+ 
++	/* Set the command type to abort */
++	io_req->cmd_type = QEDF_ABTS;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
++
+ 	kref_get(&io_req->refcount);
+ 
+ 	xid = io_req->xid;
+ 	qedf->control_requests++;
+ 	qedf->packet_aborts++;
+ 
+-	/* Set the command type to abort */
+-	io_req->cmd_type = QEDF_ABTS;
+ 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+ 
+ 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+@@ -2210,7 +2214,9 @@ process_els:
+ 		  refcount, fcport, fcport->rdata->ids.port_id);
+ 
+ 	/* Cleanup cmds re-use the same TID as the original I/O */
++	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 	io_req->cmd_type = QEDF_CLEANUP;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+ 
+ 	init_completion(&io_req->cleanup_done);
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 7825765c936cd..91f3f1d7098eb 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -2805,6 +2805,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
+ 	struct qedf_ioreq *io_req;
+ 	struct qedf_rport *fcport;
+ 	u32 comp_type;
++	u8 io_comp_type;
++	unsigned long flags;
+ 
+ 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
+ 	    FCOE_CQE_CQE_TYPE_MASK;
+@@ -2838,11 +2840,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
+ 		return;
+ 	}
+ 
++	spin_lock_irqsave(&fcport->rport_lock, flags);
++	io_comp_type = io_req->cmd_type;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 
+ 	switch (comp_type) {
+ 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
+ 		atomic_inc(&fcport->free_sqes);
+-		switch (io_req->cmd_type) {
++		switch (io_comp_type) {
+ 		case QEDF_SCSI_CMD:
+ 			qedf_scsi_completion(qedf, cqe, io_req);
+ 			break;
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index d0911bc28663a..89367c4bf0ef5 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -613,6 +613,17 @@ void scsi_cdl_check(struct scsi_device *sdev)
+ 	bool cdl_supported;
+ 	unsigned char *buf;
+ 
++	/*
++	 * Support for CDL was defined in SPC-5. Ignore devices reporting a
++	 * lower SPC version. This also avoids problems with old drives choking
++	 * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
++	 * service action specified, as done in scsi_cdl_check_cmd().
++	 */
++	if (sdev->scsi_level < SCSI_SPC_5) {
++		sdev->cdl_supported = 0;
++		return;
++	}
++
+ 	buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
+ 	if (!buf) {
+ 		sdev->cdl_supported = 0;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index aa13feb17c626..97669657a9976 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -822,7 +822,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
+ 	 * non-zero LUNs can be scanned.
+ 	 */
+-	sdev->scsi_level = inq_result[2] & 0x07;
++	sdev->scsi_level = inq_result[2] & 0x0f;
+ 	if (sdev->scsi_level >= 2 ||
+ 	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
+ 		sdev->scsi_level++;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 3c668cfb146d3..d6535cbb4e05e 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -213,18 +213,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ }
+ 
+ static ssize_t
+-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
+-		       char *buf)
++manage_start_stop_show(struct device *dev,
++		       struct device_attribute *attr, char *buf)
+ {
+ 	struct scsi_disk *sdkp = to_scsi_disk(dev);
+ 	struct scsi_device *sdp = sdkp->device;
+ 
+-	return sprintf(buf, "%u\n", sdp->manage_start_stop);
++	return sysfs_emit(buf, "%u\n",
++			  sdp->manage_system_start_stop &&
++			  sdp->manage_runtime_start_stop);
+ }
++static DEVICE_ATTR_RO(manage_start_stop);
+ 
+ static ssize_t
+-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+-			const char *buf, size_t count)
++manage_system_start_stop_show(struct device *dev,
++			      struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
++}
++
++static ssize_t
++manage_system_start_stop_store(struct device *dev,
++			       struct device_attribute *attr,
++			       const char *buf, size_t count)
+ {
+ 	struct scsi_disk *sdkp = to_scsi_disk(dev);
+ 	struct scsi_device *sdp = sdkp->device;
+@@ -236,11 +250,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+ 	if (kstrtobool(buf, &v))
+ 		return -EINVAL;
+ 
+-	sdp->manage_start_stop = v;
++	sdp->manage_system_start_stop = v;
+ 
+ 	return count;
+ }
+-static DEVICE_ATTR_RW(manage_start_stop);
++static DEVICE_ATTR_RW(manage_system_start_stop);
++
++static ssize_t
++manage_runtime_start_stop_show(struct device *dev,
++			       struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
++}
++
++static ssize_t
++manage_runtime_start_stop_store(struct device *dev,
++				struct device_attribute *attr,
++				const char *buf, size_t count)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++	bool v;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++
++	if (kstrtobool(buf, &v))
++		return -EINVAL;
++
++	sdp->manage_runtime_start_stop = v;
++
++	return count;
++}
++static DEVICE_ATTR_RW(manage_runtime_start_stop);
+ 
+ static ssize_t
+ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+@@ -572,6 +617,8 @@ static struct attribute *sd_disk_attrs[] = {
+ 	&dev_attr_FUA.attr,
+ 	&dev_attr_allow_restart.attr,
+ 	&dev_attr_manage_start_stop.attr,
++	&dev_attr_manage_system_start_stop.attr,
++	&dev_attr_manage_runtime_start_stop.attr,
+ 	&dev_attr_protection_type.attr,
+ 	&dev_attr_protection_mode.attr,
+ 	&dev_attr_app_tag_own.attr,
+@@ -3733,7 +3780,8 @@ static int sd_remove(struct device *dev)
+ 
+ 	device_del(&sdkp->disk_dev);
+ 	del_gendisk(sdkp->disk);
+-	sd_shutdown(dev);
++	if (!sdkp->suspended)
++		sd_shutdown(dev);
+ 
+ 	put_disk(sdkp->disk);
+ 	return 0;
+@@ -3810,13 +3858,20 @@ static void sd_shutdown(struct device *dev)
+ 		sd_sync_cache(sdkp, NULL);
+ 	}
+ 
+-	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
++	if (system_state != SYSTEM_RESTART &&
++	    sdkp->device->manage_system_start_stop) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		sd_start_stop_device(sdkp, 0);
+ 	}
+ }
+ 
+-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
++static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
++{
++	return (sdev->manage_system_start_stop && !runtime) ||
++		(sdev->manage_runtime_start_stop && runtime);
++}
++
++static int sd_suspend_common(struct device *dev, bool runtime)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 	struct scsi_sense_hdr sshdr;
+@@ -3848,15 +3903,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+ 		}
+ 	}
+ 
+-	if (sdkp->device->manage_start_stop) {
++	if (sd_do_start_stop(sdkp->device, runtime)) {
+ 		if (!sdkp->device->silence_suspend)
+ 			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		/* an error is not worth aborting a system sleep */
+ 		ret = sd_start_stop_device(sdkp, 0);
+-		if (ignore_stop_errors)
++		if (!runtime)
+ 			ret = 0;
+ 	}
+ 
++	if (!ret)
++		sdkp->suspended = true;
++
+ 	return ret;
+ }
+ 
+@@ -3865,15 +3923,15 @@ static int sd_suspend_system(struct device *dev)
+ 	if (pm_runtime_suspended(dev))
+ 		return 0;
+ 
+-	return sd_suspend_common(dev, true);
++	return sd_suspend_common(dev, false);
+ }
+ 
+ static int sd_suspend_runtime(struct device *dev)
+ {
+-	return sd_suspend_common(dev, false);
++	return sd_suspend_common(dev, true);
+ }
+ 
+-static int sd_resume(struct device *dev)
++static int sd_resume(struct device *dev, bool runtime)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 	int ret = 0;
+@@ -3881,16 +3939,21 @@ static int sd_resume(struct device *dev)
+ 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
+ 		return 0;
+ 
+-	if (!sdkp->device->manage_start_stop)
++	if (!sd_do_start_stop(sdkp->device, runtime)) {
++		sdkp->suspended = false;
+ 		return 0;
++	}
+ 
+ 	if (!sdkp->device->no_start_on_resume) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+ 		ret = sd_start_stop_device(sdkp, 1);
+ 	}
+ 
+-	if (!ret)
++	if (!ret) {
+ 		opal_unlock_from_suspend(sdkp->opal_dev);
++		sdkp->suspended = false;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -3899,7 +3962,7 @@ static int sd_resume_system(struct device *dev)
+ 	if (pm_runtime_suspended(dev))
+ 		return 0;
+ 
+-	return sd_resume(dev);
++	return sd_resume(dev, false);
+ }
+ 
+ static int sd_resume_runtime(struct device *dev)
+@@ -3926,7 +3989,7 @@ static int sd_resume_runtime(struct device *dev)
+ 				  "Failed to clear sense data\n");
+ 	}
+ 
+-	return sd_resume(dev);
++	return sd_resume(dev, true);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 5eea762f84d18..409dda5350d10 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -131,6 +131,7 @@ struct scsi_disk {
+ 	u8		provisioning_mode;
+ 	u8		zeroing_mode;
+ 	u8		nr_actuators;		/* Number of actuators */
++	bool		suspended;	/* Disk is suspended (stopped) */
+ 	unsigned	ATO : 1;	/* state of disk ATO bit */
+ 	unsigned	cache_override : 1; /* temp override of WCE,RCD */
+ 	unsigned	WCE : 1;	/* state of disk WCE bit */
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index 1dcd243df5677..ec87d9d878f30 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -100,6 +100,7 @@ static void __init imx8mm_soc_uid(void)
+ {
+ 	void __iomem *ocotp_base;
+ 	struct device_node *np;
++	struct clk *clk;
+ 	u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ 		     IMX8MP_OCOTP_UID_OFFSET : 0;
+ 
+@@ -109,11 +110,20 @@ static void __init imx8mm_soc_uid(void)
+ 
+ 	ocotp_base = of_iomap(np, 0);
+ 	WARN_ON(!ocotp_base);
++	clk = of_clk_get_by_name(np, NULL);
++	if (IS_ERR(clk)) {
++		WARN_ON(IS_ERR(clk));
++		return;
++	}
++
++	clk_prepare_enable(clk);
+ 
+ 	soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
+ 	soc_uid <<= 32;
+ 	soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ 
++	clk_disable_unprepare(clk);
++	clk_put(clk);
+ 	iounmap(ocotp_base);
+ 	of_node_put(np);
+ }
+diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
+index 684d63f402f34..aba08d06c251c 100644
+--- a/drivers/spi/spi-gxp.c
++++ b/drivers/spi/spi-gxp.c
+@@ -195,7 +195,8 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op
+ 		return ret;
+ 	}
+ 
+-	return write_len;
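++	/* exec_op() expects 0 on success, not a byte count. */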
++	return 0;
+ }
+ 
+ static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index a7381e774b953..57d767a68e7b2 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+ 	{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
++	{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index 5440176557875..8e44de084bbe3 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -1085,6 +1085,13 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
+ 	fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ 		 base + FSPI_AHBCR);
+ 
++	/* Reset the FLSHxCR1 registers. */
++	reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
++	fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
++
+ 	/* AHB Read - Set lut sequence ID for all CS. */
+ 	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ 	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 7ddf9db776b06..4737a36e5d4e9 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -275,6 +275,7 @@ struct stm32_spi_cfg {
+  * @fifo_size: size of the embedded fifo in bytes
+  * @cur_midi: master inter-data idleness in ns
+  * @cur_speed: speed configured in Hz
++ * @cur_half_period: time of a half bit in us
+  * @cur_bpw: number of bits in a single SPI data frame
+  * @cur_fthlv: fifo threshold level (data frames in a single data packet)
+  * @cur_comm: SPI communication mode
+@@ -302,6 +303,7 @@ struct stm32_spi {
+ 
+ 	unsigned int cur_midi;
+ 	unsigned int cur_speed;
++	unsigned int cur_half_period;
+ 	unsigned int cur_bpw;
+ 	unsigned int cur_fthlv;
+ 	unsigned int cur_comm;
+@@ -466,6 +468,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ 
+ 	spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+ 
++	spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
++
+ 	return mbrdiv - 1;
+ }
+ 
+@@ -707,6 +711,10 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
+ 		return;
+ 	}
+ 
++	/* Add a delay to make sure that transmission is ended. */
++	if (spi->cur_half_period)
++		udelay(spi->cur_half_period);
++
+ 	if (spi->cur_usedma && spi->dma_tx)
+ 		dmaengine_terminate_async(spi->dma_tx);
+ 	if (spi->cur_usedma && spi->dma_rx)
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index cec2747235abf..26abd26dc3652 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -106,6 +106,7 @@ struct sun6i_spi {
+ 	struct reset_control	*rstc;
+ 
+ 	struct completion	done;
++	struct completion	dma_rx_done;
+ 
+ 	const u8		*tx_buf;
+ 	u8			*rx_buf;
+@@ -200,6 +201,13 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
+ 	return SUN6I_MAX_XFER_SIZE - 1;
+ }
+ 
++static void sun6i_spi_dma_rx_cb(void *param)
++{
++	struct sun6i_spi *sspi = param;
++
++	complete(&sspi->dma_rx_done);
++}
++
+ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 				 struct spi_transfer *tfr)
+ {
+@@ -211,7 +219,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 		struct dma_slave_config rxconf = {
+ 			.direction = DMA_DEV_TO_MEM,
+ 			.src_addr = sspi->dma_addr_rx,
+-			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
++			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ 			.src_maxburst = 8,
+ 		};
+ 
+@@ -224,6 +232,8 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 						 DMA_PREP_INTERRUPT);
+ 		if (!rxdesc)
+ 			return -EINVAL;
++		rxdesc->callback_param = sspi;
++		rxdesc->callback = sun6i_spi_dma_rx_cb;
+ 	}
+ 
+ 	txdesc = NULL;
+@@ -279,6 +289,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 		return -EINVAL;
+ 
+ 	reinit_completion(&sspi->done);
++	reinit_completion(&sspi->dma_rx_done);
+ 	sspi->tx_buf = tfr->tx_buf;
+ 	sspi->rx_buf = tfr->rx_buf;
+ 	sspi->len = tfr->len;
+@@ -479,6 +490,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	start = jiffies;
+ 	timeout = wait_for_completion_timeout(&sspi->done,
+ 					      msecs_to_jiffies(tx_time));
++
++	if (!use_dma) {
++		sun6i_spi_drain_fifo(sspi);
++	} else {
++		if (timeout && rx_len) {
++			/*
++			 * Even though RX on the peripheral side has finished,
++			 * RX DMA might still be in flight
++			 */
++			timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
++							      timeout);
++			if (!timeout)
++				dev_warn(&master->dev, "RX DMA timeout\n");
++		}
++	}
++
+ 	end = jiffies;
+ 	if (!timeout) {
+ 		dev_warn(&master->dev,
+@@ -506,7 +533,6 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+ 	/* Transfer complete */
+ 	if (status & SUN6I_INT_CTL_TC) {
+ 		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+-		sun6i_spi_drain_fifo(sspi);
+ 		complete(&sspi->done);
+ 		return IRQ_HANDLED;
+ 	}
+@@ -665,6 +691,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	init_completion(&sspi->done);
++	init_completion(&sspi->dma_rx_done);
+ 
+ 	sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 	if (IS_ERR(sspi->rstc)) {
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index fb2ca9b90eabf..c309dedfd6020 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -1342,9 +1342,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ clk_dis_all:
+-	pm_runtime_put_sync(&pdev->dev);
+-	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+ clk_dis_pclk:
+ 	clk_disable_unprepare(xqspi->pclk);
+@@ -1368,11 +1368,16 @@ static void zynqmp_qspi_remove(struct platform_device *pdev)
+ {
+ 	struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+ 
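++	/* Runtime-resume the device before touching its registers below. */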
++	pm_runtime_get_sync(&pdev->dev);
++
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
++
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+ 	clk_disable_unprepare(xqspi->pclk);
+-	pm_runtime_set_suspended(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+ }
+ 
+ MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 22272f9c5934a..e615f735f4c03 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -38,8 +38,10 @@ static int of_find_trip_id(struct device_node *np, struct device_node *trip)
+ 	 */
+ 	for_each_child_of_node(trips, t) {
+ 
+-		if (t == trip)
++		if (t == trip) {
++			of_node_put(t);
+ 			goto out;
++		}
+ 		i++;
+ 	}
+ 
+@@ -402,8 +404,10 @@ static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
+ 
+ 	for_each_child_of_node(cm_np, child) {
+ 		ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
+-		if (ret)
++		if (ret) {
++			of_node_put(child);
+ 			break;
++		}
+ 	}
+ 
+ 	of_node_put(cm_np);
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 6c20c9f90a05a..4e6a97db894e9 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -185,9 +185,6 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
+ 	if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
+ 		return -EINVAL;
+ 
+-	if (kstrtoint(buf, 10, &trip.hysteresis))
+-		return -EINVAL;
+-
+ 	mutex_lock(&tz->lock);
+ 
+ 	if (!device_is_registered(dev)) {
+@@ -198,7 +195,12 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
+ 	ret = __thermal_zone_get_trip(tz, trip_id, &trip);
+ 	if (ret)
+ 		goto unlock;
+-	
++
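++	/* Parse the new value only after the trip has been validated. */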
++	ret = kstrtoint(buf, 10, &trip.hysteresis);
++	if (ret)
++		goto unlock;
++
+ 	ret = thermal_zone_set_trip(tz, trip_id, &trip);
+ unlock:
+ 	mutex_unlock(&tz->lock);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 739f522cb893c..5574b4b61a25c 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -3071,10 +3071,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ 		gsm->has_devices = false;
+ 	}
+ 	for (i = NUM_DLCI - 1; i >= 0; i--)
+-		if (gsm->dlci[i]) {
++		if (gsm->dlci[i])
+ 			gsm_dlci_release(gsm->dlci[i]);
+-			gsm->dlci[i] = NULL;
+-		}
+ 	mutex_unlock(&gsm->mutex);
+ 	/* Now wipe the queues */
+ 	tty_ldisc_flush(gsm->tty);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 483bb552cdc40..c4da580bcb444 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1929,7 +1929,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 		skip_rx = true;
+ 
+ 	if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+-		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++		struct irq_data *d;
++
++		d = irq_get_irq_data(port->irq);
++		if (d && irqd_is_wakeup_set(d))
+ 			pm_wakeup_event(tport->tty->dev, 0);
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 9615a076735bd..80c48eb6bf85c 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -22,6 +22,7 @@
+ #include <linux/module.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/sched/clock.h>
++#include <linux/iopoll.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_dbg.h>
+ #include <scsi/scsi_driver.h>
+@@ -2324,7 +2325,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
+  */
+ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+ {
+-	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
++	u32 val;
++	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
++				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
++				    REG_CONTROLLER_STATUS);
++	return ret == 0;
+ }
+ 
+ /**
+@@ -2416,7 +2421,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ 		      bool completion)
+ {
+ 	lockdep_assert_held(&hba->uic_cmd_mutex);
+-	lockdep_assert_held(hba->host->host_lock);
+ 
+ 	if (!ufshcd_ready_for_uic_cmd(hba)) {
+ 		dev_err(hba->dev,
+@@ -2443,7 +2447,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ {
+ 	int ret;
+-	unsigned long flags;
+ 
+ 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ 		return 0;
+@@ -2452,9 +2455,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ 	mutex_lock(&hba->uic_cmd_mutex);
+ 	ufshcd_add_delay_before_dme_cmd(hba);
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+ 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	if (!ret)
+ 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+ 
+@@ -4166,8 +4167,9 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		wmb();
+ 		reenable_intr = true;
+ 	}
+-	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
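++	/* Send outside the host lock: the ready check may now sleep-poll. */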
++	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ 	if (ret) {
+ 		dev_err(hba->dev,
+ 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
+index e4490639d3833..9d2738e10c0b9 100644
+--- a/drivers/vfio/mdev/mdev_sysfs.c
++++ b/drivers/vfio/mdev/mdev_sysfs.c
+@@ -233,7 +233,9 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
+ out_err:
+ 	while (--i >= 0)
+ 		mdev_type_remove(parent->types[i]);
+-	return 0;
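++	/* Also drop the kset created earlier, then propagate the error. */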
++	kset_unregister(parent->mdev_types_kset);
++	return ret;
+ }
+ 
+ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 6df9bd09454a2..80c999a67779f 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2003,7 +2003,7 @@ config FB_COBALT
+ 
+ config FB_SH7760
+ 	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
+-	depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
++	depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ 		|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 1c6c5832af86d..124544fb27b1d 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -345,10 +345,10 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	/* there's now no turning back... the old userspace image is dead,
+ 	 * defunct, deceased, etc.
+ 	 */
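++	/* SET_PERSONALITY() rewrites personality; OR flags in afterwards. */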
++	SET_PERSONALITY(exec_params.hdr);
+ 	if (elf_check_fdpic(&exec_params.hdr))
+-		set_personality(PER_LINUX_FDPIC);
+-	else
+-		set_personality(PER_LINUX);
++		current->personality |= PER_LINUX_FDPIC;
+ 	if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
+ 		current->personality |= READ_IMPLIES_EXEC;
+ 
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 0f147240ce9b8..142e0a0f6a9fe 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -412,6 +412,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
+ 
+ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ {
++	struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
+ 	struct rb_root_cached *root;
+ 	struct btrfs_delayed_root *delayed_root;
+ 
+@@ -419,18 +420,21 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ 	if (RB_EMPTY_NODE(&delayed_item->rb_node))
+ 		return;
+ 
+-	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
++	/* If it's in an rbtree, then we need to have the delayed node locked. */
++	lockdep_assert_held(&delayed_node->mutex);
++
++	delayed_root = delayed_node->root->fs_info->delayed_root;
+ 
+ 	BUG_ON(!delayed_root);
+ 
+ 	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
+-		root = &delayed_item->delayed_node->ins_root;
++		root = &delayed_node->ins_root;
+ 	else
+-		root = &delayed_item->delayed_node->del_root;
++		root = &delayed_node->del_root;
+ 
+ 	rb_erase_cached(&delayed_item->rb_node, root);
+ 	RB_CLEAR_NODE(&delayed_item->rb_node);
+-	delayed_item->delayed_node->count--;
++	delayed_node->count--;
+ 
+ 	finish_one_item(delayed_root);
+ }
+@@ -1426,7 +1430,29 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
+ 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
+ }
+ 
+-/* Will return 0 or -ENOMEM */
++static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
++{
++	struct btrfs_fs_info *fs_info = trans->fs_info;
++	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
++
++	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
++		return;
++
++	/*
++	 * Adding the new dir index item does not require touching another
++	 * leaf, so we can release 1 unit of metadata that was previously
++	 * reserved when starting the transaction. This applies only to
++	 * the case where we had a transaction start and excludes the
++	 * transaction join case (when replaying log trees).
++	 */
++	trace_btrfs_space_reservation(fs_info, "transaction",
++				      trans->transid, bytes, 0);
++	btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
++	ASSERT(trans->bytes_reserved >= bytes);
++	trans->bytes_reserved -= bytes;
++}
++
++/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
+ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 				   const char *name, int name_len,
+ 				   struct btrfs_inode *dir,
+@@ -1468,6 +1494,27 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 
+ 	mutex_lock(&delayed_node->mutex);
+ 
++	/*
++	 * First attempt to insert the delayed item. This is to make the error
++	 * handling path simpler in case we fail (-EEXIST). There's no risk of
++	 * any other task coming in and running the delayed item before we do
++	 * the metadata space reservation below, because we are holding the
++	 * delayed node's mutex and that mutex must also be locked before the
++	 * node's delayed items can be run.
++	 */
++	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
++	if (unlikely(ret)) {
++		btrfs_err(trans->fs_info,
++"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
++			  name_len, name, index, btrfs_root_id(delayed_node->root),
++			  delayed_node->inode_id, dir->index_cnt,
++			  delayed_node->index_cnt, ret);
++		btrfs_release_delayed_item(delayed_item);
++		btrfs_release_dir_index_item_space(trans);
++		mutex_unlock(&delayed_node->mutex);
++		goto release_node;
++	}
++
+ 	if (delayed_node->index_item_leaves == 0 ||
+ 	    delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
+ 		delayed_node->curr_index_batch_size = data_len;
+@@ -1485,36 +1532,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 		 * impossible.
+ 		 */
+ 		if (WARN_ON(ret)) {
+-			mutex_unlock(&delayed_node->mutex);
+ 			btrfs_release_delayed_item(delayed_item);
++			mutex_unlock(&delayed_node->mutex);
+ 			goto release_node;
+ 		}
+ 
+ 		delayed_node->index_item_leaves++;
+-	} else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
+-		const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+-
+-		/*
+-		 * Adding the new dir index item does not require touching another
+-		 * leaf, so we can release 1 unit of metadata that was previously
+-		 * reserved when starting the transaction. This applies only to
+-		 * the case where we had a transaction start and excludes the
+-		 * transaction join case (when replaying log trees).
+-		 */
+-		trace_btrfs_space_reservation(fs_info, "transaction",
+-					      trans->transid, bytes, 0);
+-		btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+-		ASSERT(trans->bytes_reserved >= bytes);
+-		trans->bytes_reserved -= bytes;
+-	}
+-
+-	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+-	if (unlikely(ret)) {
+-		btrfs_err(trans->fs_info,
+-			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+-			  name_len, name, delayed_node->root->root_key.objectid,
+-			  delayed_node->inode_id, ret);
+-		BUG();
++	} else {
++		btrfs_release_dir_index_item_space(trans);
+ 	}
+ 	mutex_unlock(&delayed_node->mutex);
+ 
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 2ebc982e8eccb..7cc0ed7532793 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4083,8 +4083,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
+ 	char *dst = (char *)dstv;
+ 	unsigned long i = get_eb_page_index(start);
+ 
+-	if (check_eb_range(eb, start, len))
++	if (check_eb_range(eb, start, len)) {
++		/*
++		 * Invalid range hit, reset the memory, so callers won't get
++		 * some random garbage for their uninitialized memory.
++		 */
++		memset(dstv, 0, len);
+ 		return;
++	}
+ 
+ 	offset = get_eb_offset_in_page(eb, start);
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index fd03e689a6bed..eae9175f2c29b 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1466,8 +1466,13 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 	if (iocb->ki_flags & IOCB_NOWAIT)
+ 		ilock_flags |= BTRFS_ILOCK_TRY;
+ 
+-	/* If the write DIO is within EOF, use a shared lock */
+-	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
++	/*
++	 * If the write DIO is within EOF, use a shared lock and also only if
++	 * security bits will likely not be dropped by file_remove_privs() called
++	 * from btrfs_write_check(). Either will need to be rechecked after the
++	 * lock was acquired.
++	 */
++	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
+ 		ilock_flags |= BTRFS_ILOCK_SHARED;
+ 
+ relock:
+@@ -1475,6 +1480,13 @@ relock:
+ 	if (err < 0)
+ 		return err;
+ 
++	/* Shared lock cannot be used with security bits set. */
++	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
++		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
++		ilock_flags &= ~BTRFS_ILOCK_SHARED;
++		goto relock;
++	}
++
+ 	err = generic_write_checks(iocb, from);
+ 	if (err <= 0) {
+ 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index db2b33a822fcd..d5c112f6091b1 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5931,20 +5931,24 @@ out:
+ 
+ static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
+ {
+-	if (dir->index_cnt == (u64)-1) {
+-		int ret;
++	int ret = 0;
+ 
++	btrfs_inode_lock(dir, 0);
++	if (dir->index_cnt == (u64)-1) {
+ 		ret = btrfs_inode_delayed_dir_index_count(dir);
+ 		if (ret) {
+ 			ret = btrfs_set_inode_index_count(dir);
+ 			if (ret)
+-				return ret;
++				goto out;
+ 		}
+ 	}
+ 
+-	*index = dir->index_cnt;
++	/* index_cnt is the index number of next new entry, so decrement it. */
++	*index = dir->index_cnt - 1;
++out:
++	btrfs_inode_unlock(dir, 0);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -5979,6 +5983,19 @@ static int btrfs_opendir(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
++static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct btrfs_file_private *private = file->private_data;
++	int ret;
++
++	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
++				       &private->last_index);
++	if (ret)
++		return ret;
++
++	return generic_file_llseek(file, offset, whence);
++}
++
+ struct dir_entry {
+ 	u64 ino;
+ 	u64 offset;
+@@ -11059,7 +11076,7 @@ static const struct inode_operations btrfs_dir_inode_operations = {
+ };
+ 
+ static const struct file_operations btrfs_dir_file_operations = {
+-	.llseek		= generic_file_llseek,
++	.llseek		= btrfs_dir_llseek,
+ 	.read		= generic_read_dir,
+ 	.iterate_shared	= btrfs_real_readdir,
+ 	.open		= btrfs_opendir,
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index f1dd172d8d5bd..f285c26c05655 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2111,7 +2111,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	 * calculated f_bavail.
+ 	 */
+ 	if (!mixed && block_rsv->space_info->full &&
+-	    total_free_meta - thresh < block_rsv->size)
++	    (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
+ 		buf->f_bavail = 0;
+ 
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index e2bb0d0072da5..c268bd07e7ddd 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4105,6 +4105,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 
+ 	dout("handle_caps from mds%d\n", session->s_mds);
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	end = msg->front.iov_base + msg->front.iov_len;
+ 	if (msg->front.iov_len < sizeof(*h))
+@@ -4201,7 +4204,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 	     vino.snap, inode);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+ 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
+ 	     (unsigned)seq);
+ 
+@@ -4309,6 +4311,8 @@ done:
+ done_unlocked:
+ 	iput(inode);
+ out:
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	ceph_put_string(extra_info.pool_ns);
+ 
+ 	/* Defer closing the sessions after s_mutex lock being released */
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 5fb367b1d4b06..4b0ba067e9c93 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4550,6 +4550,9 @@ static void handle_lease(struct ceph_mds_client *mdsc,
+ 
+ 	dout("handle_lease from mds%d\n", mds);
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
+ 		goto bad;
+@@ -4568,8 +4571,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
+ 	     dname.len, dname.name);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-
+ 	if (!inode) {
+ 		dout("handle_lease no inode %llx\n", vino.ino);
+ 		goto release;
+@@ -4631,9 +4632,13 @@ release:
+ out:
+ 	mutex_unlock(&session->s_mutex);
+ 	iput(inode);
++
++	ceph_dec_mds_stopping_blocker(mdsc);
+ 	return;
+ 
+ bad:
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	pr_err("corrupt lease message\n");
+ 	ceph_msg_dump(msg);
+ }
+@@ -4829,6 +4834,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+ 	}
+ 
+ 	init_completion(&mdsc->safe_umount_waiters);
++	spin_lock_init(&mdsc->stopping_lock);
++	atomic_set(&mdsc->stopping_blockers, 0);
++	init_completion(&mdsc->stopping_waiter);
+ 	init_waitqueue_head(&mdsc->session_close_wq);
+ 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
+ 	mdsc->quotarealms_inodes = RB_ROOT;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 86d2965e68a1f..cff7392809032 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -381,8 +381,9 @@ struct cap_wait {
+ };
+ 
+ enum {
+-       CEPH_MDSC_STOPPING_BEGIN = 1,
+-       CEPH_MDSC_STOPPING_FLUSHED = 2,
++	CEPH_MDSC_STOPPING_BEGIN = 1,
++	CEPH_MDSC_STOPPING_FLUSHING = 2,
++	CEPH_MDSC_STOPPING_FLUSHED = 3,
+ };
+ 
+ /*
+@@ -401,7 +402,11 @@ struct ceph_mds_client {
+ 	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
+ 	atomic_t		num_sessions;
+ 	int                     max_sessions;  /* len of sessions array */
+-	int                     stopping;      /* true if shutting down */
++
++	spinlock_t              stopping_lock;  /* protect stopping state */
++	int                     stopping;      /* the stage of shutting down */
++	atomic_t                stopping_blockers;
++	struct completion	stopping_waiter;
+ 
+ 	atomic64_t		quotarealms_count; /* # realms with quota */
+ 	/*
+diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
+index 64592adfe48fb..f7fcf7f08ec64 100644
+--- a/fs/ceph/quota.c
++++ b/fs/ceph/quota.c
+@@ -47,25 +47,23 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
+ 	struct inode *inode;
+ 	struct ceph_inode_info *ci;
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	if (msg->front.iov_len < sizeof(*h)) {
+ 		pr_err("%s corrupt message mds%d len %d\n", __func__,
+ 		       session->s_mds, (int)msg->front.iov_len);
+ 		ceph_msg_dump(msg);
+-		return;
++		goto out;
+ 	}
+ 
+-	/* increment msg sequence number */
+-	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-	mutex_unlock(&session->s_mutex);
+-
+ 	/* lookup inode */
+ 	vino.ino = le64_to_cpu(h->ino);
+ 	vino.snap = CEPH_NOSNAP;
+ 	inode = ceph_find_inode(sb, vino);
+ 	if (!inode) {
+ 		pr_warn("Failed to find inode %llu\n", vino.ino);
+-		return;
++		goto out;
+ 	}
+ 	ci = ceph_inode(inode);
+ 
+@@ -78,6 +76,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
+ 	spin_unlock(&ci->i_ceph_lock);
+ 
+ 	iput(inode);
++out:
++	ceph_dec_mds_stopping_blocker(mdsc);
+ }
+ 
+ static struct ceph_quotarealm_inode *
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 343d738448dcd..7ddc6bad77ef3 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1015,6 +1015,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	int locked_rwsem = 0;
+ 	bool close_sessions = false;
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	if (msg->front.iov_len < sizeof(*h))
+ 		goto bad;
+@@ -1030,10 +1033,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
+ 	     mds, ceph_snap_op_name(op), split, trace_len);
+ 
+-	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-	mutex_unlock(&session->s_mutex);
+-
+ 	down_write(&mdsc->snap_rwsem);
+ 	locked_rwsem = 1;
+ 
+@@ -1151,6 +1150,7 @@ skip_inode:
+ 	up_write(&mdsc->snap_rwsem);
+ 
+ 	flush_snaps(mdsc);
++	ceph_dec_mds_stopping_blocker(mdsc);
+ 	return;
+ 
+ bad:
+@@ -1160,6 +1160,8 @@ out:
+ 	if (locked_rwsem)
+ 		up_write(&mdsc->snap_rwsem);
+ 
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	if (close_sessions)
+ 		ceph_mdsc_close_sessions(mdsc);
+ 	return;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index a5f52013314d6..281b493fdac8e 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1365,25 +1365,90 @@ nomem:
+ 	return -ENOMEM;
+ }
+ 
++/*
++ * Return true if it successfully increases the blocker counter,
++ * or false if the mdsc is in stopping and flushed state.
++ */
++static bool __inc_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	spin_lock(&mdsc->stopping_lock);
++	if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING) {
++		spin_unlock(&mdsc->stopping_lock);
++		return false;
++	}
++	atomic_inc(&mdsc->stopping_blockers);
++	spin_unlock(&mdsc->stopping_lock);
++	return true;
++}
++
++static void __dec_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	spin_lock(&mdsc->stopping_lock);
++	if (!atomic_dec_return(&mdsc->stopping_blockers) &&
++	    mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING)
++		complete_all(&mdsc->stopping_waiter);
++	spin_unlock(&mdsc->stopping_lock);
++}
++
++/* For metadata IO requests */
++bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
++				   struct ceph_mds_session *session)
++{
++	mutex_lock(&session->s_mutex);
++	inc_session_sequence(session);
++	mutex_unlock(&session->s_mutex);
++
++	return __inc_stopping_blocker(mdsc);
++}
++
++void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	__dec_stopping_blocker(mdsc);
++}
++
+ static void ceph_kill_sb(struct super_block *s)
+ {
+ 	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++	struct ceph_mds_client *mdsc = fsc->mdsc;
++	bool wait;
+ 
+ 	dout("kill_sb %p\n", s);
+ 
+-	ceph_mdsc_pre_umount(fsc->mdsc);
++	ceph_mdsc_pre_umount(mdsc);
+ 	flush_fs_workqueues(fsc);
+ 
+ 	/*
+ 	 * Though the kill_anon_super() will finally trigger the
+-	 * sync_filesystem() anyway, we still need to do it here
+-	 * and then bump the stage of shutdown to stop the work
+-	 * queue as earlier as possible.
++	 * sync_filesystem() anyway, we still need to do it here and
++	 * then bump the stage of shutdown. This will allow us to
++	 * drop any further messages from the MDSs, which would only
++	 * bump the inodes' i_count reference counters and no longer
++	 * make sense.
++	 *
++	 * Without this, evicting the inodes may fail in
++	 * kill_anon_super(), which will trigger a warning when
++	 * destroying the fscrypt keyring and then possibly a
++	 * further crash in the ceph module when iput() tries to
++	 * evict the inodes later.
+ 	 */
+ 	sync_filesystem(s);
+ 
+-	fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
++	spin_lock(&mdsc->stopping_lock);
++	mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
++	wait = !!atomic_read(&mdsc->stopping_blockers);
++	spin_unlock(&mdsc->stopping_lock);
++
++	if (wait && atomic_read(&mdsc->stopping_blockers)) {
++		long timeleft = wait_for_completion_killable_timeout(
++					&mdsc->stopping_waiter,
++					fsc->client->options->mount_timeout);
++		if (!timeleft) /* timed out */
++			pr_warn("umount timed out, %ld\n", timeleft);
++		else if (timeleft < 0) /* killed */
++			pr_warn("umount was killed, %ld\n", timeleft);
++	}
+ 
++	mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+ 	kill_anon_super(s);
+ 
+ 	fsc->client->extra_mon_dispatch = NULL;
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 3bfddf34d488b..e6c1edf9e12b0 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1375,4 +1375,7 @@ extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
+ 				     struct kstatfs *buf);
+ extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
+ 
++bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
++			       struct ceph_mds_session *session);
++void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc);
+ #endif /* _FS_CEPH_SUPER_H */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index bd7557d8dec41..3711be697a0a5 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/nospec.h>
+ #include <linux/backing-dev.h>
++#include <linux/freezer.h>
+ #include <trace/events/ext4.h>
+ 
+ /*
+@@ -6920,6 +6921,21 @@ __acquires(bitlock)
+ 	return ret;
+ }
+ 
++static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
++					   ext4_group_t grp)
++{
++	if (grp < ext4_get_groups_count(sb))
++		return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
++	return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++		ext4_group_first_block_no(sb, grp) - 1) >>
++					EXT4_CLUSTER_BITS(sb);
++}
++
++static bool ext4_trim_interrupted(void)
++{
++	return fatal_signal_pending(current) || freezing(current);
++}
++
+ static int ext4_try_to_trim_range(struct super_block *sb,
+ 		struct ext4_buddy *e4b, ext4_grpblk_t start,
+ 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
+@@ -6927,11 +6943,13 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
+ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ {
+ 	ext4_grpblk_t next, count, free_count;
++	bool set_trimmed = false;
+ 	void *bitmap;
+ 
+ 	bitmap = e4b->bd_bitmap;
+-	start = (e4b->bd_info->bb_first_free > start) ?
+-		e4b->bd_info->bb_first_free : start;
++	if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
++		set_trimmed = true;
++	start = max(e4b->bd_info->bb_first_free, start);
+ 	count = 0;
+ 	free_count = 0;
+ 
+@@ -6945,16 +6963,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
+ 
+ 			if (ret && ret != -EOPNOTSUPP)
+-				break;
++				return count;
+ 			count += next - start;
+ 		}
+ 		free_count += next - start;
+ 		start = next + 1;
+ 
+-		if (fatal_signal_pending(current)) {
+-			count = -ERESTARTSYS;
+-			break;
+-		}
++		if (ext4_trim_interrupted())
++			return count;
+ 
+ 		if (need_resched()) {
+ 			ext4_unlock_group(sb, e4b->bd_group);
+@@ -6966,6 +6982,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 			break;
+ 	}
+ 
++	if (set_trimmed)
++		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
++
+ 	return count;
+ }
+ 
+@@ -6976,7 +6995,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+  * @start:		first group block to examine
+  * @max:		last group block to examine
+  * @minblocks:		minimum extent block count
+- * @set_trimmed:	set the trimmed flag if at least one block is trimmed
+  *
+  * ext4_trim_all_free walks through group's block bitmap searching for free
+  * extents. When the free extent is found, mark it as used in group buddy
+@@ -6986,7 +7004,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ static ext4_grpblk_t
+ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+ 		   ext4_grpblk_t start, ext4_grpblk_t max,
+-		   ext4_grpblk_t minblocks, bool set_trimmed)
++		   ext4_grpblk_t minblocks)
+ {
+ 	struct ext4_buddy e4b;
+ 	int ret;
+@@ -7003,13 +7021,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+ 	ext4_lock_group(sb, group);
+ 
+ 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
+-	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
++	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
+ 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
+-		if (ret >= 0 && set_trimmed)
+-			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+-	} else {
++	else
+ 		ret = 0;
+-	}
+ 
+ 	ext4_unlock_group(sb, group);
+ 	ext4_mb_unload_buddy(&e4b);
+@@ -7042,7 +7057,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 	ext4_fsblk_t first_data_blk =
+ 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+ 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
+-	bool whole_group, eof = false;
+ 	int ret = 0;
+ 
+ 	start = range->start >> sb->s_blocksize_bits;
+@@ -7061,10 +7075,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
+ 			goto out;
+ 	}
+-	if (end >= max_blks - 1) {
++	if (end >= max_blks - 1)
+ 		end = max_blks - 1;
+-		eof = true;
+-	}
+ 	if (end <= first_data_blk)
+ 		goto out;
+ 	if (start < first_data_blk)
+@@ -7078,9 +7090,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 
+ 	/* end now represents the last cluster to discard in this group */
+ 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-	whole_group = true;
+ 
+ 	for (group = first_group; group <= last_group; group++) {
++		if (ext4_trim_interrupted())
++			break;
+ 		grp = ext4_get_group_info(sb, group);
+ 		if (!grp)
+ 			continue;
+@@ -7097,13 +7110,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		 * change it for the last group, note that last_cluster is
+ 		 * already computed earlier by ext4_get_group_no_and_offset()
+ 		 */
+-		if (group == last_group) {
++		if (group == last_group)
+ 			end = last_cluster;
+-			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-		}
+ 		if (grp->bb_free >= minlen) {
+ 			cnt = ext4_trim_all_free(sb, group, first_cluster,
+-						 end, minlen, whole_group);
++						 end, minlen);
+ 			if (cnt < 0) {
+ 				ret = cnt;
+ 				break;
+@@ -7148,8 +7159,7 @@ ext4_mballoc_query_range(
+ 
+ 	ext4_lock_group(sb, group);
+ 
+-	start = (e4b.bd_info->bb_first_free > start) ?
+-		e4b.bd_info->bb_first_free : start;
++	start = max(e4b.bd_info->bb_first_free, start);
+ 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
+ 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+ 
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 1438e7465e306..59c1aed0b9b90 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -2017,7 +2017,9 @@ static long gfs2_scan_glock_lru(int nr)
+ 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
+ 			if (!spin_trylock(&gl->gl_lockref.lock))
+ 				continue;
+-			if (!gl->gl_lockref.count) {
++			if (gl->gl_lockref.count <= 1 &&
++			    (gl->gl_state == LM_ST_UNLOCKED ||
++			     demote_ok(gl))) {
+ 				list_move(&gl->gl_lru, &dispose);
+ 				atomic_dec(&lru_count);
+ 				freed++;
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 54319328b16b5..0a3b069386ec9 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -567,15 +567,16 @@ static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
+ 	struct super_block *sb = sdp->sd_vfs;
+ 
+ 	if (!remote ||
+-	    gl->gl_state != LM_ST_SHARED ||
++	    (gl->gl_state != LM_ST_SHARED &&
++	     gl->gl_state != LM_ST_UNLOCKED) ||
+ 	    gl->gl_demote_state != LM_ST_UNLOCKED)
+ 		return;
+ 
+ 	/*
+ 	 * Try to get an active super block reference to prevent racing with
+-	 * unmount (see trylock_super()).  But note that unmount isn't the only
+-	 * place where a write lock on s_umount is taken, and we can fail here
+-	 * because of things like remount as well.
++	 * unmount (see super_trylock_shared()).  But note that unmount isn't
++	 * the only place where a write lock on s_umount is taken, and we can
++	 * fail here because of things like remount as well.
+ 	 */
+ 	if (down_read_trylock(&sb->s_umount)) {
+ 		atomic_inc(&sb->s_active);
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 5b851315eeed0..712c57828c0e4 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -1646,6 +1646,8 @@ ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ 		 * We don't know how much we wrote, so just return the number of
+ 		 * bytes which were direct-written
+ 		 */
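++		/* Roll back ki_pos for the unreported buffered bytes. */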
++		iocb->ki_pos -= buffered_written;
+ 		if (direct_written)
+ 			return direct_written;
+ 		return err;
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 3404707ddbe73..2cd3ccf4c4399 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -47,12 +47,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+ 	xas_for_each(&xas, folio, last_page) {
+ 		loff_t pg_end;
+ 		bool pg_failed = false;
++		bool folio_started;
+ 
+ 		if (xas_retry(&xas, folio))
+ 			continue;
+ 
+ 		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+ 
++		folio_started = false;
+ 		for (;;) {
+ 			loff_t sreq_end;
+ 
+@@ -60,8 +62,10 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+ 				pg_failed = true;
+ 				break;
+ 			}
+-			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
++			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ 				folio_start_fscache(folio);
++				folio_started = true;
++			}
+ 			pg_failed |= subreq_failed;
+ 			sreq_end = subreq->start + subreq->len - 1;
+ 			if (pg_end < sreq_end)
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 47d892a1d363d..f6c74f4246917 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -93,12 +93,10 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+ 		dreq->max_count = dreq_len;
+ 		if (dreq->count > dreq_len)
+ 			dreq->count = dreq_len;
+-
+-		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+-			dreq->error = hdr->error;
+-		else /* Clear outstanding error if this is EOF */
+-			dreq->error = 0;
+ 	}
++
++	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
++		dreq->error = hdr->error;
+ }
+ 
+ static void
+@@ -120,6 +118,18 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+ 		dreq->count = dreq_len;
+ }
+ 
++static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
++					struct nfs_page *req)
++{
++	loff_t offs = req_offset(req);
++	size_t req_start = (size_t)(offs - dreq->io_start);
++
++	if (req_start < dreq->max_count)
++		dreq->max_count = req_start;
++	if (req_start < dreq->count)
++		dreq->count = req_start;
++}
++
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -488,7 +498,9 @@ static void nfs_direct_add_page_head(struct list_head *list,
+ 	kref_get(&head->wb_kref);
+ }
+ 
+-static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
++static void nfs_direct_join_group(struct list_head *list,
++				  struct nfs_commit_info *cinfo,
++				  struct inode *inode)
+ {
+ 	struct nfs_page *req, *subreq;
+ 
+@@ -510,7 +522,7 @@ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ 				nfs_release_request(subreq);
+ 			}
+ 		} while ((subreq = subreq->wb_this_page) != req);
+-		nfs_join_page_group(req, inode);
++		nfs_join_page_group(req, cinfo, inode);
+ 	}
+ }
+ 
+@@ -528,20 +540,15 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
+ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ {
+ 	struct nfs_pageio_descriptor desc;
+-	struct nfs_page *req, *tmp;
++	struct nfs_page *req;
+ 	LIST_HEAD(reqs);
+ 	struct nfs_commit_info cinfo;
+-	LIST_HEAD(failed);
+ 
+ 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+ 
+-	nfs_direct_join_group(&reqs, dreq->inode);
++	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
+ 
+-	dreq->count = 0;
+-	dreq->max_count = 0;
+-	list_for_each_entry(req, &reqs, wb_list)
+-		dreq->max_count += req->wb_bytes;
+ 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
+ 	get_dreq(dreq);
+ 
+@@ -549,27 +556,40 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ 			      &nfs_direct_write_completion_ops);
+ 	desc.pg_dreq = dreq;
+ 
+-	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
++	while (!list_empty(&reqs)) {
++		req = nfs_list_entry(reqs.next);
+ 		/* Bump the transmission count */
+ 		req->wb_nio++;
+ 		if (!nfs_pageio_add_request(&desc, req)) {
+-			nfs_list_move_request(req, &failed);
+-			spin_lock(&cinfo.inode->i_lock);
+-			dreq->flags = 0;
+-			if (desc.pg_error < 0)
++			spin_lock(&dreq->lock);
++			if (dreq->error < 0) {
++				desc.pg_error = dreq->error;
++			} else if (desc.pg_error != -EAGAIN) {
++				dreq->flags = 0;
++				if (!desc.pg_error)
++					desc.pg_error = -EIO;
+ 				dreq->error = desc.pg_error;
+-			else
+-				dreq->error = -EIO;
+-			spin_unlock(&cinfo.inode->i_lock);
++			} else
++				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++			spin_unlock(&dreq->lock);
++			break;
+ 		}
+ 		nfs_release_request(req);
+ 	}
+ 	nfs_pageio_complete(&desc);
+ 
+-	while (!list_empty(&failed)) {
+-		req = nfs_list_entry(failed.next);
++	while (!list_empty(&reqs)) {
++		req = nfs_list_entry(reqs.next);
+ 		nfs_list_remove_request(req);
+ 		nfs_unlock_and_release_request(req);
++		if (desc.pg_error == -EAGAIN) {
++			nfs_mark_request_commit(req, NULL, &cinfo, 0);
++		} else {
++			spin_lock(&dreq->lock);
++			nfs_direct_truncate_request(dreq, req);
++			spin_unlock(&dreq->lock);
++			nfs_release_request(req);
++		}
+ 	}
+ 
+ 	if (put_dreq(dreq))
+@@ -589,8 +609,6 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	if (status < 0) {
+ 		/* Errors in commit are fatal */
+ 		dreq->error = status;
+-		dreq->max_count = 0;
+-		dreq->count = 0;
+ 		dreq->flags = NFS_ODIRECT_DONE;
+ 	} else {
+ 		status = dreq->error;
+@@ -601,7 +619,12 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	while (!list_empty(&data->pages)) {
+ 		req = nfs_list_entry(data->pages.next);
+ 		nfs_list_remove_request(req);
+-		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
++		if (status < 0) {
++			spin_lock(&dreq->lock);
++			nfs_direct_truncate_request(dreq, req);
++			spin_unlock(&dreq->lock);
++			nfs_release_request(req);
++		} else if (!nfs_write_match_verf(verf, req)) {
+ 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ 			/*
+ 			 * Despite the reboot, the write was successful,
+@@ -609,7 +632,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 			 */
+ 			req->wb_nio = 0;
+ 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-		} else /* Error or match */
++		} else
+ 			nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -662,6 +685,7 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+ 	while (!list_empty(&reqs)) {
+ 		req = nfs_list_entry(reqs.next);
+ 		nfs_list_remove_request(req);
++		nfs_direct_truncate_request(dreq, req);
+ 		nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -711,7 +735,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	}
+ 
+ 	nfs_direct_count_bytes(dreq, hdr);
+-	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
++	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
++	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ 		if (!dreq->flags)
+ 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ 		flags = dreq->flags;
+@@ -755,18 +780,23 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
+ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
+ {
+ 	struct nfs_direct_req *dreq = hdr->dreq;
++	struct nfs_page *req;
++	struct nfs_commit_info cinfo;
+ 
+ 	trace_nfs_direct_write_reschedule_io(dreq);
+ 
++	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	spin_lock(&dreq->lock);
+-	if (dreq->error == 0) {
++	if (dreq->error == 0)
+ 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-		/* fake unstable write to let common nfs resend pages */
+-		hdr->verf.committed = NFS_UNSTABLE;
+-		hdr->good_bytes = hdr->args.offset + hdr->args.count -
+-			hdr->io_start;
+-	}
++	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ 	spin_unlock(&dreq->lock);
++	while (!list_empty(&hdr->pages)) {
++		req = nfs_list_entry(hdr->pages.next);
++		nfs_list_remove_request(req);
++		nfs_unlock_request(req);
++		nfs_mark_request_commit(req, NULL, &cinfo, 0);
++	}
+ }
+ 
+ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+@@ -794,9 +824,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ {
+ 	struct nfs_pageio_descriptor desc;
+ 	struct inode *inode = dreq->inode;
++	struct nfs_commit_info cinfo;
+ 	ssize_t result = 0;
+ 	size_t requested_bytes = 0;
+ 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
++	bool defer = false;
+ 
+ 	trace_nfs_direct_write_schedule_iovec(dreq);
+ 
+@@ -837,17 +869,37 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ 				break;
+ 			}
+ 
+-			nfs_lock_request(req);
+-			if (!nfs_pageio_add_request(&desc, req)) {
+-				result = desc.pg_error;
+-				nfs_unlock_and_release_request(req);
+-				break;
+-			}
+ 			pgbase = 0;
+ 			bytes -= req_len;
+ 			requested_bytes += req_len;
+ 			pos += req_len;
+ 			dreq->bytes_left -= req_len;
++
++			if (defer) {
++				nfs_mark_request_commit(req, NULL, &cinfo, 0);
++				continue;
++			}
++
++			nfs_lock_request(req);
++			if (nfs_pageio_add_request(&desc, req))
++				continue;
++
++			/* Exit on hard errors */
++			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
++				result = desc.pg_error;
++				nfs_unlock_and_release_request(req);
++				break;
++			}
++
++			/* If the error is soft, defer remaining requests */
++			nfs_init_cinfo_from_dreq(&cinfo, dreq);
++			spin_lock(&dreq->lock);
++			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++			spin_unlock(&dreq->lock);
++			nfs_unlock_request(req);
++			nfs_mark_request_commit(req, NULL, &cinfo, 0);
++			desc.pg_error = 0;
++			defer = true;
+ 		}
+ 		nfs_direct_release_pages(pagevec, npages);
+ 		kvfree(pagevec);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 7deb3cd76abe4..a1dc338649062 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1235,6 +1235,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ 		case -EPFNOSUPPORT:
+ 		case -EPROTONOSUPPORT:
+ 		case -EOPNOTSUPP:
++		case -EINVAL:
+ 		case -ECONNREFUSED:
+ 		case -ECONNRESET:
+ 		case -EHOSTDOWN:
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index d9114a754db73..11e3a285594c2 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -232,6 +232,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ 	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ 	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+ 
++	if (test_bit(NFS_CS_DS, &cl_init->init_flags))
++		__set_bit(NFS_CS_DS, &clp->cl_flags);
+ 	/*
+ 	 * Set up the connection to the server before we add add to the
+ 	 * global list.
+@@ -415,6 +417,8 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+ 		.net = old->cl_net,
+ 		.servername = old->cl_hostname,
+ 	};
++	int max_connect = test_bit(NFS_CS_PNFS, &clp->cl_flags) ?
++		clp->cl_max_connect : old->cl_max_connect;
+ 
+ 	if (clp->cl_proto != old->cl_proto)
+ 		return;
+@@ -428,7 +432,7 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+ 	xprt_args.addrlen = clp_salen;
+ 
+ 	rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
+-			  rpc_clnt_test_and_add_xprt, NULL);
++			  rpc_clnt_test_and_add_xprt, &max_connect);
+ }
+ 
+ /**
+@@ -1007,6 +1011,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+ 	if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ 		__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ 
++	__set_bit(NFS_CS_DS, &cl_init.init_flags);
++	__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
++	cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ 	/*
+ 	 * Set an authflavor equual to the MDS value. Use the MDS nfs_client
+ 	 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3c24c3c99e8ac..51029e4b60f56 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2703,8 +2703,12 @@ static int _nfs4_proc_open(struct nfs4_opendata *data,
+ 			return status;
+ 	}
+ 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
++		struct nfs_fh *fh = &o_res->fh;
++
+ 		nfs4_sequence_free_slot(&o_res->seq_res);
+-		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, NULL);
++		if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
++			fh = NFS_FH(d_inode(data->dentry));
++		nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
+ 	}
+ 	return 0;
+ }
+@@ -8787,6 +8791,8 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+ 	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
+ #endif
++	if (test_bit(NFS_CS_DS, &clp->cl_flags))
++		calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
+ 	msg.rpc_argp = &calldata->args;
+ 	msg.rpc_resp = &calldata->res;
+ 	task_setup_data.callback_data = calldata;
+@@ -8864,6 +8870,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ 	/* Save the EXCHANGE_ID verifier session trunk tests */
+ 	memcpy(clp->cl_confirm.data, argp->verifier.data,
+ 	       sizeof(clp->cl_confirm.data));
++	if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
++		set_bit(NFS_CS_DS, &clp->cl_flags);
+ out:
+ 	trace_nfs4_exchange_id(clp, status);
+ 	rpc_put_task(task);
+@@ -10614,7 +10622,9 @@ static void nfs4_disable_swap(struct inode *inode)
+ 	 */
+ 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ 
+-	nfs4_schedule_state_manager(clp);
++	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
++	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++	wake_up_var(&clp->cl_state);
+ }
+ 
+ static const struct inode_operations nfs4_dir_inode_operations = {
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index e079987af4a3e..597ae4535fe33 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1209,16 +1209,26 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ {
+ 	struct task_struct *task;
+ 	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
++	struct rpc_clnt *clnt = clp->cl_rpcclient;
++	bool swapon = false;
+ 
+-	if (clp->cl_rpcclient->cl_shutdown)
++	if (clnt->cl_shutdown)
+ 		return;
+ 
+ 	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+-	if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
+-		wake_up_var(&clp->cl_state);
+-		return;
++
++	if (atomic_read(&clnt->cl_swapper)) {
++		swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
++					   &clp->cl_state);
++		if (!swapon) {
++			wake_up_var(&clp->cl_state);
++			return;
++		}
+ 	}
+-	set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
++
++	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
++		return;
++
+ 	__module_get(THIS_MODULE);
+ 	refcount_inc(&clp->cl_count);
+ 
+@@ -1235,8 +1245,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ 			__func__, PTR_ERR(task));
+ 		if (!nfs_client_init_is_complete(clp))
+ 			nfs_mark_client_ready(clp, PTR_ERR(task));
++		if (swapon)
++			clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 		nfs4_clear_state_manager_bit(clp);
+-		clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 		nfs_put_client(clp);
+ 		module_put(THIS_MODULE);
+ 	}
+@@ -2741,22 +2752,25 @@ static int nfs4_run_state_manager(void *ptr)
+ 
+ 	allow_signal(SIGKILL);
+ again:
+-	set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+ 	nfs4_state_manager(clp);
+-	if (atomic_read(&cl->cl_swapper)) {
++
++	if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
++	    !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
+ 		wait_var_event_interruptible(&clp->cl_state,
+ 					     test_bit(NFS4CLNT_RUN_MANAGER,
+ 						      &clp->cl_state));
+-		if (atomic_read(&cl->cl_swapper) &&
+-		    test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
++		if (!atomic_read(&cl->cl_swapper))
++			clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++		if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
++		    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ 			goto again;
+ 		/* Either no longer a swapper, or were signalled */
++		clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 	}
+-	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 
+ 	if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ 	    test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+-	    !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
++	    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ 		goto again;
+ 
+ 	nfs_put_client(clp);
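
As a rough userspace sketch of the gating pattern the nfs4state.c hunks converge on -- a single manager thread guarded by test-and-set, with repeat requests coalesced into a RUN flag and a re-check closing the wakeup race (the kernel's "goto again") -- consider the following. All names here (schedule_manager, run_requested) are illustrative, not kernel symbols:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool run_requested;                      /* ~ NFS4CLNT_RUN_MANAGER */
static atomic_flag manager_running = ATOMIC_FLAG_INIT; /* ~ NFS4CLNT_MANAGER_RUNNING */

static void *manager(void *arg)
{
	(void)arg;
	for (;;) {
		do {
			atomic_store(&run_requested, false);
			puts("manager: handling state work");
		} while (atomic_load(&run_requested));
		atomic_flag_clear(&manager_running);
		/* Close the wakeup race: a request may have slipped in
		 * between the last check and clearing the flag. */
		if (!atomic_load(&run_requested) ||
		    atomic_flag_test_and_set(&manager_running))
			break;
	}
	return NULL;
}

static void schedule_manager(void)
{
	pthread_t t;

	atomic_store(&run_requested, true);
	if (atomic_flag_test_and_set(&manager_running))
		return;		/* already running; it will see run_requested */
	if (pthread_create(&t, NULL, manager, NULL) == 0)
		pthread_detach(t);
	else
		atomic_flag_clear(&manager_running);
}

int main(void)
{
	schedule_manager();
	schedule_manager();	/* coalesced into the running manager */
	sleep(1);
	return 0;
}
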
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index f4cca8f00c0c2..8c1ee1a1a28f1 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -59,7 +59,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+ static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
+ static const struct nfs_rw_ops nfs_rw_write_ops;
+ static void nfs_inode_remove_request(struct nfs_page *req);
+-static void nfs_clear_request_commit(struct nfs_page *req);
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++				     struct nfs_page *req);
+ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ 				      struct inode *inode);
+ static struct nfs_page *
+@@ -502,8 +503,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+  * the (former) group.  All subrequests are removed from any write or commit
+  * lists, unlinked from the group and destroyed.
+  */
+-void
+-nfs_join_page_group(struct nfs_page *head, struct inode *inode)
++void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
++			 struct inode *inode)
+ {
+ 	struct nfs_page *subreq;
+ 	struct nfs_page *destroy_list = NULL;
+@@ -533,7 +534,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+ 	 * Commit list removal accounting is done after locks are dropped */
+ 	subreq = head;
+ 	do {
+-		nfs_clear_request_commit(subreq);
++		nfs_clear_request_commit(cinfo, subreq);
+ 		subreq = subreq->wb_this_page;
+ 	} while (subreq != head);
+ 
+@@ -566,8 +567,10 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ {
+ 	struct inode *inode = folio_file_mapping(folio)->host;
+ 	struct nfs_page *head;
++	struct nfs_commit_info cinfo;
+ 	int ret;
+ 
++	nfs_init_cinfo_from_inode(&cinfo, inode);
+ 	/*
+ 	 * A reference is taken only on the head request which acts as a
+ 	 * reference to the whole page group - the group will not be destroyed
+@@ -584,7 +587,7 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	nfs_join_page_group(head, inode);
++	nfs_join_page_group(head, &cinfo, inode);
+ 
+ 	return head;
+ }
+@@ -955,18 +958,16 @@ static void nfs_folio_clear_commit(struct folio *folio)
+ }
+ 
+ /* Called holding the request lock on @req */
+-static void
+-nfs_clear_request_commit(struct nfs_page *req)
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++				     struct nfs_page *req)
+ {
+ 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
+ 		struct nfs_open_context *ctx = nfs_req_openctx(req);
+ 		struct inode *inode = d_inode(ctx->dentry);
+-		struct nfs_commit_info cinfo;
+ 
+-		nfs_init_cinfo_from_inode(&cinfo, inode);
+ 		mutex_lock(&NFS_I(inode)->commit_mutex);
+-		if (!pnfs_clear_request_commit(req, &cinfo)) {
+-			nfs_request_remove_commit_list(req, &cinfo);
++		if (!pnfs_clear_request_commit(req, cinfo)) {
++			nfs_request_remove_commit_list(req, cinfo);
+ 		}
+ 		mutex_unlock(&NFS_I(inode)->commit_mutex);
+ 		nfs_folio_clear_commit(nfs_page_to_folio(req));
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index be72628b13376..d2588f4ac42be 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -4105,6 +4105,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ 				 struct file *file, unsigned long maxcount)
+ {
+ 	struct xdr_stream *xdr = resp->xdr;
++	unsigned int base = xdr->buf->page_len & ~PAGE_MASK;
+ 	unsigned int starting_len = xdr->buf->len;
+ 	__be32 zero = xdr_zero;
+ 	__be32 nfserr;
+@@ -4113,8 +4114,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ 		return nfserr_resource;
+ 
+ 	nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file,
+-				read->rd_offset, &maxcount,
+-				xdr->buf->page_len & ~PAGE_MASK,
++				read->rd_offset, &maxcount, base,
+ 				&read->rd_eof);
+ 	read->rd_length = maxcount;
+ 	if (nfserr)
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index 48fe71d309cb4..8beb2730929d4 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -73,10 +73,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+ 		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+ 
+ 		err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
+-		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
+-			brelse(bh);
++		if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
+ 			goto failed;
+-		}
+ 	}
+ 
+ 	lock_buffer(bh);
+@@ -102,6 +100,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+  failed:
+ 	unlock_page(bh->b_page);
+ 	put_page(bh->b_page);
++	if (unlikely(err))
++		brelse(bh);
+ 	return err;
+ }
+ 
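
The nilfs2 change above is a classic single-exit cleanup fix: the buffer head is now released in one place, on the shared failure path, rather than at each failing call site. A small userspace analogue of that shape (do_read and the malloc stand-ins are illustrative, not nilfs APIs):

#include <stdio.h>
#include <stdlib.h>

static int do_read(int fail_translate, void **out)
{
	int err = 0;
	void *bh = malloc(64);		/* stands in for the buffer head */

	if (!bh)
		return -12;		/* -ENOMEM */
	if (fail_translate) {		/* ~ nilfs_dat_translate() failing */
		err = -5;		/* -EIO */
		goto failed;
	}
	*out = bh;			/* success: reference handed to caller */
	return 0;

failed:
	if (err)			/* error: release here, exactly once */
		free(bh);
	return err;
}

int main(void)
{
	void *bh = NULL;
	int ok = do_read(0, &bh);
	int bad = do_read(1, &bh);

	printf("ok=%d bad=%d\n", ok, bad);
	free(bh);
	return 0;
}
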
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 9dda7e54b2d0d..9a8f32f21ff56 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -289,9 +289,7 @@ struct proc_maps_private {
+ 	struct inode *inode;
+ 	struct task_struct *task;
+ 	struct mm_struct *mm;
+-#ifdef CONFIG_MMU
+ 	struct vma_iterator iter;
+-#endif
+ #ifdef CONFIG_NUMA
+ 	struct mempolicy *task_mempolicy;
+ #endif
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 2c8b622659814..d3e19080df4af 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -188,15 +188,28 @@ static int show_map(struct seq_file *m, void *_p)
+ 	return nommu_vma_show(m, _p);
+ }
+ 
+-static void *m_start(struct seq_file *m, loff_t *pos)
++static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
++						loff_t *ppos)
++{
++	struct vm_area_struct *vma = vma_next(&priv->iter);
++
++	if (vma) {
++		*ppos = vma->vm_start;
++	} else {
++		*ppos = -1UL;
++	}
++
++	return vma;
++}
++
++static void *m_start(struct seq_file *m, loff_t *ppos)
+ {
+ 	struct proc_maps_private *priv = m->private;
++	unsigned long last_addr = *ppos;
+ 	struct mm_struct *mm;
+-	struct vm_area_struct *vma;
+-	unsigned long addr = *pos;
+ 
+-	/* See m_next(). Zero at the start or after lseek. */
+-	if (addr == -1UL)
++	/* See proc_get_vma(). Zero at the start or after lseek. */
++	if (last_addr == -1UL)
+ 		return NULL;
+ 
+ 	/* pin the task and mm whilst we play with them */
+@@ -205,44 +218,41 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 		return ERR_PTR(-ESRCH);
+ 
+ 	mm = priv->mm;
+-	if (!mm || !mmget_not_zero(mm))
++	if (!mm || !mmget_not_zero(mm)) {
++		put_task_struct(priv->task);
++		priv->task = NULL;
+ 		return NULL;
++	}
+ 
+ 	if (mmap_read_lock_killable(mm)) {
+ 		mmput(mm);
++		put_task_struct(priv->task);
++		priv->task = NULL;
+ 		return ERR_PTR(-EINTR);
+ 	}
+ 
+-	/* start the next element from addr */
+-	vma = find_vma(mm, addr);
+-	if (vma)
+-		return vma;
++	vma_iter_init(&priv->iter, mm, last_addr);
+ 
+-	mmap_read_unlock(mm);
+-	mmput(mm);
+-	return NULL;
++	return proc_get_vma(priv, ppos);
+ }
+ 
+-static void m_stop(struct seq_file *m, void *_vml)
++static void m_stop(struct seq_file *m, void *v)
+ {
+ 	struct proc_maps_private *priv = m->private;
++	struct mm_struct *mm = priv->mm;
+ 
+-	if (!IS_ERR_OR_NULL(_vml)) {
+-		mmap_read_unlock(priv->mm);
+-		mmput(priv->mm);
+-	}
+-	if (priv->task) {
+-		put_task_struct(priv->task);
+-		priv->task = NULL;
+-	}
++	if (!priv->task)
++		return;
++
++	mmap_read_unlock(mm);
++	mmput(mm);
++	put_task_struct(priv->task);
++	priv->task = NULL;
+ }
+ 
+-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
++static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
+ {
+-	struct vm_area_struct *vma = _p;
+-
+-	*pos = vma->vm_end;
+-	return find_vma(vma->vm_mm, vma->vm_end);
++	return proc_get_vma(m->private, ppos);
+ }
+ 
+ static const struct seq_operations proc_pid_maps_ops = {
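
The task_nommu.c rework above centralizes the seq_file position handling: *ppos carries the resume key (the next VMA start) and -1UL marks exhaustion, so ->start after the final ->next returns NULL immediately. A toy model of that contract (get_vma and the address array are illustrative, and this toy rescans where the kernel's vma_iterator advances in place):

#include <stdio.h>

static unsigned long vmas[] = { 0x1000, 0x5000, 0x9000 };
#define NVMAS (sizeof(vmas) / sizeof(vmas[0]))

/* ~ proc_get_vma(): return the next entry and stash the resume key */
static unsigned long *get_vma(unsigned long *ppos)
{
	for (size_t i = 0; i < NVMAS; i++) {
		if (vmas[i] >= *ppos) {
			*ppos = vmas[i] + 1;	/* resume past this entry */
			return &vmas[i];
		}
	}
	*ppos = -1UL;				/* end marker checked by ->start */
	return NULL;
}

int main(void)
{
	unsigned long pos = 0;

	for (unsigned long *v = get_vma(&pos); v; v = get_vma(&pos))
		printf("vma at %#lx\n", *v);
	printf("pos=%#lx (exhausted)\n", pos);
	return 0;
}
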
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 051f15b9d6078..35782a6bede0b 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1776,6 +1776,7 @@ static inline bool is_retryable_error(int error)
+ #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
+ #define   MID_RESPONSE_MALFORMED 0x10
+ #define   MID_SHUTDOWN		 0x20
++#define   MID_RESPONSE_READY 0x40 /* ready for another process to handle the rsp */
+ 
+ /* Flags */
+ #define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 67e16c2ac90e6..f12203c49b802 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -1532,6 +1532,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 
+  cifs_parse_mount_err:
+ 	kfree_sensitive(ctx->password);
++	ctx->password = NULL;
+ 	return -EINVAL;
+ }
+ 
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index c3eeae07e1390..cb85d7977b1e3 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2610,7 +2610,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
+ 	}
+ 
+ 	cifsFileInfo_put(cfile);
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ int cifs_truncate_page(struct address_space *mapping, loff_t from)
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index dd6a423dc6e11..a5cba71c30aed 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -297,7 +297,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
+ 		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
+ 				credits->value, new_val);
+ 
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	spin_lock(&server->req_lock);
+@@ -1159,7 +1159,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 			/* Use a fudge factor of 256 bytes in case we collide
+ 			 * with a different set_EAs command.
+ 			 */
+-			if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
++			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ 			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
+ 			   used_len + ea_name_len + ea_value_len + 1) {
+ 				rc = -ENOSPC;
+@@ -4716,7 +4716,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 
+ 	if (shdr->Command != SMB2_READ) {
+ 		cifs_server_dbg(VFS, "only big read responses are supported\n");
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if (server->ops->is_session_expired &&
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index f280502a2aee8..2b9a2ed45a652 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -35,6 +35,8 @@
+ void
+ cifs_wake_up_task(struct mid_q_entry *mid)
+ {
++	if (mid->mid_state == MID_RESPONSE_RECEIVED)
++		mid->mid_state = MID_RESPONSE_READY;
+ 	wake_up_process(mid->callback_data);
+ }
+ 
+@@ -87,7 +89,8 @@ static void __release_mid(struct kref *refcount)
+ 	struct TCP_Server_Info *server = midEntry->server;
+ 
+ 	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+-	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
++	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
++	     midEntry->mid_state == MID_RESPONSE_READY) &&
+ 	    server->ops->handle_cancelled_mid)
+ 		server->ops->handle_cancelled_mid(midEntry, server);
+ 
+@@ -732,7 +735,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
+ 	int error;
+ 
+ 	error = wait_event_state(server->response_q,
+-				 midQ->mid_state != MID_REQUEST_SUBMITTED,
++				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
++				 midQ->mid_state != MID_RESPONSE_RECEIVED,
+ 				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
+ 	if (error < 0)
+ 		return -ERESTARTSYS;
+@@ -885,7 +889,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+ 
+ 	spin_lock(&server->mid_lock);
+ 	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
++	case MID_RESPONSE_READY:
+ 		spin_unlock(&server->mid_lock);
+ 		return rc;
+ 	case MID_RETRY_NEEDED:
+@@ -984,6 +988,9 @@ cifs_compound_callback(struct mid_q_entry *mid)
+ 	credits.instance = server->reconnect_instance;
+ 
+ 	add_credits(server, &credits, mid->optype);
++
++	if (mid->mid_state == MID_RESPONSE_RECEIVED)
++		mid->mid_state = MID_RESPONSE_READY;
+ }
+ 
+ static void
+@@ -1204,7 +1211,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 			send_cancel(server, &rqst[i], midQ[i]);
+ 			spin_lock(&server->mid_lock);
+ 			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+-			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
++			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
++			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
+ 				midQ[i]->callback = cifs_cancelled_callback;
+ 				cancelled_mid[i] = true;
+ 				credits[i].value = 0;
+@@ -1225,7 +1233,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		}
+ 
+ 		if (!midQ[i]->resp_buf ||
+-		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
++		    midQ[i]->mid_state != MID_RESPONSE_READY) {
+ 			rc = -EIO;
+ 			cifs_dbg(FYI, "Bad MID state?\n");
+ 			goto out;
+@@ -1412,7 +1420,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 	if (rc != 0) {
+ 		send_cancel(server, &rqst, midQ);
+ 		spin_lock(&server->mid_lock);
+-		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ 			/* no longer considered to be "in-flight" */
+ 			midQ->callback = release_mid;
+ 			spin_unlock(&server->mid_lock);
+@@ -1429,7 +1438,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 	}
+ 
+ 	if (!midQ->resp_buf || !out_buf ||
+-	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
++	    midQ->mid_state != MID_RESPONSE_READY) {
+ 		rc = -EIO;
+ 		cifs_server_dbg(VFS, "Bad MID state?\n");
+ 		goto out;
+@@ -1553,14 +1562,16 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	/* Wait for a reply - allow signals to interrupt. */
+ 	rc = wait_event_interruptible(server->response_q,
+-		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
++		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
+ 		((server->tcpStatus != CifsGood) &&
+ 		 (server->tcpStatus != CifsNew)));
+ 
+ 	/* Were we interrupted by a signal ? */
+ 	spin_lock(&server->srv_lock);
+ 	if ((rc == -ERESTARTSYS) &&
+-		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
++		(midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
+ 		((server->tcpStatus == CifsGood) ||
+ 		 (server->tcpStatus == CifsNew))) {
+ 		spin_unlock(&server->srv_lock);
+@@ -1591,7 +1602,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 		if (rc) {
+ 			send_cancel(server, &rqst, midQ);
+ 			spin_lock(&server->mid_lock);
+-			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
++			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ 				/* no longer considered to be "in-flight" */
+ 				midQ->callback = release_mid;
+ 				spin_unlock(&server->mid_lock);
+@@ -1611,7 +1623,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 		return rc;
+ 
+ 	/* rcvd frame is ok */
+-	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
++	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
+ 		rc = -EIO;
+ 		cifs_tcon_dbg(VFS, "Bad MID state?\n");
+ 		goto out;
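
A compact sketch of the state split those transport.c hunks implement: the demultiplex path marks a response RECEIVED, the callback finishes credit accounting and only then flips it to READY, and waiters treat anything short of READY as still in flight. The enum and helper below are illustrative, not the cifs definitions:

#include <stdio.h>

enum mid_state {
	MID_SUBMITTED,		/* request sent, nothing back yet */
	MID_RECEIVED,		/* response arrived, callback still running */
	MID_READY,		/* callback done; waiter may consume it */
};

static int waiter_may_consume(enum mid_state s)
{
	return s == MID_READY;	/* RECEIVED alone is no longer enough */
}

int main(void)
{
	printf("submitted=%d received=%d ready=%d\n",
	       waiter_may_consume(MID_SUBMITTED),
	       waiter_may_consume(MID_RECEIVED),
	       waiter_may_consume(MID_READY));
	return 0;
}
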
+diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
+index 18f5744dfb5d8..b83ef19da13de 100644
+--- a/include/linux/atomic/atomic-arch-fallback.h
++++ b/include/linux/atomic/atomic-arch-fallback.h
+@@ -459,8 +459,6 @@ raw_atomic_read_acquire(const atomic_t *v)
+ {
+ #if defined(arch_atomic_read_acquire)
+ 	return arch_atomic_read_acquire(v);
+-#elif defined(arch_atomic_read)
+-	return arch_atomic_read(v);
+ #else
+ 	int ret;
+ 
+@@ -508,8 +506,6 @@ raw_atomic_set_release(atomic_t *v, int i)
+ {
+ #if defined(arch_atomic_set_release)
+ 	arch_atomic_set_release(v, i);
+-#elif defined(arch_atomic_set)
+-	arch_atomic_set(v, i);
+ #else
+ 	if (__native_word(atomic_t)) {
+ 		smp_store_release(&(v)->counter, i);
+@@ -2575,8 +2571,6 @@ raw_atomic64_read_acquire(const atomic64_t *v)
+ {
+ #if defined(arch_atomic64_read_acquire)
+ 	return arch_atomic64_read_acquire(v);
+-#elif defined(arch_atomic64_read)
+-	return arch_atomic64_read(v);
+ #else
+ 	s64 ret;
+ 
+@@ -2624,8 +2618,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i)
+ {
+ #if defined(arch_atomic64_set_release)
+ 	arch_atomic64_set_release(v, i);
+-#elif defined(arch_atomic64_set)
+-	arch_atomic64_set(v, i);
+ #else
+ 	if (__native_word(atomic64_t)) {
+ 		smp_store_release(&(v)->counter, i);
+@@ -4657,4 +4649,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
+ }
+ 
+ #endif /* _LINUX_ATOMIC_FALLBACK_H */
+-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
++// 2fdd6702823fa842f9cea57a002e6e4476ae780c
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 28e2e0ce2ed07..477d91b926b35 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -425,7 +425,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ 
+ 	size /= sizeof(long);
+ 	while (size--)
+-		*ldst++ = *lsrc++;
++		data_race(*ldst++ = *lsrc++);
+ }
+ 
+ /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
+diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
+index 00950cc03bff2..b247784427d6f 100644
+--- a/include/linux/btf_ids.h
++++ b/include/linux/btf_ids.h
+@@ -49,7 +49,7 @@ word							\
+ 	____BTF_ID(symbol, word)
+ 
+ #define __ID(prefix) \
+-	__PASTE(prefix, __COUNTER__)
++	__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
+ 
+ /*
+  * The BTF_ID defines unique symbol for each ID pointing
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index 00efa35c350f6..28566624f008f 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -94,6 +94,19 @@
+ # define __copy(symbol)
+ #endif
+ 
++/*
++ * Optional: only supported since gcc >= 14
++ * Optional: only supported since clang >= 18
++ *
++ *   gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
++ * clang: https://reviews.llvm.org/D148381
++ */
++#if __has_attribute(__counted_by__)
++# define __counted_by(member)		__attribute__((__counted_by__(member)))
++#else
++# define __counted_by(member)
++#endif
++
+ /*
+  * Optional: not supported by gcc
+  * Optional: only supported since clang >= 14.0
+@@ -129,19 +142,6 @@
+ # define __designated_init
+ #endif
+ 
+-/*
+- * Optional: only supported since gcc >= 14
+- * Optional: only supported since clang >= 17
+- *
+- *   gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+- * clang: https://reviews.llvm.org/D148381
+- */
+-#if __has_attribute(__element_count__)
+-# define __counted_by(member)		__attribute__((__element_count__(#member)))
+-#else
+-# define __counted_by(member)
+-#endif
+-
+ /*
+  * Optional: only supported since clang >= 14.0
+  *
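
For context on the attribute relocated above: __counted_by() names the struct member holding a flexible array's element count, letting FORTIFY and the sanitizers bounds-check accesses. A hedged usage sketch -- struct sample is invented, and the fallback define mirrors the header's, compiling to nothing on toolchains without the attribute:

#include <stdlib.h>

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#if __has_attribute(__counted_by__)
# define __counted_by(member)	__attribute__((__counted_by__(member)))
#else
# define __counted_by(member)
#endif

struct sample {
	size_t count;
	int data[] __counted_by(count);	/* checked against ->count */
};

int main(void)
{
	struct sample *s = malloc(sizeof(*s) + 4 * sizeof(int));

	if (!s)
		return 1;
	s->count = 4;		/* set the counter before touching data[] */
	s->data[3] = 42;
	free(s);
	return 0;
}
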
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index 8de6b6e678295..34bcba5a70677 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -189,6 +189,8 @@ struct team {
+ 	struct net_device *dev; /* associated netdevice */
+ 	struct team_pcpu_stats __percpu *pcpu_stats;
+ 
++	const struct header_ops *header_ops_cache;
++
+ 	struct mutex lock; /* used for overall locking, e.g. port lists write */
+ 
+ 	/*
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index a92bce40b04b3..4a1dc88ddbff9 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -569,8 +569,12 @@ enum
+  * 	2) rcu_report_dead() reports the final quiescent states.
+  *
+  * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
++ *
++ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
+  */
+-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
++#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
++				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
++
+ 
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+  * kernel/softirq.c when adding a new softirq.
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index f5bb4415c5e2d..19ddc6c804008 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -259,7 +259,7 @@ enum {
+ 	 * advised to wait only for the following duration before
+ 	 * doing SRST.
+ 	 */
+-	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
++	ATA_TMOUT_PMP_SRST_WAIT	= 10000,
+ 
+ 	/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ 	 * be a spurious PHY event, so ignore the first PHY event that
+@@ -1155,6 +1155,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
+ 			      struct block_device *bdev,
+ 			      sector_t capacity, int geom[]);
+ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
++extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
+ extern int ata_scsi_slave_config(struct scsi_device *sdev);
+ extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
+@@ -1408,6 +1409,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
+ 	.this_id		= ATA_SHT_THIS_ID,		\
+ 	.emulated		= ATA_SHT_EMULATED,		\
+ 	.proc_name		= drv_name,			\
++	.slave_alloc		= ata_scsi_slave_alloc,		\
+ 	.slave_destroy		= ata_scsi_slave_destroy,	\
+ 	.bios_param		= ata_std_bios_param,		\
+ 	.unlock_native_capacity	= ata_scsi_unlock_native_capacity,\
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index dbf26bc89dd46..6d2a771debbad 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -919,7 +919,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
+ }
+ 
+-void mem_cgroup_handle_over_high(void);
++void mem_cgroup_handle_over_high(gfp_t gfp_mask);
+ 
+ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
+ 
+@@ -1460,7 +1460,7 @@ static inline void mem_cgroup_unlock_pages(void)
+ 	rcu_read_unlock();
+ }
+ 
+-static inline void mem_cgroup_handle_over_high(void)
++static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+ {
+ }
+ 
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 20eeba8b009df..cd628c4b011e5 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -48,6 +48,7 @@ struct nfs_client {
+ #define NFS_CS_NOPING		6		/* - don't ping on connect */
+ #define NFS_CS_DS		7		/* - Server is a DS */
+ #define NFS_CS_REUSEPORT	8		/* - reuse src port on reconnect */
++#define NFS_CS_PNFS		9		/* - Server used for pnfs */
+ 	struct sockaddr_storage	cl_addr;	/* server identifier */
+ 	size_t			cl_addrlen;
+ 	char *			cl_hostname;	/* hostname of server */
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index aa9f4c6ebe261..1c315f854ea80 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -157,7 +157,9 @@ extern	void nfs_unlock_request(struct nfs_page *req);
+ extern	void nfs_unlock_and_release_request(struct nfs_page *);
+ extern	struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+ extern	int nfs_page_group_lock_subrequests(struct nfs_page *head);
+-extern	void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
++extern void nfs_join_page_group(struct nfs_page *head,
++				struct nfs_commit_info *cinfo,
++				struct inode *inode);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
+index 2851894544496..f8f3e958e9cf2 100644
+--- a/include/linux/resume_user_mode.h
++++ b/include/linux/resume_user_mode.h
+@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
+ 	}
+ #endif
+ 
+-	mem_cgroup_handle_over_high();
++	mem_cgroup_handle_over_high(GFP_KERNEL);
+ 	blkcg_maybe_throttle_current();
+ 
+ 	rseq_handle_notify_resume(NULL, regs);
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 987a59d977c56..e9bd2f65d7f4e 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -512,8 +512,8 @@ do {									\
+ 
+ static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+ {
+-	do_raw_write_seqcount_begin(s);
+ 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
++	do_raw_write_seqcount_begin(s);
+ }
+ 
+ /**
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index dd40c75011d25..7c816359d5a98 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1682,7 +1682,7 @@ struct nft_trans_gc {
+ 	struct net		*net;
+ 	struct nft_set		*set;
+ 	u32			seq;
+-	u8			count;
++	u16			count;
+ 	void			*priv[NFT_TRANS_GC_BATCHCOUNT];
+ 	struct rcu_head		rcu;
+ };
+@@ -1700,8 +1700,9 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+ 
+ void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+ 
+-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+-					   unsigned int gc_seq);
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq);
++struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
+ 
+ void nft_setelem_data_deactivate(const struct net *net,
+ 				 const struct nft_set *set,
+diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
+index ec093594ba53d..4498f845b1122 100644
+--- a/include/scsi/scsi.h
++++ b/include/scsi/scsi.h
+@@ -157,6 +157,9 @@ enum scsi_disposition {
+ #define SCSI_3          4        /* SPC */
+ #define SCSI_SPC_2      5
+ #define SCSI_SPC_3      6
++#define SCSI_SPC_4	7
++#define SCSI_SPC_5	8
++#define SCSI_SPC_6	14
+ 
+ /*
+  * INQ PERIPHERAL QUALIFIERS
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index b9230b6add041..fd41fdac0a8e6 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -161,6 +161,10 @@ struct scsi_device {
+ 				 * pass settings from slave_alloc to scsi
+ 				 * core. */
+ 	unsigned int eh_timeout; /* Error handling timeout */
++
++	bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
++	bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
++
+ 	unsigned removable:1;
+ 	unsigned changed:1;	/* Data invalid due to media change */
+ 	unsigned busy:1;	/* Used to prevent races */
+@@ -193,7 +197,6 @@ struct scsi_device {
+ 	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ 	unsigned no_start_on_add:1;	/* do not issue start on add */
+ 	unsigned allow_restart:1; /* issue START_UNIT in error handler */
+-	unsigned manage_start_stop:1;	/* Let HLD (sd) manage start/stop */
+ 	unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
+ 	unsigned start_stop_pwr_cond:1;	/* Set power cond. in START_STOP_UNIT */
+ 	unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 60a9d59beeabb..25f668165b567 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1897,7 +1897,9 @@ union bpf_attr {
+  * 		performed again, if the helper is used in combination with
+  * 		direct packet access.
+  * 	Return
+- * 		0 on success, or a negative error in case of failure.
++ * 		0 on success, or a negative error in case of failure. Positive
++ * 		error indicates a potential drop or congestion in the target
++ * 		device. The particular positive error codes are not defined.
+  *
+  * u64 bpf_get_current_pid_tgid(void)
+  * 	Description
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 7837ba4fe7289..5c6c4269f7efe 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -29,6 +29,11 @@
+ 		struct TAG { MEMBERS } ATTRS NAME; \
+ 	}
+ 
++#ifdef __cplusplus
++/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
++#define __DECLARE_FLEX_ARRAY(T, member)	\
++	T member[0]
++#else
+ /**
+  * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+  *
+@@ -45,3 +50,9 @@
+ 		TYPE NAME[]; \
+ 	}
+ #endif
++
++#ifndef __counted_by
++#define __counted_by(m)
++#endif
++
++#endif /* _UAPI_LINUX_STDDEF_H */
+diff --git a/io_uring/fs.c b/io_uring/fs.c
+index f6a69a549fd45..08e3b175469c6 100644
+--- a/io_uring/fs.c
++++ b/io_uring/fs.c
+@@ -243,7 +243,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
+ 	const char __user *oldf, *newf;
+ 
+-	if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
++	if (sqe->buf_index || sqe->splice_fd_in)
+ 		return -EINVAL;
+ 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ 		return -EBADF;
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 4b38c97990872..197d8252ffc65 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -8498,7 +8498,7 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ 	tname = btf_name_by_offset(btf, walk_type->name_off);
+ 
+ 	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
+-	if (ret < 0)
++	if (ret >= sizeof(safe_tname))
+ 		return false;
+ 
+ 	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
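
The btf.c one-liner is worth a standalone demo: on truncation, snprintf() returns the length it would have written -- a value >= the buffer size -- never a negative number, so the old 'ret < 0' test could not catch an overlong name:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int ret = snprintf(buf, sizeof(buf), "%s%s",
			   "very_long_type_name", "__safe_fields");

	/* ret is the untruncated length; compare against the buffer size */
	printf("ret=%d truncated=%d buf=\"%s\"\n",
	       ret, ret >= (int)sizeof(buf), buf);
	return 0;
}
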
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 8a26cd8814c1b..e842229123ffc 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -198,12 +198,14 @@ static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *n
+ 	offload->netdev = netdev;
+ 
+ 	ondev = bpf_offload_find_netdev(offload->netdev);
++	/* When the program is offloaded, require the presence of a "true"
++	 * bpf_offload_netdev; avoid the one created for the !ondev case below.
++	 */
++	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
++		err = -EINVAL;
++		goto err_free;
++	}
+ 	if (!ondev) {
+-		if (bpf_prog_is_offloaded(prog->aux)) {
+-			err = -EINVAL;
+-			goto err_free;
+-		}
+-
+ 		/* When only binding to the device, explicitly
+ 		 * create an entry in the hashtable.
+ 		 */
+diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
+index 8d2ddcb7566b7..d869f51ea93a0 100644
+--- a/kernel/bpf/queue_stack_maps.c
++++ b/kernel/bpf/queue_stack_maps.c
+@@ -98,7 +98,12 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
+ 	int err = 0;
+ 	void *ptr;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, flags);
++	}
+ 
+ 	if (queue_stack_map_is_empty(qs)) {
+ 		memset(value, 0, qs->map.value_size);
+@@ -128,7 +133,12 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
+ 	void *ptr;
+ 	u32 index;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, flags);
++	}
+ 
+ 	if (queue_stack_map_is_empty(qs)) {
+ 		memset(value, 0, qs->map.value_size);
+@@ -193,7 +203,12 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
+ 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
+ 		return -EINVAL;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, irq_flags);
++	}
+ 
+ 	if (queue_stack_map_is_full(qs)) {
+ 		if (!replace) {
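
A userland analogue of the in_nmi() guard threaded through those three hunks: contexts that must never block (NMI in the kernel; think of a signal handler here) take the lock with trylock and return -EBUSY rather than risking a deadlock on a lock the interrupted code may already hold. queue_pop and the mutex are illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static int queue_pop(bool atomic_ctx)
{
	if (atomic_ctx) {
		if (pthread_mutex_trylock(&qlock))
			return -EBUSY;	/* bail out; caller retries later */
	} else {
		pthread_mutex_lock(&qlock);
	}
	/* ... pop an element under the lock ... */
	pthread_mutex_unlock(&qlock);
	return 0;
}

int main(void)
{
	printf("normal=%d atomic=%d\n", queue_pop(false), queue_pop(true));
	return 0;
}
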
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index f190651bcaddc..06366acd27b08 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -637,15 +637,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
+ 	return entry;
+ }
+ 
+-static void __dma_entry_alloc_check_leak(void)
++/*
++ * This should be called outside of free_entries_lock scope to avoid potential
++ * deadlocks with serial consoles that use DMA.
++ */
++static void __dma_entry_alloc_check_leak(u32 nr_entries)
+ {
+-	u32 tmp = nr_total_entries % nr_prealloc_entries;
++	u32 tmp = nr_entries % nr_prealloc_entries;
+ 
+ 	/* Shout each time we tick over some multiple of the initial pool */
+ 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
+ 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
+-			nr_total_entries,
+-			(nr_total_entries / nr_prealloc_entries));
++			nr_entries,
++			(nr_entries / nr_prealloc_entries));
+ 	}
+ }
+ 
+@@ -656,8 +660,10 @@ static void __dma_entry_alloc_check_leak(void)
+  */
+ static struct dma_debug_entry *dma_entry_alloc(void)
+ {
++	bool alloc_check_leak = false;
+ 	struct dma_debug_entry *entry;
+ 	unsigned long flags;
++	u32 nr_entries;
+ 
+ 	spin_lock_irqsave(&free_entries_lock, flags);
+ 	if (num_free_entries == 0) {
+@@ -667,13 +673,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
+ 			pr_err("debugging out of memory - disabling\n");
+ 			return NULL;
+ 		}
+-		__dma_entry_alloc_check_leak();
++		alloc_check_leak = true;
++		nr_entries = nr_total_entries;
+ 	}
+ 
+ 	entry = __dma_entry_alloc();
+ 
+ 	spin_unlock_irqrestore(&free_entries_lock, flags);
+ 
++	if (alloc_check_leak)
++		__dma_entry_alloc_check_leak(nr_entries);
++
+ #ifdef CONFIG_STACKTRACE
+ 	entry->stack_len = stack_trace_save(entry->stack_entries,
+ 					    ARRAY_SIZE(entry->stack_entries),
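
The dma-debug change follows a standard deadlock-avoidance shape: decide and snapshot under the spinlock, then do the slow, possibly reentrant work (printing to a console that may itself use DMA) only after dropping it. A small sketch with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int total_entries;

static void alloc_entry(void)
{
	int report = 0;
	unsigned int snapshot = 0;

	pthread_mutex_lock(&pool_lock);
	total_entries++;
	if (total_entries % 4 == 0) {	/* decide under the lock... */
		report = 1;
		snapshot = total_entries;
	}
	pthread_mutex_unlock(&pool_lock);

	if (report)			/* ...report after dropping it */
		printf("pool grown to %u entries\n", snapshot);
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		alloc_entry();
	return 0;
}
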
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 2b83e3ad9dca1..aa0a4a220719a 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -350,14 +350,14 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ 	}
+ 
+ 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
+-		default_nareas), SMP_CACHE_BYTES);
++		nareas), SMP_CACHE_BYTES);
+ 	if (!mem->areas) {
+ 		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
+ 		return;
+ 	}
+ 
+ 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
+-				default_nareas);
++				nareas);
+ 
+ 	if (flags & SWIOTLB_VERBOSE)
+ 		swiotlb_print_info();
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c52c2eba7c739..e8f73ff12126c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9271,7 +9271,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 	 * PF_KTHREAD should already be set at this point; regardless, make it
+ 	 * look like a proper per-CPU kthread.
+ 	 */
+-	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
+ 	kthread_set_per_cpu(idle, cpu);
+ 
+ #ifdef CONFIG_SMP
+diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
+index a286e726eb4b8..42c40cfdf8363 100644
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -101,6 +101,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
+ 
+ 	if (lowest_mask) {
+ 		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
++		cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
+ 
+ 		/*
+ 		 * We have to ensure that we have at least one bit
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 342f58a329f52..5007b25c5bc65 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -373,6 +373,7 @@ EXPORT_SYMBOL_GPL(play_idle_precise);
+ 
+ void cpu_startup_entry(enum cpuhp_state state)
+ {
++	current->flags |= PF_IDLE;
+ 	arch_cpu_idle_prepare();
+ 	cpuhp_online_idle(state);
+ 	while (1)
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index abf287b2678a1..bb0d8b9c09e7c 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2772,6 +2772,17 @@ static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u3
+ 	return arr.mods_cnt;
+ }
+ 
++static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
++{
++	u32 i;
++
++	for (i = 0; i < cnt; i++) {
++		if (!within_error_injection_list(addrs[i]))
++			return -EINVAL;
++	}
++	return 0;
++}
++
+ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+ {
+ 	struct bpf_kprobe_multi_link *link = NULL;
+@@ -2849,6 +2860,11 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 			goto error;
+ 	}
+ 
++	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
++		err = -EINVAL;
++		goto error;
++	}
++
+ 	link = kzalloc(sizeof(*link), GFP_KERNEL);
+ 	if (!link) {
+ 		err = -ENOMEM;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 52dea5dd5362e..da665764dd4d1 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -354,6 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
+ 	local_set(&bpage->commit, 0);
+ }
+ 
++static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
++{
++	return local_read(&bpage->page->commit);
++}
++
+ static void free_buffer_page(struct buffer_page *bpage)
+ {
+ 	free_page((unsigned long)bpage->page);
+@@ -1137,6 +1142,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 	if (full) {
+ 		poll_wait(filp, &work->full_waiters, poll_table);
+ 		work->full_waiters_pending = true;
++		if (!cpu_buffer->shortest_full ||
++		    cpu_buffer->shortest_full > full)
++			cpu_buffer->shortest_full = full;
+ 	} else {
+ 		poll_wait(filp, &work->waiters, poll_table);
+ 		work->waiters_pending = true;
+@@ -2011,7 +2019,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 			 * Increment overrun to account for the lost events.
+ 			 */
+ 			local_add(page_entries, &cpu_buffer->overrun);
+-			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
+ 			local_inc(&cpu_buffer->pages_lost);
+ 		}
+ 
+@@ -2206,6 +2214,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 				err = -ENOMEM;
+ 				goto out_err;
+ 			}
++
++			cond_resched();
+ 		}
+ 
+ 		cpus_read_lock();
+@@ -2373,11 +2383,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
+ 			       cpu_buffer->reader_page->read);
+ }
+ 
+-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
+-{
+-	return local_read(&bpage->page->commit);
+-}
+-
+ static struct ring_buffer_event *
+ rb_iter_head_event(struct ring_buffer_iter *iter)
+ {
+@@ -2396,6 +2401,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
+ 	 */
+ 	commit = rb_page_commit(iter_head_page);
+ 	smp_rmb();
++
++	/* An event needs to be at least 8 bytes in size */
++	if (iter->head > commit - 8)
++		goto reset;
++
+ 	event = __rb_page_index(iter_head_page, iter->head);
+ 	length = rb_event_length(event);
+ 
+@@ -2518,7 +2528,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ 		 * the counters.
+ 		 */
+ 		local_add(entries, &cpu_buffer->overrun);
+-		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
+ 		local_inc(&cpu_buffer->pages_lost);
+ 
+ 		/*
+@@ -2661,9 +2671,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 
+ 	event = __rb_page_index(tail_page, tail);
+ 
+-	/* account for padding bytes */
+-	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+-
+ 	/*
+ 	 * Save the original length to the meta data.
+ 	 * This will be used by the reader to add lost event
+@@ -2677,7 +2684,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 	 * write counter enough to allow another writer to slip
+ 	 * in on this page.
+ 	 * We put in a discarded commit instead, to make sure
+-	 * that this space is not used again.
++	 * that this space is not used again, and this space will
++	 * not be accounted into 'entries_bytes'.
+ 	 *
+ 	 * If we are less than the minimum size, we don't need to
+ 	 * worry about it.
+@@ -2702,6 +2710,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 	/* time delta must be non zero */
+ 	event->time_delta = 1;
+ 
++	/* account for padding bytes */
++	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
++
+ 	/* Make sure the padding is visible before the tail_page->write update */
+ 	smp_wmb();
+ 
+@@ -4216,7 +4227,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
+ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+ 
+ /**
+- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
++ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
+  * @buffer: The ring buffer
+  * @cpu: The per CPU buffer to read from.
+  */
+@@ -4724,6 +4735,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	length = rb_event_length(event);
+ 	cpu_buffer->reader_page->read += length;
++	cpu_buffer->read_bytes += length;
+ }
+ 
+ static void rb_advance_iter(struct ring_buffer_iter *iter)
+@@ -5817,7 +5829,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
+ 	} else {
+ 		/* update the entry counter */
+ 		cpu_buffer->read += rb_page_entries(reader);
+-		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
++		cpu_buffer->read_bytes += rb_page_commit(reader);
+ 
+ 		/* swap the pages */
+ 		rb_init_page(bpage);
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 33cb6af31f395..d4755d4f744cc 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -127,8 +127,13 @@ struct user_event_enabler {
+ /* Bit 7 is for freeing status of enablement */
+ #define ENABLE_VAL_FREEING_BIT 7
+ 
+-/* Only duplicate the bit value */
+-#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
++/* Bit 8 is for marking 32-bit on 64-bit */
++#define ENABLE_VAL_32_ON_64_BIT 8
++
++#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
++
++/* Only duplicate the bit and compat values */
++#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
+ 
+ #define ENABLE_BITOPS(e) (&(e)->values)
+ 
+@@ -174,6 +179,30 @@ struct user_event_validator {
+ 	int			flags;
+ };
+ 
++static inline void align_addr_bit(unsigned long *addr, int *bit,
++				  unsigned long *flags)
++{
++	if (IS_ALIGNED(*addr, sizeof(long))) {
++#ifdef __BIG_ENDIAN
++		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
++		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
++			*bit += 32;
++#endif
++		return;
++	}
++
++	*addr = ALIGN_DOWN(*addr, sizeof(long));
++
++	/*
++	 * We only support 32 and 64 bit values. The only time we need
++	 * to align is a 32 bit value on a 64 bit kernel, which on LE
++	 * is always 32 bits, and on BE requires no change when unaligned.
++	 */
++#ifdef __LITTLE_ENDIAN
++	*bit += 32;
++#endif
++}
++
+ typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
+ 				   void *tpdata, bool *faulted);
+ 
+@@ -482,6 +511,7 @@ static int user_event_enabler_write(struct user_event_mm *mm,
+ 	unsigned long *ptr;
+ 	struct page *page;
+ 	void *kaddr;
++	int bit = ENABLE_BIT(enabler);
+ 	int ret;
+ 
+ 	lockdep_assert_held(&event_mutex);
+@@ -497,6 +527,8 @@ static int user_event_enabler_write(struct user_event_mm *mm,
+ 		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
+ 		return -EBUSY;
+ 
++	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
++
+ 	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
+ 				    &page, NULL);
+ 
+@@ -515,9 +547,9 @@ static int user_event_enabler_write(struct user_event_mm *mm,
+ 
+ 	/* Update bit atomically, user tracers must be atomic as well */
+ 	if (enabler->event && enabler->event->status)
+-		set_bit(ENABLE_BIT(enabler), ptr);
++		set_bit(bit, ptr);
+ 	else
+-		clear_bit(ENABLE_BIT(enabler), ptr);
++		clear_bit(bit, ptr);
+ 
+ 	kunmap_local(kaddr);
+ 	unpin_user_pages_dirty_lock(&page, 1, true);
+@@ -849,6 +881,12 @@ static struct user_event_enabler
+ 	enabler->event = user;
+ 	enabler->addr = uaddr;
+ 	enabler->values = reg->enable_bit;
++
++#if BITS_PER_LONG >= 64
++	if (reg->enable_size == 4)
++		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
++#endif
++
+ retry:
+ 	/* Prevents state changes from racing with new enablers */
+ 	mutex_lock(&event_mutex);
+@@ -2376,7 +2414,8 @@ static long user_unreg_get(struct user_unreg __user *ureg,
+ }
+ 
+ static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
+-				   unsigned long uaddr, unsigned char bit)
++				   unsigned long uaddr, unsigned char bit,
++				   unsigned long flags)
+ {
+ 	struct user_event_enabler enabler;
+ 	int result;
+@@ -2384,7 +2423,7 @@ static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
+ 
+ 	memset(&enabler, 0, sizeof(enabler));
+ 	enabler.addr = uaddr;
+-	enabler.values = bit;
++	enabler.values = bit | flags;
+ retry:
+ 	/* Prevents state changes from racing with new enablers */
+ 	mutex_lock(&event_mutex);
+@@ -2414,6 +2453,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
+ 	struct user_event_mm *mm = current->user_event_mm;
+ 	struct user_event_enabler *enabler, *next;
+ 	struct user_unreg reg;
++	unsigned long flags;
+ 	long ret;
+ 
+ 	ret = user_unreg_get(ureg, &reg);
+@@ -2424,6 +2464,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
+ 	if (!mm)
+ 		return -ENOENT;
+ 
++	flags = 0;
+ 	ret = -ENOENT;
+ 
+ 	/*
+@@ -2440,6 +2481,9 @@ static long user_events_ioctl_unreg(unsigned long uarg)
+ 		    ENABLE_BIT(enabler) == reg.disable_bit) {
+ 			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
+ 
++			/* We must keep compat flags for the clear */
++			flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
++
+ 			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
+ 				user_event_enabler_destroy(enabler, true);
+ 
+@@ -2453,7 +2497,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
+ 	/* Ensure bit is now cleared for user, regardless of event status */
+ 	if (!ret)
+ 		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
+-					      reg.disable_bit);
++					      reg.disable_bit, flags);
+ 
+ 	return ret;
+ }
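
The align_addr_bit() logic above is easiest to see with bytes on the table: when a 32-bit enable word lives in the upper half of the aligned 64-bit slot the kernel actually loads, the bit index within that long shifts by 32 on little-endian machines. A self-contained demonstration (prints bit 35 on LE, bit 3 on BE):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t word = 0;
	uint32_t half = 1u << 3;	/* user set bit 3 of a 32-bit word */

	/* the 32-bit word sits at byte offset 4 of the aligned 64-bit slot */
	memcpy((unsigned char *)&word + 4, &half, sizeof(half));

	for (int bit = 0; bit < 64; bit++)
		if (word & (1ull << bit))
			printf("set bit observed at bit %d of the long\n", bit);
	return 0;
}
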
+diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
+index c4b455b5ee30b..dcf1ca6b31cc4 100644
+--- a/mm/damon/vaddr-test.h
++++ b/mm/damon/vaddr-test.h
+@@ -148,6 +148,8 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
+ 		KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
+ 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
+ 	}
++
++	damon_destroy_target(t);
+ }
+ 
+ /*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4fe5a562d0bbc..339dd2ccc9333 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2559,7 +2559,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+  * Scheduled by try_charge() to be executed from the userland return path
+  * and reclaims memory over the high limit.
+  */
+-void mem_cgroup_handle_over_high(void)
++void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+ {
+ 	unsigned long penalty_jiffies;
+ 	unsigned long pflags;
+@@ -2587,7 +2587,7 @@ retry_reclaim:
+ 	 */
+ 	nr_reclaimed = reclaim_high(memcg,
+ 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
+-				    GFP_KERNEL);
++				    gfp_mask);
+ 
+ 	/*
+ 	 * memory.high is breached and reclaim is unable to keep up. Throttle
+@@ -2823,7 +2823,7 @@ done_restock:
+ 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
+ 	    !(current->flags & PF_MEMALLOC) &&
+ 	    gfpflags_allow_blocking(gfp_mask)) {
+-		mem_cgroup_handle_over_high();
++		mem_cgroup_handle_over_high(gfp_mask);
+ 	}
+ 	return 0;
+ }
+@@ -3872,8 +3872,11 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
+ 		case _KMEM:
+-			/* kmem.limit_in_bytes is deprecated. */
+-			ret = -EOPNOTSUPP;
++			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
++				     "Writing any value to this file has no effect. "
++				     "Please report your usecase to linux-mm@kvack.org if you "
++				     "depend on this functionality.\n");
++			ret = 0;
+ 			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index ec2eaceffd74b..071edec3dca2a 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -426,6 +426,7 @@ struct queue_pages {
+ 	unsigned long start;
+ 	unsigned long end;
+ 	struct vm_area_struct *first;
++	bool has_unmovable;
+ };
+ 
+ /*
+@@ -446,9 +447,8 @@ static inline bool queue_folio_required(struct folio *folio,
+ /*
+  * queue_folios_pmd() has three possible return values:
+  * 0 - folios are placed on the right node or queued successfully, or
+- *     special page is met, i.e. huge zero page.
+- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+- *     specified.
++ *     special page is met, i.e. zero page, or unmovable page is found
++ *     but continue walking (indicated by queue_pages.has_unmovable).
+  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+  *        existing folio was already on a node that does not follow the
+  *        policy.
+@@ -479,7 +479,7 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ 		if (!vma_migratable(walk->vma) ||
+ 		    migrate_folio_add(folio, qp->pagelist, flags)) {
+-			ret = 1;
++			qp->has_unmovable = true;
+ 			goto unlock;
+ 		}
+ 	} else
+@@ -495,9 +495,8 @@ unlock:
+  *
+  * queue_folios_pte_range() has three possible return values:
+  * 0 - folios are placed on the right node or queued successfully, or
+- *     special page is met, i.e. zero page.
+- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+- *     specified.
++ *     special page is met, i.e. zero page, or unmovable page is found
++ *     but continue walking (indicated by queue_pages.has_unmovable).
+  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
+  *        on a node that does not follow the policy.
+  */
+@@ -508,7 +507,6 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
+ 	struct folio *folio;
+ 	struct queue_pages *qp = walk->private;
+ 	unsigned long flags = qp->flags;
+-	bool has_unmovable = false;
+ 	pte_t *pte, *mapped_pte;
+ 	pte_t ptent;
+ 	spinlock_t *ptl;
+@@ -538,11 +536,12 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
+ 		if (!queue_folio_required(folio, qp))
+ 			continue;
+ 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+-			/* MPOL_MF_STRICT must be specified if we get here */
+-			if (!vma_migratable(vma)) {
+-				has_unmovable = true;
+-				break;
+-			}
++			/*
++			 * MPOL_MF_STRICT must be specified if we get here.
++			 * Continue walking vmas due to MPOL_MF_MOVE* flags.
++			 */
++			if (!vma_migratable(vma))
++				qp->has_unmovable = true;
+ 
+ 			/*
+ 			 * Do not abort immediately since there may be
+@@ -550,16 +549,13 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
+ 			 * need migrate other LRU pages.
+ 			 */
+ 			if (migrate_folio_add(folio, qp->pagelist, flags))
+-				has_unmovable = true;
++				qp->has_unmovable = true;
+ 		} else
+ 			break;
+ 	}
+ 	pte_unmap_unlock(mapped_pte, ptl);
+ 	cond_resched();
+ 
+-	if (has_unmovable)
+-		return 1;
+-
+ 	return addr != end ? -EIO : 0;
+ }
+ 
+@@ -599,7 +595,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
+ 		 * Detecting misplaced folio but allow migrating folios which
+ 		 * have been queued.
+ 		 */
+-		ret = 1;
++		qp->has_unmovable = true;
+ 		goto unlock;
+ 	}
+ 
+@@ -620,7 +616,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
+ 			 * Failed to isolate folio but allow migrating pages
+ 			 * which have been queued.
+ 			 */
+-			ret = 1;
++			qp->has_unmovable = true;
+ 	}
+ unlock:
+ 	spin_unlock(ptl);
+@@ -756,12 +752,15 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 		.start = start,
+ 		.end = end,
+ 		.first = NULL,
++		.has_unmovable = false,
+ 	};
+ 	const struct mm_walk_ops *ops = lock_vma ?
+ 			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
+ 
+ 	err = walk_page_range(mm, start, end, ops, &qp);
+ 
++	if (qp.has_unmovable)
++		err = 1;
+ 	if (!qp.first)
+ 		/* whole range in hole */
+ 		err = -EFAULT;
+@@ -1358,7 +1357,7 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 				putback_movable_pages(&pagelist);
+ 		}
+ 
+-		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
++		if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
+ 			err = -EIO;
+ 	} else {
+ up_out:
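
The mempolicy rework swaps an early-abort return code for an accumulate-and-report flag: the walk keeps queueing whatever it can migrate and signals the unmovable page once, at the end. The toy walk below shows the shape; the page array and error value are illustrative:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int pages[] = { 0, 1, 0, 1, 0 };	/* 1 = unmovable */
	bool has_unmovable = false;
	int queued = 0;

	for (size_t i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		if (pages[i]) {
			has_unmovable = true;	/* note it, keep walking */
			continue;
		}
		queued++;			/* still queue the rest */
	}
	printf("queued=%d err=%d\n", queued, has_unmovable ? -5 : 0);
	return 0;
}
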
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 7d3460c7a480b..d322bfae8f69b 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2438,7 +2438,7 @@ void free_unref_page(struct page *page, unsigned int order)
+ 	struct per_cpu_pages *pcp;
+ 	struct zone *zone;
+ 	unsigned long pfn = page_to_pfn(page);
+-	int migratetype;
++	int migratetype, pcpmigratetype;
+ 
+ 	if (!free_unref_page_prepare(page, pfn, order))
+ 		return;
+@@ -2446,24 +2446,24 @@ void free_unref_page(struct page *page, unsigned int order)
+ 	/*
+ 	 * We only track unmovable, reclaimable and movable on pcp lists.
+ 	 * Place ISOLATE pages on the isolated list because they are being
+-	 * offlined but treat HIGHATOMIC as movable pages so we can get those
+-	 * areas back if necessary. Otherwise, we may have to free
++	 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
++	 * get those areas back if necessary. Otherwise, we may have to free
+ 	 * excessively into the page allocator
+ 	 */
+-	migratetype = get_pcppage_migratetype(page);
++	migratetype = pcpmigratetype = get_pcppage_migratetype(page);
+ 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
+ 		if (unlikely(is_migrate_isolate(migratetype))) {
+ 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
+ 			return;
+ 		}
+-		migratetype = MIGRATE_MOVABLE;
++		pcpmigratetype = MIGRATE_MOVABLE;
+ 	}
+ 
+ 	zone = page_zone(page);
+ 	pcp_trylock_prepare(UP_flags);
+ 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
+ 	if (pcp) {
+-		free_unref_page_commit(zone, pcp, page, migratetype, order);
++		free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
+ 		pcp_spin_unlock(pcp);
+ 	} else {
+ 		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index d1555ea2981ac..5658da50a2d07 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -479,7 +479,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
+ 
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+-	int refcnt;
++	int err = -EBUSY;
+ 	bool rcu_set;
+ 
+ 	if (unlikely(!s) || !kasan_check_byte(s))
+@@ -490,17 +490,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
+ 
+ 	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+ 
+-	refcnt = --s->refcount;
+-	if (refcnt)
++	s->refcount--;
++	if (s->refcount)
+ 		goto out_unlock;
+ 
+-	WARN(shutdown_cache(s),
+-	     "%s %s: Slab cache still has objects when called from %pS",
++	err = shutdown_cache(s);
++	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+ 	     __func__, s->name, (void *)_RET_IP_);
+ out_unlock:
+ 	mutex_unlock(&slab_mutex);
+ 	cpus_read_unlock();
+-	if (!refcnt && !rcu_set)
++	if (!err && !rcu_set)
+ 		kmem_cache_release(s);
+ }
+ EXPORT_SYMBOL(kmem_cache_destroy);
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 6116eba1bd891..bb1ab53e54e03 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -124,7 +124,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
+ 
+ 	skb = skb_clone(skb, GFP_ATOMIC);
+ 	if (!skb) {
+-		dev->stats.tx_dropped++;
++		DEV_STATS_INC(dev, tx_dropped);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -267,7 +267,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ 
+ 	skb = skb_copy(skb, GFP_ATOMIC);
+ 	if (!skb) {
+-		dev->stats.tx_dropped++;
++		DEV_STATS_INC(dev, tx_dropped);
+ 		return;
+ 	}
+ 
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index c34a0b0901b07..c729528b5e85f 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -181,12 +181,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 			if ((mdst && mdst->host_joined) ||
+ 			    br_multicast_is_router(brmctx, skb)) {
+ 				local_rcv = true;
+-				br->dev->stats.multicast++;
++				DEV_STATS_INC(br->dev, multicast);
+ 			}
+ 			mcast_hit = true;
+ 		} else {
+ 			local_rcv = true;
+-			br->dev->stats.multicast++;
++			DEV_STATS_INC(br->dev, multicast);
+ 		}
+ 		break;
+ 	case BR_PKT_UNICAST:
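[Editor's note on the two bridge hunks above: a plain "dev->stats.field++" from concurrent softirq contexts is a data race that can lose increments; DEV_STATS_INC performs an atomic per-field update. The in-kernel macro is defined differently (over atomic_long fields in struct net_device); this userspace model with C11 atomics only illustrates the property it buys:

	#include <stdatomic.h>
	#include <stdio.h>

	struct dev_stats_model {
		atomic_ulong tx_dropped;
		atomic_ulong multicast;
	};
	struct net_device_model { struct dev_stats_model stats; };

	/* One atomic read-modify-write per event: concurrent callers on the
	 * bridge fast path cannot lose updates the way "stats.field++" can. */
	#define DEV_STATS_INC(dev, field) \
		atomic_fetch_add_explicit(&(dev)->stats.field, 1, memory_order_relaxed)

	int main(void)
	{
		struct net_device_model d = { 0 };

		DEV_STATS_INC(&d, tx_dropped);
		DEV_STATS_INC(&d, multicast);
		printf("%lu %lu\n",
		       atomic_load(&d.stats.tx_dropped),
		       atomic_load(&d.stats.multicast));
		return 0;
	}
]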
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 6bed3992df814..aac954d1f757d 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1402,7 +1402,7 @@ proto_again:
+ 			break;
+ 		}
+ 
+-		nhoff += ntohs(hdr->message_length);
++		nhoff += sizeof(struct ptp_header);
+ 		fdret = FLOW_DISSECT_RET_OUT_GOOD;
+ 		break;
+ 	}
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index a5361fb7a415b..fa14eef8f0688 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -255,13 +255,8 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
+ 	int err;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+-	 * which is in byte 7 of the dccp header.
+-	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+-	 *
+-	 * Later on, we want to access the sequence number fields, which are
+-	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+-	 */
++	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++		return -EINVAL;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ 		return -EINVAL;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 33f6ccf6ba77b..c693a570682fb 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -83,13 +83,8 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	__u64 seq;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+-	 * which is in byte 7 of the dccp header.
+-	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+-	 *
+-	 * Later on, we want to access the sequence number fields, which are
+-	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+-	 */
++	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++		return -EINVAL;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ 		return -EINVAL;
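[Editor's note on the two DCCP hunks above: the error handlers previously trusted that the ICMP caller had pulled 8 bytes, leaving the rest of the fixed header unvalidated before the cast. The fix pulls sizeof(*dh) explicitly. A model of the validate-then-cast pattern; the struct size and helper are illustrative stand-ins for struct dccp_hdr and pskb_may_pull():

	#include <stdint.h>
	#include <stddef.h>

	/* Stand-in for the fixed part of struct dccp_hdr (size illustrative). */
	struct dccp_hdr_model { uint8_t bytes[12]; };

	/* Model of pskb_may_pull(): may we read [offset, offset + len)? */
	static int may_pull(size_t linear_len, size_t offset, size_t len)
	{
		return len <= linear_len && offset <= linear_len - len;
	}

	static int err_handler(const uint8_t *data, size_t linear_len, size_t offset)
	{
		const struct dccp_hdr_model *dh;

		/* Validate the whole fixed header before the cast; trusting
		 * the caller's 8-byte pull left the tail bytes unchecked. */
		if (!may_pull(linear_len, offset, sizeof(*dh)))
			return -1;
		dh = (const struct dccp_hdr_model *)(data + offset);
		(void)dh;	/* variable-length part would be pulled next */
		return 0;
	}

	int main(void)
	{
		uint8_t pkt[64] = { 0 };

		return (err_handler(pkt, 8, 0) == -1 &&	/* too short: rejected */
			err_handler(pkt, 64, 0) == 0) ? 0 : 1;
	}
]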
+diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c
+index 6d37bab35c8fc..16ed7bfd29e4f 100644
+--- a/net/handshake/handshake-test.c
++++ b/net/handshake/handshake-test.c
+@@ -235,7 +235,7 @@ static void handshake_req_submit_test4(struct kunit *test)
+ 	KUNIT_EXPECT_PTR_EQ(test, req, result);
+ 
+ 	handshake_req_cancel(sock->sk);
+-	sock_release(sock);
++	fput(filp);
+ }
+ 
+ static void handshake_req_submit_test5(struct kunit *test)
+@@ -272,7 +272,7 @@ static void handshake_req_submit_test5(struct kunit *test)
+ 	/* Assert */
+ 	KUNIT_EXPECT_EQ(test, err, -EAGAIN);
+ 
+-	sock_release(sock);
++	fput(filp);
+ 	hn->hn_pending = saved;
+ }
+ 
+@@ -306,7 +306,7 @@ static void handshake_req_submit_test6(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, err, -EBUSY);
+ 
+ 	handshake_req_cancel(sock->sk);
+-	sock_release(sock);
++	fput(filp);
+ }
+ 
+ static void handshake_req_cancel_test1(struct kunit *test)
+@@ -340,7 +340,7 @@ static void handshake_req_cancel_test1(struct kunit *test)
+ 	/* Assert */
+ 	KUNIT_EXPECT_TRUE(test, result);
+ 
+-	sock_release(sock);
++	fput(filp);
+ }
+ 
+ static void handshake_req_cancel_test2(struct kunit *test)
+@@ -382,7 +382,7 @@ static void handshake_req_cancel_test2(struct kunit *test)
+ 	/* Assert */
+ 	KUNIT_EXPECT_TRUE(test, result);
+ 
+-	sock_release(sock);
++	fput(filp);
+ }
+ 
+ static void handshake_req_cancel_test3(struct kunit *test)
+@@ -427,7 +427,7 @@ static void handshake_req_cancel_test3(struct kunit *test)
+ 	/* Assert */
+ 	KUNIT_EXPECT_FALSE(test, result);
+ 
+-	sock_release(sock);
++	fput(filp);
+ }
+ 
+ static struct handshake_req *handshake_req_destroy_test;
+@@ -471,7 +471,7 @@ static void handshake_req_destroy_test1(struct kunit *test)
+ 	handshake_req_cancel(sock->sk);
+ 
+ 	/* Act */
+-	sock_release(sock);
++	fput(filp);
+ 
+ 	/* Assert */
+ 	KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index b77f1189d19d1..6d14d935ee828 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -288,13 +288,13 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 
+ 	/* And leave the HSR tag. */
+ 	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
+-		pull_size = sizeof(struct ethhdr);
++		pull_size = sizeof(struct hsr_tag);
+ 		skb_pull(skb, pull_size);
+ 		total_pull_size += pull_size;
+ 	}
+ 
+ 	/* And leave the HSR sup tag. */
+-	pull_size = sizeof(struct hsr_tag);
++	pull_size = sizeof(struct hsr_sup_tag);
+ 	skb_pull(skb, pull_size);
+ 	total_pull_size += pull_size;
+ 
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 6851e33df7d14..18e01791ad799 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -83,7 +83,7 @@ struct hsr_vlan_ethhdr {
+ struct hsr_sup_tlv {
+ 	u8		HSR_TLV_type;
+ 	u8		HSR_TLV_length;
+-};
++} __packed;
+ 
+ /* HSR/PRP Supervision Frame data types.
+  * Field names as defined in the IEC:2010 standard for HSR.
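[Editor's note on the hsr hunks above: hsr_handle_sup_frame() feeds sizeof() of these structs straight into skb_pull(), so the structs must describe exact wire lengths. Marking hsr_sup_tlv __packed pins the layout so no compiler padding can ever creep into that arithmetic. A compilable illustration of why padded sizeof() is the wrong pull length (fields chosen to force padding; not the HSR layout):

	#include <stdio.h>

	struct tlv_padded {
		unsigned char type;
		unsigned char len;
		unsigned int  data;	/* compiler may insert 2 pad bytes */
	};

	struct tlv_packed {
		unsigned char type;
		unsigned char len;
		unsigned int  data;
	} __attribute__((packed));

	int main(void)
	{
		/* Typically prints "8 6": only the packed sizeof() matches
		 * the number of bytes actually on the wire. */
		printf("%zu %zu\n",
		       sizeof(struct tlv_padded), sizeof(struct tlv_packed));
		return 0;
	}
]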
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 33626619aee79..0a53ca6ebb0d5 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1213,6 +1213,7 @@ EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
+ 
+ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ {
++	struct net_device *dev;
+ 	struct ip_options opt;
+ 	int res;
+ 
+@@ -1230,7 +1231,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ 		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+ 
+ 		rcu_read_lock();
+-		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
++		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
++		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
+ 		rcu_read_unlock();
+ 
+ 		if (res)
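[Editor's note on the ipv4/route.c hunk above: skb->dev may already be NULL by the time the destination-unreachable ICMP is built, so the net namespace is now derived from the device cached in the attached route when needed. The NULL-or-fallback shape, with stand-in types:

	#include <stdio.h>

	struct net_device_model { const char *name; };
	struct rtable_model { struct net_device_model *dst_dev; };

	static struct net_device_model *
	icmp_dev(struct net_device_model *skb_dev, struct rtable_model *rt)
	{
		return skb_dev ? skb_dev : rt->dst_dev;	/* never deref NULL */
	}

	int main(void)
	{
		struct net_device_model eth0 = { "eth0" };
		struct rtable_model rt = { &eth0 };

		printf("%s\n", icmp_dev(NULL, &rt)->name);
		return 0;
	}
]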
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index c254accb14dee..cd15ec73073e0 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1269,12 +1269,13 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
+ 
+ 			if (rcv_wnd == rcv_wnd_old)
+ 				break;
+-			if (before64(rcv_wnd_new, rcv_wnd)) {
++
++			rcv_wnd_old = rcv_wnd;
++			if (before64(rcv_wnd_new, rcv_wnd_old)) {
+ 				MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICTUPDATE);
+ 				goto raise_win;
+ 			}
+ 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
+-			rcv_wnd_old = rcv_wnd;
+ 		}
+ 		return;
+ 	}
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 40258d9f8c799..6947b4b2519c9 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -772,6 +772,46 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 	return moved;
+ }
+ 
++static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
++{
++	int err = sock_error(ssk);
++	int ssk_state;
++
++	if (!err)
++		return false;
++
++	/* only propagate errors on fallen-back sockets or
++	 * on MPC connect
++	 */
++	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
++		return false;
++
++	/* We need to propagate only transition to CLOSE state.
++	 * Orphaned socket will see such state change via
++	 * subflow_sched_work_if_closed() and that path will properly
++	 * destroy the msk as needed.
++	 */
++	ssk_state = inet_sk_state_load(ssk);
++	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
++		inet_sk_state_store(sk, ssk_state);
++	WRITE_ONCE(sk->sk_err, -err);
++
++	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
++	smp_wmb();
++	sk_error_report(sk);
++	return true;
++}
++
++void __mptcp_error_report(struct sock *sk)
++{
++	struct mptcp_subflow_context *subflow;
++	struct mptcp_sock *msk = mptcp_sk(sk);
++
++	mptcp_for_each_subflow(msk, subflow)
++		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
++			break;
++}
++
+ /* In most cases we will be able to lock the mptcp socket.  If its already
+  * owned, we need to defer to the work queue to avoid ABBA deadlock.
+  */
+@@ -2381,6 +2421,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	}
+ 
+ out_release:
++	__mptcp_subflow_error_report(sk, ssk);
+ 	release_sock(ssk);
+ 
+ 	sock_put(ssk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 94ae7dd01c65e..c7bd99b8e7b7a 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1362,42 +1362,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
+ 	*full_space = tcp_full_space(sk);
+ }
+ 
+-void __mptcp_error_report(struct sock *sk)
+-{
+-	struct mptcp_subflow_context *subflow;
+-	struct mptcp_sock *msk = mptcp_sk(sk);
+-
+-	mptcp_for_each_subflow(msk, subflow) {
+-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-		int err = sock_error(ssk);
+-		int ssk_state;
+-
+-		if (!err)
+-			continue;
+-
+-		/* only propagate errors on fallen-back sockets or
+-		 * on MPC connect
+-		 */
+-		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+-			continue;
+-
+-		/* We need to propagate only transition to CLOSE state.
+-		 * Orphaned socket will see such state change via
+-		 * subflow_sched_work_if_closed() and that path will properly
+-		 * destroy the msk as needed.
+-		 */
+-		ssk_state = inet_sk_state_load(ssk);
+-		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+-			inet_sk_state_store(sk, ssk_state);
+-		WRITE_ONCE(sk->sk_err, -err);
+-
+-		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
+-		smp_wmb();
+-		sk_error_report(sk);
+-		break;
+-	}
+-}
+-
+ static void subflow_error_report(struct sock *ssk)
+ {
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index 62fb1031763d1..f8854bff286cb 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,6 +89,11 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ 	if ((had_link == has_link) || chained)
+ 		return 0;
+ 
++	if (had_link)
++		netif_carrier_off(ndp->ndev.dev);
++	else
++		netif_carrier_on(ndp->ndev.dev);
++
+ 	if (!ndp->multi_package && !nc->package->multi_channel) {
+ 		if (had_link) {
+ 			ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 0b68e2e2824e1..58608460cf6df 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -682,6 +682,14 @@ __ip_set_put(struct ip_set *set)
+ /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
+  * a separate reference counter
+  */
++static void
++__ip_set_get_netlink(struct ip_set *set)
++{
++	write_lock_bh(&ip_set_ref_lock);
++	set->ref_netlink++;
++	write_unlock_bh(&ip_set_ref_lock);
++}
++
+ static void
+ __ip_set_put_netlink(struct ip_set *set)
+ {
+@@ -1693,11 +1701,11 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+ 
+ 	do {
+ 		if (retried) {
+-			__ip_set_get(set);
++			__ip_set_get_netlink(set);
+ 			nfnl_unlock(NFNL_SUBSYS_IPSET);
+ 			cond_resched();
+ 			nfnl_lock(NFNL_SUBSYS_IPSET);
+-			__ip_set_put(set);
++			__ip_set_put_netlink(set);
+ 		}
+ 
+ 		ip_set_lock(set);
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index 0d36d7285e3f0..747dc22655018 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -380,6 +380,8 @@ __bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
++	if (!nf_ct_is_confirmed(nfct))
++		nfct->timeout += nfct_time_stamp;
+ 	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
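[Editor's note on the nf_conntrack_bpf.c hunk above: an entry that has not been confirmed still carries a *relative* timeout, so inserting it directly would store a deadline that is already in the past; the fix rebases it to an absolute clock value exactly once, as confirmation normally would. A model, with a plain counter standing in for nfct_time_stamp:

	#include <stdio.h>

	static unsigned long now = 100000;	/* nfct_time_stamp stand-in */

	struct ct_model { unsigned long timeout; int confirmed; };

	static void ct_insert(struct ct_model *ct)
	{
		/* relative -> absolute, applied exactly once at insertion */
		if (!ct->confirmed)
			ct->timeout += now;
		ct->confirmed = 1;
	}

	int main(void)
	{
		struct ct_model ct = { .timeout = 3000, .confirmed = 0 };

		ct_insert(&ct);
		printf("expires at %lu\n", ct.timeout);	/* 103000, not 3000 */
		return 0;
	}
]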
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 0b513f7bf9f39..dd62cc12e7750 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -40,10 +40,10 @@ static const u8 nf_ct_ext_type_len[NF_CT_EXT_NUM] = {
+ 	[NF_CT_EXT_ECACHE] = sizeof(struct nf_conntrack_ecache),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+-	[NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_acct),
++	[NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_tstamp),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+-	[NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_tstamp),
++	[NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_timeout),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_LABELS
+ 	[NF_CT_EXT_LABELS] = sizeof(struct nf_conn_labels),
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index a72934f00804e..976a9b763b9bb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1219,6 +1219,10 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ 	     flags & NFT_TABLE_F_OWNER))
+ 		return -EOPNOTSUPP;
+ 
++	/* No dormant off/on/off/on games in single transaction */
++	if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++		return -EINVAL;
++
+ 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+ 				sizeof(struct nft_trans_table));
+ 	if (trans == NULL)
+@@ -1432,7 +1436,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, chain))
+ 			continue;
+ 
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx->chain = chain;
+@@ -1446,8 +1450,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, set))
+ 			continue;
+ 
+-		if (nft_set_is_anonymous(set) &&
+-		    !list_empty(&set->bindings))
++		if (nft_set_is_anonymous(set))
+ 			continue;
+ 
+ 		err = nft_delset(ctx, set);
+@@ -1477,7 +1480,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, chain))
+ 			continue;
+ 
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx->chain = chain;
+@@ -2910,6 +2913,9 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
+ 		return PTR_ERR(chain);
+ 	}
+ 
++	if (nft_chain_binding(chain))
++		return -EOPNOTSUPP;
++
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
+ 
+ 	if (nla[NFTA_CHAIN_HOOK]) {
+@@ -3449,6 +3455,8 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ 	struct net *net = sock_net(skb->sk);
+ 	const struct nft_rule *rule, *prule;
+ 	unsigned int s_idx = cb->args[0];
++	unsigned int entries = 0;
++	int ret = 0;
+ 	u64 handle;
+ 
+ 	prule = NULL;
+@@ -3471,9 +3479,11 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ 					NFT_MSG_NEWRULE,
+ 					NLM_F_MULTI | NLM_F_APPEND,
+ 					table->family,
+-					table, chain, rule, handle, reset) < 0)
+-			return 1;
+-
++					table, chain, rule, handle, reset) < 0) {
++			ret = 1;
++			break;
++		}
++		entries++;
+ 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ cont:
+ 		prule = rule;
+@@ -3481,10 +3491,10 @@ cont_skip:
+ 		(*idx)++;
+ 	}
+ 
+-	if (reset && *idx)
+-		audit_log_rule_reset(table, cb->seq, *idx);
++	if (reset && entries)
++		audit_log_rule_reset(table, cb->seq, entries);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int nf_tables_dump_rules(struct sk_buff *skb,
+@@ -3968,6 +3978,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 	}
+ 
+ 	if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
++		if (nft_chain_binding(chain)) {
++			err = -EOPNOTSUPP;
++			goto err_destroy_flow_rule;
++		}
++
+ 		err = nft_delrule(&ctx, old_rule);
+ 		if (err < 0)
+ 			goto err_destroy_flow_rule;
+@@ -4075,7 +4090,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
+ 			return PTR_ERR(chain);
+ 		}
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			return -EOPNOTSUPP;
+ 	}
+ 
+@@ -4109,7 +4124,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		list_for_each_entry(chain, &table->chains, list) {
+ 			if (!nft_is_active_next(net, chain))
+ 				continue;
+-			if (nft_chain_is_bound(chain))
++			if (nft_chain_binding(chain))
+ 				continue;
+ 
+ 			ctx.chain = chain;
+@@ -7180,8 +7195,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+ 	if (IS_ERR(set))
+ 		return PTR_ERR(set);
+ 
+-	if (!list_empty(&set->bindings) &&
+-	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
++	if (nft_set_is_anonymous(set))
++		return -EOPNOTSUPP;
++
++	if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
+ 		return -EBUSY;
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -9559,12 +9576,15 @@ static int nft_trans_gc_space(struct nft_trans_gc *trans)
+ struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ 					      unsigned int gc_seq, gfp_t gfp)
+ {
++	struct nft_set *set;
++
+ 	if (nft_trans_gc_space(gc))
+ 		return gc;
+ 
++	set = gc->set;
+ 	nft_trans_gc_queue_work(gc);
+ 
+-	return nft_trans_gc_alloc(gc->set, gc_seq, gfp);
++	return nft_trans_gc_alloc(set, gc_seq, gfp);
+ }
+ 
+ void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
+@@ -9579,15 +9599,18 @@ void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
+ 
+ struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
+ {
++	struct nft_set *set;
++
+ 	if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
+ 		return NULL;
+ 
+ 	if (nft_trans_gc_space(gc))
+ 		return gc;
+ 
++	set = gc->set;
+ 	call_rcu(&gc->rcu, nft_trans_gc_trans_free);
+ 
+-	return nft_trans_gc_alloc(gc->set, 0, gfp);
++	return nft_trans_gc_alloc(set, 0, gfp);
+ }
+ 
+ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+@@ -9602,8 +9625,9 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ 	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+ 
+-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+-					   unsigned int gc_seq)
++static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
++						  unsigned int gc_seq,
++						  bool sync)
+ {
+ 	struct nft_set_elem_catchall *catchall;
+ 	const struct nft_set *set = gc->set;
+@@ -9619,7 +9643,11 @@ struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+ 
+ 		nft_set_elem_dead(ext);
+ dead_elem:
+-		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++		if (sync)
++			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++		else
++			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++
+ 		if (!gc)
+ 			return NULL;
+ 
+@@ -9629,6 +9657,17 @@ dead_elem:
+ 	return gc;
+ }
+ 
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq)
++{
++	return nft_trans_gc_catchall(gc, gc_seq, false);
++}
++
++struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
++{
++	return nft_trans_gc_catchall(gc, 0, true);
++}
++
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -11048,7 +11087,7 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 	ctx.family = table->family;
+ 	ctx.table = table;
+ 	list_for_each_entry(chain, &table->chains, list) {
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx.chain = chain;
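[Editor's note on the two gc-queue hunks in nf_tables_api.c above: nft_trans_gc_queue_work() and call_rcu() hand the batch to another context that may free it immediately, so gc->set must be loaded into a local *before* the hand-off — reading it afterwards is a use-after-free. The shape of the fix, modeled in userspace with free() standing in for the asynchronous release:

	#include <stdlib.h>
	#include <stdio.h>

	struct set_model { const char *name; };
	struct gc_batch { struct set_model *set; };

	static void queue_batch(struct gc_batch *gc)
	{
		free(gc);	/* worker/RCU may release the batch at once */
	}

	static struct gc_batch *requeue(struct gc_batch *gc)
	{
		struct set_model *set = gc->set;	/* load before hand-off */
		struct gc_batch *fresh;

		queue_batch(gc);		/* gc is dead past this line */

		fresh = malloc(sizeof(*fresh));
		if (fresh)
			fresh->set = set;	/* not gc->set: that is UAF */
		return fresh;
	}

	int main(void)
	{
		struct set_model s = { "s" };
		struct gc_batch *gc = malloc(sizeof(*gc));

		if (!gc)
			return 1;
		gc->set = &s;
		gc = requeue(gc);
		printf("%s\n", gc ? gc->set->name : "-");
		free(gc);
		return 0;
	}
]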
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 524763659f251..2013de934cef0 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -338,12 +338,9 @@ static void nft_rhash_gc(struct work_struct *work)
+ 
+ 	while ((he = rhashtable_walk_next(&hti))) {
+ 		if (IS_ERR(he)) {
+-			if (PTR_ERR(he) != -EAGAIN) {
+-				nft_trans_gc_destroy(gc);
+-				gc = NULL;
+-				goto try_later;
+-			}
+-			continue;
++			nft_trans_gc_destroy(gc);
++			gc = NULL;
++			goto try_later;
+ 		}
+ 
+ 		/* Ruleset has been updated, try later. */
+@@ -372,7 +369,7 @@ dead_elem:
+ 		nft_trans_gc_elem_add(gc, he);
+ 	}
+ 
+-	gc = nft_trans_gc_catchall(gc, gc_seq);
++	gc = nft_trans_gc_catchall_async(gc, gc_seq);
+ 
+ try_later:
+ 	/* catchall list iteration requires rcu read side lock. */
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 6af9c9ed4b5c3..c0dcc40de358f 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1596,7 +1596,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ 
+ 			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ 			if (!gc)
+-				break;
++				return;
+ 
+ 			nft_pipapo_gc_deactivate(net, set, e);
+ 			pipapo_drop(m, rulemap);
+@@ -1610,7 +1610,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ 		}
+ 	}
+ 
+-	gc = nft_trans_gc_catchall(gc, 0);
++	gc = nft_trans_gc_catchall_sync(gc);
+ 	if (gc) {
+ 		nft_trans_gc_queue_sync_done(gc);
+ 		priv->last_gc = jiffies;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index f250b5399344a..487572dcd6144 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -622,8 +622,7 @@ static void nft_rbtree_gc(struct work_struct *work)
+ 	if (!gc)
+ 		goto done;
+ 
+-	write_lock_bh(&priv->lock);
+-	write_seqcount_begin(&priv->count);
++	read_lock_bh(&priv->lock);
+ 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ 
+ 		/* Ruleset has been updated, try later. */
+@@ -670,11 +669,10 @@ dead_elem:
+ 		nft_trans_gc_elem_add(gc, rbe);
+ 	}
+ 
+-	gc = nft_trans_gc_catchall(gc, gc_seq);
++	gc = nft_trans_gc_catchall_async(gc, gc_seq);
+ 
+ try_later:
+-	write_seqcount_end(&priv->count);
+-	write_unlock_bh(&priv->lock);
++	read_unlock_bh(&priv->lock);
+ 
+ 	if (gc)
+ 		nft_trans_gc_queue_async_done(gc);
+diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
+index d36f3f6b43510..b15cf316b23a2 100644
+--- a/net/rds/rdma_transport.c
++++ b/net/rds/rdma_transport.c
+@@ -86,11 +86,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ 		break;
+ 
+ 	case RDMA_CM_EVENT_ADDR_RESOLVED:
+-		rdma_set_service_type(cm_id, conn->c_tos);
+-		rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
+-		/* XXX do we need to clean up if this fails? */
+-		ret = rdma_resolve_route(cm_id,
+-					 RDS_RDMA_RESOLVE_TIMEOUT_MS);
++		if (conn) {
++			rdma_set_service_type(cm_id, conn->c_tos);
++			rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
++			/* XXX do we need to clean up if this fails? */
++			ret = rdma_resolve_route(cm_id,
++						 RDS_RDMA_RESOLVE_TIMEOUT_MS);
++		}
+ 		break;
+ 
+ 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
+index b60fe1eb37ab6..aa8928975cc63 100644
+--- a/net/smc/smc_stats.h
++++ b/net/smc/smc_stats.h
+@@ -243,8 +243,9 @@ while (0)
+ #define SMC_STAT_SERV_SUCC_INC(net, _ini) \
+ do { \
+ 	typeof(_ini) i = (_ini); \
+-	bool is_v2 = (i->smcd_version & SMC_V2); \
+ 	bool is_smcd = (i->is_smcd); \
++	u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
++	bool is_v2 = (version & SMC_V2); \
+ 	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
+ 	if (is_v2 && is_smcd) \
+ 		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 315bd59dea056..be6be7d785315 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2474,8 +2474,7 @@ call_status(struct rpc_task *task)
+ 		goto out_exit;
+ 	}
+ 	task->tk_action = call_encode;
+-	if (status != -ECONNRESET && status != -ECONNABORTED)
+-		rpc_check_timeout(task);
++	rpc_check_timeout(task);
+ 	return;
+ out_exit:
+ 	rpc_call_rpcerror(task, status);
+@@ -2748,6 +2747,7 @@ out_msg_denied:
+ 	case rpc_autherr_rejectedverf:
+ 	case rpcsec_gsserr_credproblem:
+ 	case rpcsec_gsserr_ctxproblem:
++		rpcauth_invalcred(task);
+ 		if (!task->tk_cred_retry)
+ 			break;
+ 		task->tk_cred_retry--;
+@@ -2904,19 +2904,22 @@ static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
+  * @clnt: pointer to struct rpc_clnt
+  * @xps: pointer to struct rpc_xprt_switch,
+  * @xprt: pointer struct rpc_xprt
+- * @dummy: unused
++ * @in_max_connect: pointer to the max_connect value for the passed in xprt transport
+  */
+ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+ 		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
+-		void *dummy)
++		void *in_max_connect)
+ {
+ 	struct rpc_cb_add_xprt_calldata *data;
+ 	struct rpc_task *task;
++	int max_connect = clnt->cl_max_connect;
+ 
+-	if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
++	if (in_max_connect)
++		max_connect = *(int *)in_max_connect;
++	if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
+ 		rcu_read_lock();
+ 		pr_warn("SUNRPC: reached max allowed number (%d) did not add "
+-			"transport to server: %s\n", clnt->cl_max_connect,
++			"transport to server: %s\n", max_connect,
+ 			rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
+ 		rcu_read_unlock();
+ 		return -EINVAL;
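[Editor's note on the sunrpc/clnt.c hunk above: the previously unused void *dummy becomes an optional per-call max_connect override — NULL preserves the client-wide default, a non-NULL pointer substitutes its value. The pattern in isolation, with stand-in types:

	#include <stdio.h>

	struct clnt_model { int cl_max_connect; };

	static int test_and_add_xprt(struct clnt_model *clnt, int nunique,
				     void *in_max_connect)
	{
		int max_connect = clnt->cl_max_connect;

		if (in_max_connect)		/* optional per-call override */
			max_connect = *(int *)in_max_connect;

		if (nunique + 1 > max_connect)
			return -1;		/* -EINVAL stand-in */
		return 0;
	}

	int main(void)
	{
		struct clnt_model c = { .cl_max_connect = 1 };
		int override = 4;

		/* NULL keeps the old behaviour; a pointer raises the cap. */
		printf("%d %d\n",
		       test_and_add_xprt(&c, 2, NULL),
		       test_and_add_xprt(&c, 2, &override));
		return 0;
	}
]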
+diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
+index c0c8a85d7c81b..a45154cefa487 100755
+--- a/scripts/atomic/gen-atomic-fallback.sh
++++ b/scripts/atomic/gen-atomic-fallback.sh
+@@ -102,7 +102,7 @@ gen_proto_order_variant()
+ 	fi
+ 
+ 	# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
+-	if [ ! -z "${order}" ]; then
++	if [ ! -z "${order}" ] && ! meta_is_implicitly_relaxed "${meta}"; then
+ 		printf "#elif defined(arch_${basename})\n"
+ 		printf "\t${retstmt}arch_${basename}(${args});\n"
+ 	fi
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 2d3cec908154d..cef48319bd396 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -1770,7 +1770,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
+ 	if (IS_ENABLED(CONFIG_SND_UMP))
+ 		snd_iprintf(buffer, "Type: %s\n",
+ 			    rawmidi_is_ump(rmidi) ? "UMP" : "Legacy");
+-	if (rmidi->ops->proc_read)
++	if (rmidi->ops && rmidi->ops->proc_read)
+ 		rmidi->ops->proc_read(entry, buffer);
+ 	mutex_lock(&rmidi->open_mutex);
+ 	if (rmidi->info_flags & SNDRV_RAWMIDI_INFO_OUTPUT) {
+diff --git a/sound/core/seq/seq_ump_client.c b/sound/core/seq/seq_ump_client.c
+index f26a1812dfa73..2db371d79930d 100644
+--- a/sound/core/seq/seq_ump_client.c
++++ b/sound/core/seq/seq_ump_client.c
+@@ -207,7 +207,7 @@ static void fill_port_info(struct snd_seq_port_info *port,
+ 		SNDRV_SEQ_PORT_TYPE_PORT;
+ 	port->midi_channels = 16;
+ 	if (*group->name)
+-		snprintf(port->name, sizeof(port->name), "Group %d (%s)",
++		snprintf(port->name, sizeof(port->name), "Group %d (%.53s)",
+ 			 group->group + 1, group->name);
+ 	else
+ 		sprintf(port->name, "Group %d", group->group + 1);
+@@ -416,6 +416,25 @@ static void setup_client_midi_version(struct seq_ump_client *client)
+ 	snd_seq_kernel_client_put(cptr);
+ }
+ 
++/* set up client's group_filter bitmap */
++static void setup_client_group_filter(struct seq_ump_client *client)
++{
++	struct snd_seq_client *cptr;
++	unsigned int filter;
++	int p;
++
++	cptr = snd_seq_kernel_client_get(client->seq_client);
++	if (!cptr)
++		return;
++	filter = ~(1U << 0); /* always allow groupless messages */
++	for (p = 0; p < SNDRV_UMP_MAX_GROUPS; p++) {
++		if (client->groups[p].active)
++			filter &= ~(1U << (p + 1));
++	}
++	cptr->group_filter = filter;
++	snd_seq_kernel_client_put(cptr);
++}
++
+ /* UMP group change notification */
+ static void handle_group_notify(struct work_struct *work)
+ {
+@@ -424,6 +443,7 @@ static void handle_group_notify(struct work_struct *work)
+ 
+ 	update_group_attrs(client);
+ 	update_port_infos(client);
++	setup_client_group_filter(client);
+ }
+ 
+ /* UMP FB change notification */
+@@ -492,6 +512,8 @@ static int snd_seq_ump_probe(struct device *_dev)
+ 			goto error;
+ 	}
+ 
++	setup_client_group_filter(client);
++
+ 	err = create_ump_endpoint_port(client);
+ 	if (err < 0)
+ 		goto error;
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index 7cc84e137999c..b141024830ecc 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -1197,6 +1197,8 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source,
+ 			   struct snd_seq_event *event,
+ 			   int atomic, int hop)
+ {
++	if (dest->group_filter & (1U << dest_port->ump_group))
++		return 0; /* group filtered - skip the event */
+ 	if (event->type == SNDRV_SEQ_EVENT_SYSEX)
+ 		return cvt_sysex_to_ump(dest, dest_port, event, atomic, hop);
+ 	else if (snd_seq_client_is_midi2(dest))
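[Editor's note on the seq_ump_client.c and seq_ump_convert.c hunks above: the client builds a filter bitmap in which bit 0 covers groupless messages (always allowed) and bit (group + 1) covers each UMP group, and delivery silently drops events whose bit is set. A model of both halves; MAX_GROUPS stands in for SNDRV_UMP_MAX_GROUPS, and the port's ump_group is assumed to already be the 1-based bit index as in the hunks:

	#include <stdio.h>

	#define MAX_GROUPS 16

	static unsigned int build_group_filter(const int active[MAX_GROUPS])
	{
		unsigned int filter = ~(1U << 0);	/* groupless passes */
		int p;

		for (p = 0; p < MAX_GROUPS; p++)
			if (active[p])
				filter &= ~(1U << (p + 1));
		return filter;
	}

	static int deliver(unsigned int filter, unsigned int ump_group)
	{
		if (filter & (1U << ump_group))
			return 0;	/* filtered: drop, report success */
		return 1;		/* would convert and deliver */
	}

	int main(void)
	{
		int active[MAX_GROUPS] = { [2] = 1 };	/* only group 2 active */
		unsigned int f = build_group_filter(active);

		printf("%d %d %d\n",
		       deliver(f, 0),	/* groupless: delivered */
		       deliver(f, 3),	/* group 2 (bit 3): delivered */
		       deliver(f, 1));	/* group 0, inactive: dropped */
		return 0;
	}
]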
+diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
+index 5cb92f7ccbcac..b57d72ea4503f 100644
+--- a/sound/hda/intel-sdw-acpi.c
++++ b/sound/hda/intel-sdw-acpi.c
+@@ -23,7 +23,7 @@ static int ctrl_link_mask;
+ module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
+ MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
+ 
+-static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
++static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
+ {
+ 	struct fwnode_handle *link;
+ 	char name[32];
+@@ -31,7 +31,7 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+ 
+ 	/* Find master handle */
+ 	snprintf(name, sizeof(name),
+-		 "mipi-sdw-link-%d-subproperties", i);
++		 "mipi-sdw-link-%hhu-subproperties", idx);
+ 
+ 	link = fwnode_get_named_child_node(fw_node, name);
+ 	if (!link)
+@@ -51,8 +51,8 @@ static int
+ sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
+ {
+ 	struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
+-	int ret, i;
+-	u8 count;
++	u8 count, i;
++	int ret;
+ 
+ 	if (!adev)
+ 		return -EINVAL;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ef831770ca7da..5cfd009175dac 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2223,6 +2223,7 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ 	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
++	SND_PCI_QUIRK(0x17aa, 0x316e, "Lenovo ThinkCentre M70q", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
+ 	SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dc7b7a407638a..4a13747b2b0f3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9680,7 +9680,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+-	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index b304b3562c82b..5cc774b3da05c 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -213,6 +213,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82TL"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -220,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82UG"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -255,6 +276,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -325,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A22"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/cs35l56-i2c.c b/sound/soc/codecs/cs35l56-i2c.c
+index 40666e6698ba9..b69441ec8d99f 100644
+--- a/sound/soc/codecs/cs35l56-i2c.c
++++ b/sound/soc/codecs/cs35l56-i2c.c
+@@ -27,7 +27,6 @@ static int cs35l56_i2c_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	cs35l56->dev = dev;
+-	cs35l56->can_hibernate = true;
+ 
+ 	i2c_set_clientdata(client, cs35l56);
+ 	cs35l56->regmap = devm_regmap_init_i2c(client, regmap_config);
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index fd06b9f9d496d..7e241908b5f16 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -1594,6 +1594,7 @@ void cs35l56_remove(struct cs35l56_private *cs35l56)
+ 	flush_workqueue(cs35l56->dsp_wq);
+ 	destroy_workqueue(cs35l56->dsp_wq);
+ 
++	pm_runtime_dont_use_autosuspend(cs35l56->dev);
+ 	pm_runtime_suspend(cs35l56->dev);
+ 	pm_runtime_disable(cs35l56->dev);
+ 
+diff --git a/sound/soc/codecs/cs42l42-sdw.c b/sound/soc/codecs/cs42l42-sdw.c
+index eeab07c850f95..974bae4abfad1 100644
+--- a/sound/soc/codecs/cs42l42-sdw.c
++++ b/sound/soc/codecs/cs42l42-sdw.c
+@@ -344,6 +344,16 @@ static int cs42l42_sdw_update_status(struct sdw_slave *peripheral,
+ 	switch (status) {
+ 	case SDW_SLAVE_ATTACHED:
+ 		dev_dbg(cs42l42->dev, "ATTACHED\n");
++
++		/*
++		 * The SoundWire core can report stale ATTACH notifications
++		 * if we hard-reset CS42L42 in probe() but it had already been
++		 * enumerated. Reject the ATTACH if we haven't yet seen an
++		 * UNATTACH report for the device being in reset.
++		 */
++		if (cs42l42->sdw_waiting_first_unattach)
++			break;
++
+ 		/*
+ 		 * Initialise codec, this only needs to be done once.
+ 		 * When resuming from suspend, resume callback will handle re-init of codec,
+@@ -354,6 +364,16 @@ static int cs42l42_sdw_update_status(struct sdw_slave *peripheral,
+ 		break;
+ 	case SDW_SLAVE_UNATTACHED:
+ 		dev_dbg(cs42l42->dev, "UNATTACHED\n");
++
++		if (cs42l42->sdw_waiting_first_unattach) {
++			/*
++			 * SoundWire core has seen that CS42L42 is not on
++			 * the bus so release RESET and wait for ATTACH.
++			 */
++			cs42l42->sdw_waiting_first_unattach = false;
++			gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
++		}
++
+ 		break;
+ 	default:
+ 		break;
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index a0de0329406a1..2961340f15e2e 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -2320,7 +2320,26 @@ int cs42l42_common_probe(struct cs42l42_private *cs42l42,
+ 
+ 	if (cs42l42->reset_gpio) {
+ 		dev_dbg(cs42l42->dev, "Found reset GPIO\n");
+-		gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
++
++		/*
++		 * ACPI can override the default GPIO state we requested
++		 * so ensure that we start with RESET low.
++		 */
++		gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
++
++		/* Ensure minimum reset pulse width */
++		usleep_range(10, 500);
++
++		/*
++		 * On SoundWire keep the chip in reset until we get an UNATTACH
++		 * notification from the SoundWire core. This acts as a
++		 * synchronization point to reject stale ATTACH notifications
++		 * if the chip was already enumerated before we reset it.
++		 */
++		if (cs42l42->sdw_peripheral)
++			cs42l42->sdw_waiting_first_unattach = true;
++		else
++			gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ 	}
+ 	usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+ 
+diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
+index 4bd7b85a57471..7785125b73ab9 100644
+--- a/sound/soc/codecs/cs42l42.h
++++ b/sound/soc/codecs/cs42l42.h
+@@ -53,6 +53,7 @@ struct  cs42l42_private {
+ 	u8 stream_use;
+ 	bool hp_adc_up_pending;
+ 	bool suspended;
++	bool sdw_waiting_first_unattach;
+ 	bool init_done;
+ };
+ 
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index eceed82097877..0a05554da3739 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2404,13 +2404,11 @@ static irqreturn_t rt5640_irq(int irq, void *data)
+ 	struct rt5640_priv *rt5640 = data;
+ 	int delay = 0;
+ 
+-	if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
+-		cancel_delayed_work_sync(&rt5640->jack_work);
++	if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
+ 		delay = 100;
+-	}
+ 
+ 	if (rt5640->jack)
+-		queue_delayed_work(system_long_wq, &rt5640->jack_work, delay);
++		mod_delayed_work(system_long_wq, &rt5640->jack_work, delay);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -2566,12 +2564,11 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
+ 	if (jack_data && jack_data->use_platform_clock)
+ 		rt5640->use_platform_clock = jack_data->use_platform_clock;
+ 
+-	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
+-					NULL, rt5640_irq,
+-					IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+-					"rt5640", rt5640);
++	ret = request_irq(rt5640->irq, rt5640_irq,
++			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++			  "rt5640", rt5640);
+ 	if (ret) {
+-		dev_warn(component->dev, "Failed to reguest IRQ %d: %d\n", rt5640->irq, ret);
++		dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
+ 		rt5640_disable_jack_detect(component);
+ 		return;
+ 	}
+@@ -2622,14 +2619,14 @@ static void rt5640_enable_hda_jack_detect(
+ 
+ 	rt5640->jack = jack;
+ 
+-	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
+-					NULL, rt5640_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+-					"rt5640", rt5640);
++	ret = request_irq(rt5640->irq, rt5640_irq,
++			  IRQF_TRIGGER_RISING | IRQF_ONESHOT, "rt5640", rt5640);
+ 	if (ret) {
+-		dev_warn(component->dev, "Failed to reguest IRQ %d: %d\n", rt5640->irq, ret);
+-		rt5640->irq = -ENXIO;
++		dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
++		rt5640->jack = NULL;
+ 		return;
+ 	}
++	rt5640->irq_requested = true;
+ 
+ 	/* sync initial jack state */
+ 	queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
+@@ -2802,12 +2799,12 @@ static int rt5640_suspend(struct snd_soc_component *component)
+ {
+ 	struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
+ 
+-	if (rt5640->irq) {
++	if (rt5640->jack) {
+ 		/* disable jack interrupts during system suspend */
+ 		disable_irq(rt5640->irq);
++		rt5640_cancel_work(rt5640);
+ 	}
+ 
+-	rt5640_cancel_work(rt5640);
+ 	snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
+ 	rt5640_reset(component);
+ 	regcache_cache_only(rt5640->regmap, true);
+@@ -2830,9 +2827,6 @@ static int rt5640_resume(struct snd_soc_component *component)
+ 	regcache_cache_only(rt5640->regmap, false);
+ 	regcache_sync(rt5640->regmap);
+ 
+-	if (rt5640->irq)
+-		enable_irq(rt5640->irq);
+-
+ 	if (rt5640->jack) {
+ 		if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
+ 			snd_soc_component_update_bits(component,
+@@ -2860,6 +2854,7 @@ static int rt5640_resume(struct snd_soc_component *component)
+ 			}
+ 		}
+ 
++		enable_irq(rt5640->irq);
+ 		queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
+ 	}
+ 
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 5a89abfe87846..8c20ff6808941 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -687,7 +687,10 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ 	struct wm_coeff_ctl *ctl;
+ 	int ret;
+ 
++	mutex_lock(&dsp->cs_dsp.pwr_lock);
+ 	ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
++	mutex_unlock(&dsp->cs_dsp.pwr_lock);
++
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -703,8 +706,14 @@ EXPORT_SYMBOL_GPL(wm_adsp_write_ctl);
+ int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ 		     unsigned int alg, void *buf, size_t len)
+ {
+-	return cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg),
+-				      0, buf, len);
++	int ret;
++
++	mutex_lock(&dsp->cs_dsp.pwr_lock);
++	ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg),
++				     0, buf, len);
++	mutex_unlock(&dsp->cs_dsp.pwr_lock);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
+ 
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 0b58df56f4daa..aeb81aa61184f 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -315,7 +315,7 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->cpu_mclk)) {
+ 		ret = PTR_ERR(priv->cpu_mclk);
+ 		dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
+-		return -EINVAL;
++		return ret;
+ 	}
+ 
+ 	priv->audmix_pdev = audmix_pdev;
+diff --git a/sound/soc/fsl/imx-pcm-rpmsg.c b/sound/soc/fsl/imx-pcm-rpmsg.c
+index 765dad607bf61..5eef1554a93a1 100644
+--- a/sound/soc/fsl/imx-pcm-rpmsg.c
++++ b/sound/soc/fsl/imx-pcm-rpmsg.c
+@@ -19,6 +19,7 @@
+ static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
+ 	.info = SNDRV_PCM_INFO_INTERLEAVED |
+ 		SNDRV_PCM_INFO_BLOCK_TRANSFER |
++		SNDRV_PCM_INFO_BATCH |
+ 		SNDRV_PCM_INFO_MMAP |
+ 		SNDRV_PCM_INFO_MMAP_VALID |
+ 		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c
+index 3c7b95db2eacc..b578f9a32d7f1 100644
+--- a/sound/soc/fsl/imx-rpmsg.c
++++ b/sound/soc/fsl/imx-rpmsg.c
+@@ -89,6 +89,14 @@ static int imx_rpmsg_probe(struct platform_device *pdev)
+ 			    SND_SOC_DAIFMT_NB_NF |
+ 			    SND_SOC_DAIFMT_CBC_CFC;
+ 
++	/*
++	 * i.MX rpmsg sound cards work on codec slave mode. MCLK will be
++	 * disabled by CPU DAI driver in hw_free(). Some codec requires MCLK
++	 * present at power up/down sequence. So need to set ignore_pmdown_time
++	 * to power down codec immediately before MCLK is turned off.
++	 */
++	data->dai.ignore_pmdown_time = 1;
++
+ 	/* Optional codec node */
+ 	ret = of_parse_phandle_with_fixed_args(np, "audio-codec", 0, 0, &args);
+ 	if (ret) {
+diff --git a/sound/soc/intel/avs/boards/hdaudio.c b/sound/soc/intel/avs/boards/hdaudio.c
+index cb00bc86ac949..8876558f19a1b 100644
+--- a/sound/soc/intel/avs/boards/hdaudio.c
++++ b/sound/soc/intel/avs/boards/hdaudio.c
+@@ -55,6 +55,9 @@ static int avs_create_dai_links(struct device *dev, struct hda_codec *codec, int
+ 			return -ENOMEM;
+ 
+ 		dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL);
++		if (!dl[i].codecs->name)
++			return -ENOMEM;
++
+ 		dl[i].codecs->dai_name = pcm->name;
+ 		dl[i].num_codecs = 1;
+ 		dl[i].num_cpus = 1;
+diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
+index e2cc4c4be7586..97e81ec4a78ce 100644
+--- a/sound/soc/meson/axg-spdifin.c
++++ b/sound/soc/meson/axg-spdifin.c
+@@ -112,34 +112,6 @@ static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
+-			       struct snd_soc_dai *dai)
+-{
+-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+-	int ret;
+-
+-	ret = clk_prepare_enable(priv->refclk);
+-	if (ret) {
+-		dev_err(dai->dev,
+-			"failed to enable spdifin reference clock\n");
+-		return ret;
+-	}
+-
+-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+-			   SPDIFIN_CTRL0_EN);
+-
+-	return 0;
+-}
+-
+-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
+-				 struct snd_soc_dai *dai)
+-{
+-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+-
+-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+-	clk_disable_unprepare(priv->refclk);
+-}
+-
+ static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
+ 					 unsigned int val,
+ 					 unsigned int num_per_reg,
+@@ -251,25 +223,38 @@ static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
+ 	ret = axg_spdifin_sample_mode_config(dai, priv);
+ 	if (ret) {
+ 		dev_err(dai->dev, "mode configuration failed\n");
+-		clk_disable_unprepare(priv->pclk);
+-		return ret;
++		goto pclk_err;
+ 	}
+ 
++	ret = clk_prepare_enable(priv->refclk);
++	if (ret) {
++		dev_err(dai->dev,
++			"failed to enable spdifin reference clock\n");
++		goto pclk_err;
++	}
++
++	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
++			   SPDIFIN_CTRL0_EN);
++
+ 	return 0;
++
++pclk_err:
++	clk_disable_unprepare(priv->pclk);
++	return ret;
+ }
+ 
+ static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
+ {
+ 	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ 
++	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
++	clk_disable_unprepare(priv->refclk);
+ 	clk_disable_unprepare(priv->pclk);
+ 	return 0;
+ }
+ 
+ static const struct snd_soc_dai_ops axg_spdifin_ops = {
+ 	.prepare	= axg_spdifin_prepare,
+-	.startup	= axg_spdifin_startup,
+-	.shutdown	= axg_spdifin_shutdown,
+ };
+ 
+ static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 30db685cc5f4b..2d1616b81485c 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -486,10 +486,9 @@ int snd_sof_device_remove(struct device *dev)
+ 		snd_sof_ipc_free(sdev);
+ 		snd_sof_free_debug(sdev);
+ 		snd_sof_remove(sdev);
++		sof_ops_free(sdev);
+ 	}
+ 
+-	sof_ops_free(sdev);
+-
+ 	/* release firmware */
+ 	snd_sof_fw_unload(sdev);
+ 
+diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
+index 30fe77fd87bf8..79e9a7ed8feaa 100644
+--- a/sound/soc/sof/intel/mtl.c
++++ b/sound/soc/sof/intel/mtl.c
+@@ -460,7 +460,7 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+ 	/* step 3: wait for IPC DONE bit from ROM */
+ 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
+ 					    ((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
+-					    HDA_DSP_REG_POLL_INTERVAL_US, MTL_DSP_PURGE_TIMEOUT_US);
++					    HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_INIT_TIMEOUT_US);
+ 	if (ret < 0) {
+ 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ 			dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
+diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
+index 2794fe6e81396..9a0b8b9d8a0c9 100644
+--- a/sound/soc/sof/intel/mtl.h
++++ b/sound/soc/sof/intel/mtl.h
+@@ -62,7 +62,6 @@
+ #define MTL_DSP_IRQSTS_IPC		BIT(0)
+ #define MTL_DSP_IRQSTS_SDW		BIT(6)
+ 
+-#define MTL_DSP_PURGE_TIMEOUT_US	20000000 /* 20s */
+ #define MTL_DSP_REG_POLL_INTERVAL_US	10	/* 10 us */
+ 
+ /* Memory windows */
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 11361e1cd6881..8fb6582e568e7 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -218,7 +218,7 @@ static int sof_ipc4_get_audio_fmt(struct snd_soc_component *scomp,
+ 
+ 	ret = sof_update_ipc_object(scomp, available_fmt,
+ 				    SOF_AUDIO_FMT_NUM_TOKENS, swidget->tuples,
+-				    swidget->num_tuples, sizeof(available_fmt), 1);
++				    swidget->num_tuples, sizeof(*available_fmt), 1);
+ 	if (ret) {
+ 		dev_err(scomp->dev, "Failed to parse audio format token count\n");
+ 		return ret;
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e7ef77012c358..e5405f854a910 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -212,7 +212,8 @@ widget_free:
+ 	sof_widget_free_unlocked(sdev, swidget);
+ 	use_count_decremented = true;
+ core_put:
+-	snd_sof_dsp_core_put(sdev, swidget->core);
++	if (!use_count_decremented)
++		snd_sof_dsp_core_put(sdev, swidget->core);
+ pipe_widget_free:
+ 	if (swidget->id != snd_soc_dapm_scheduler)
+ 		sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
+diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
+index 71e54b1e37964..2f882d5cb30f5 100644
+--- a/tools/include/linux/btf_ids.h
++++ b/tools/include/linux/btf_ids.h
+@@ -38,7 +38,7 @@ asm(							\
+ 	____BTF_ID(symbol)
+ 
+ #define __ID(prefix) \
+-	__PASTE(prefix, __COUNTER__)
++	__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
+ 
+ /*
+  * The BTF_ID defines unique symbol for each ID pointing
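[Editor's note on the btf_ids.h hunk above: pasting __LINE__ on top of __COUNTER__ means two expansions collide only if both the counter value *and* the source line repeat, presumably hardening the generated asm symbols against duplicates across separately preprocessed contexts. The macro mechanics, runnable standalone:

	#include <stdio.h>

	#define ___PASTE(a, b)	a##b
	#define __PASTE(a, b)	___PASTE(a, b)

	/* Old form pasted only __COUNTER__; the fix pastes __LINE__ too. */
	#define __ID(prefix)	__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)

	int __ID(bpf_id_) = 1;	/* expands to e.g. bpf_id_010 */
	int __ID(bpf_id_) = 2;	/* counter and line both differ: unique */

	int main(void)
	{
		return 0;
	}
]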
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index a03d9bba51514..f3c82ab5b14cd 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -11,8 +11,6 @@
+ 
+ #define PHYS_ADDR_MAX	(~(phys_addr_t)0)
+ 
+-#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+-#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+ #define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
+ #define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))
+ 
+@@ -29,7 +27,7 @@ static inline void *phys_to_virt(unsigned long address)
+ 	return __va(address);
+ }
+ 
+-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
++void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
+ 
+ static inline void totalram_pages_inc(void)
+ {
+diff --git a/tools/include/linux/seq_file.h b/tools/include/linux/seq_file.h
+index 102fd9217f1f9..f6bc226af0c1d 100644
+--- a/tools/include/linux/seq_file.h
++++ b/tools/include/linux/seq_file.h
+@@ -1,4 +1,6 @@
+ #ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+ #define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+ 
++struct seq_file;
++
+ #endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 60a9d59beeabb..25f668165b567 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -1897,7 +1897,9 @@ union bpf_attr {
+  * 		performed again, if the helper is used in combination with
+  * 		direct packet access.
+  * 	Return
+- * 		0 on success, or a negative error in case of failure.
++ * 		0 on success, or a negative error in case of failure. Positive
++ * 		error indicates a potential drop or congestion in the target
++ * 		device. The particular positive error codes are not defined.
+  *
+  * u64 bpf_get_current_pid_tgid(void)
+  * 	Description
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 1384090530dbe..e308d1ba664ef 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -4333,7 +4333,8 @@ static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn
+ 			continue;
+ 		}
+ 
+-		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
++		if (insn_func(dest) && insn_func(insn) &&
++		    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
+ 			/*
+ 			 * Anything from->to self is either _THIS_IP_ or
+ 			 * IRET-to-self.
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index 96f4ea1d45c56..9c6c4475524b9 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -301,6 +301,12 @@ ifeq ($(BISON_GE_35),1)
+ else
+   bison_flags += -w
+ endif
++
++BISON_LT_381 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 381)
++ifeq ($(BISON_LT_381),1)
++  bison_flags += -DYYNOMEM=YYABORT
++endif
++
+ CFLAGS_parse-events-bison.o += $(bison_flags)
+ CFLAGS_pmu-bison.o          += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+ CFLAGS_expr-bison.o         += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
+index fdb7f5db73082..f6c6e5474c3af 100644
+--- a/tools/testing/memblock/internal.h
++++ b/tools/testing/memblock/internal.h
+@@ -20,4 +20,8 @@ void memblock_free_pages(struct page *page, unsigned long pfn,
+ {
+ }
+ 
++static inline void accept_memory(phys_addr_t start, phys_addr_t end)
++{
++}
++
+ #endif
+diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
+index 7b0909e8b759d..d3d58851864e7 100644
+--- a/tools/testing/memblock/mmzone.c
++++ b/tools/testing/memblock/mmzone.c
+@@ -11,7 +11,7 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
+ 	return NULL;
+ }
+ 
+-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
++void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
+ {
+ }
+ 
+diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
+index 411647094cc37..57bf2688edfd6 100644
+--- a/tools/testing/memblock/tests/basic_api.c
++++ b/tools/testing/memblock/tests/basic_api.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
++#include "basic_api.h"
+ #include <string.h>
+ #include <linux/memblock.h>
+-#include "basic_api.h"
+ 
+ #define EXPECTED_MEMBLOCK_REGIONS			128
+ #define FUNC_ADD					"memblock_add"
+diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
+index 4f23302ee6779..b5ec59aa62d72 100644
+--- a/tools/testing/memblock/tests/common.h
++++ b/tools/testing/memblock/tests/common.h
+@@ -5,6 +5,7 @@
+ #include <stdlib.h>
+ #include <assert.h>
+ #include <linux/types.h>
++#include <linux/seq_file.h>
+ #include <linux/memblock.h>
+ #include <linux/sizes.h>
+ #include <linux/printk.h>
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 31f1c935cd07d..98107e0452d33 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -1880,7 +1880,7 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	get_unpriv_disabled();
++	unpriv_disabled = get_unpriv_disabled();
+ 	if (unpriv && unpriv_disabled) {
+ 		printf("Cannot run as unprivileged user with sysctl %s.\n",
+ 		       UNPRIV_SYSCTL);
+diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+index 0eb47fbb3f44d..42422e4251078 100644
+--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
++++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+@@ -39,7 +39,7 @@ instance_read() {
+ 
+ instance_set() {
+         while :; do
+-                echo 1 > foo/events/sched/sched_switch
++                echo 1 > foo/events/sched/sched_switch/enable
+         done 2> /dev/null
+ }
+ 
+diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
+index 4bc14d9e8ff1d..de59cc8f03c3f 100755
+--- a/tools/testing/selftests/kselftest_deps.sh
++++ b/tools/testing/selftests/kselftest_deps.sh
+@@ -46,11 +46,11 @@ fi
+ print_targets=0
+ 
+ while getopts "p" arg; do
+-    case $arg in
+-        p)
++	case $arg in
++		p)
+ 		print_targets=1
+ 	shift;;
+-    esac
++	esac
+ done
+ 
+ if [ $# -eq 0 ]
+@@ -92,6 +92,10 @@ pass_cnt=0
+ # Get all TARGETS from selftests Makefile
+ targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+ 
++# Initially, in LDLIBS related lines, the dep checker needs
++# to ignore lines containing the following strings:
++filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
++
+ # Single test case
+ if [ $# -eq 2 ]
+ then
+@@ -100,6 +104,8 @@ then
+ 	l1_test $test
+ 	l2_test $test
+ 	l3_test $test
++	l4_test $test
++	l5_test $test
+ 
+ 	print_results $1 $2
+ 	exit $?
+@@ -113,7 +119,7 @@ fi
+ # Append space at the end of the list to append more tests.
+ 
+ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+-		grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
++		grep -v "$filter" | awk -F: '{print $1}' | uniq)
+ 
+ # Level 2: LDLIBS set dynamically.
+ #
+@@ -126,7 +132,7 @@ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+ # Append space at the end of the list to append more tests.
+ 
+ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+-		grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
++		grep -v "$filter" | awk -F: '{print $1}' | uniq)
+ 
+ # Level 3
+ # memfd and others use pkg-config to find mount and fuse libs
+@@ -138,11 +144,32 @@ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+ #	VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+ 
+ l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
+-		grep -v "pkg-config" | awk -F: '{print $1}')
++		grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
+ 
+-#echo $l1_tests
+-#echo $l2_1_tests
+-#echo $l3_tests
++# Level 4
++# some tests may fall back to default using `|| echo -l<libname>`
++# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
++# as per level 3 checks.
++# e.g:
++# netfilter/Makefile
++#	LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
++l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
++		grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
++
++# Level 5
++# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
++# which in turn may be defined in a sub-Makefile
++# e.g.:
++# mm/Makefile
++#	$(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
++l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
++	awk -F: '{print $1}' | uniq)
++
++#echo l1_tests $l1_tests
++#echo l2_tests $l2_tests
++#echo l3_tests $l3_tests
++#echo l4_tests $l4_tests
++#echo l5_tests $l5_tests
+ 
+ all_tests
+ print_results $1 $2
+@@ -164,24 +191,32 @@ all_tests()
+ 	for test in $l3_tests; do
+ 		l3_test $test
+ 	done
++
++	for test in $l4_tests; do
++		l4_test $test
++	done
++
++	for test in $l5_tests; do
++		l5_test $test
++	done
+ }
+ 
+ # Use same parsing used for l1_tests and pick libraries this time.
+ l1_test()
+ {
+ 	test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
+-			grep -v "VAR_LDLIBS" | \
++			grep -v "$filter" | \
+ 			sed -e 's/\:/ /' | \
+ 			sed -e 's/+/ /' | cut -d "=" -f 2)
+ 
+ 	check_libs $test $test_libs
+ }
+ 
+-# Use same parsing used for l2__tests and pick libraries this time.
++# Use same parsing used for l2_tests and pick libraries this time.
+ l2_test()
+ {
+ 	test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
+-			grep -v "VAR_LDLIBS" | \
++			grep -v "$filter" | \
+ 			sed -e 's/\:/ /' | sed -e 's/+/ /' | \
+ 			cut -d "=" -f 2)
+ 
+@@ -197,6 +232,24 @@ l3_test()
+ 	check_libs $test $test_libs
+ }
+ 
++l4_test()
++{
++	test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
++			grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
++			sed -e 's/.*|| echo //' | sed -e 's/)$//')
++
++	check_libs $test $test_libs
++}
++
++l5_test()
++{
++	tests=$(find $(dirname "$test") -type f -name "*.mk")
++	test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
++			cut -d "=" -f 2)
++
++	check_libs $test $test_libs
++}
++
+ check_libs()
+ {
+ 
+diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+index a5cb4b09a46c4..0899019a7fcb4 100755
+--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
++++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+@@ -25,7 +25,7 @@ if [[ "$1" == "-cgroup-v2" ]]; then
+ fi
+ 
+ if [[ $cgroup2 ]]; then
+-  cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
++  cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+     cgroup_path=/dev/cgroup/memory
+     mount -t cgroup2 none $cgroup_path
+@@ -33,7 +33,7 @@ if [[ $cgroup2 ]]; then
+   fi
+   echo "+hugetlb" >$cgroup_path/cgroup.subtree_control
+ else
+-  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
++  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+     cgroup_path=/dev/cgroup/memory
+     mount -t cgroup memory,hugetlb $cgroup_path
+diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+index bf2d2a684edfd..14d26075c8635 100755
+--- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
++++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+@@ -20,7 +20,7 @@ fi
+ 
+ 
+ if [[ $cgroup2 ]]; then
+-  CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
++  CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$CGROUP_ROOT" ]]; then
+     CGROUP_ROOT=/dev/cgroup/memory
+     mount -t cgroup2 none $CGROUP_ROOT
+@@ -28,7 +28,7 @@ if [[ $cgroup2 ]]; then
+   fi
+   echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control
+ else
+-  CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
++  CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
+   if [[ -z "$CGROUP_ROOT" ]]; then
+     CGROUP_ROOT=/dev/cgroup/memory
+     mount -t cgroup memory,hugetlb $CGROUP_ROOT
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index a3c57004344c6..6ec8b8335bdbf 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -552,11 +552,11 @@ TEST_F(tls, sendmsg_large)
+ 
+ 		msg.msg_iov = &vec;
+ 		msg.msg_iovlen = 1;
+-		EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
++		EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ 	}
+ 
+ 	while (recvs++ < sends) {
+-		EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++		EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+ 	}
+ 
+ 	free(mem);
+@@ -585,9 +585,9 @@ TEST_F(tls, sendmsg_multiple)
+ 	msg.msg_iov = vec;
+ 	msg.msg_iovlen = iov_len;
+ 
+-	EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
++	EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+ 	buf = malloc(total_len);
+-	EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
++	EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+ 	for (i = 0; i < iov_len; i++) {
+ 		EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+ 				 strlen(test_strs[i])),
+diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
+index 49f2ad1793fd9..7ea42fa02eabd 100644
+--- a/tools/testing/selftests/powerpc/Makefile
++++ b/tools/testing/selftests/powerpc/Makefile
+@@ -59,12 +59,11 @@ override define INSTALL_RULE
+ 	done;
+ endef
+ 
+-override define EMIT_TESTS
++emit_tests:
+ 	+@for TARGET in $(SUB_DIRS); do \
+ 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
+-		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
++		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\
+ 	done;
+-endef
+ 
+ override define CLEAN
+ 	+@for TARGET in $(SUB_DIRS); do \
+@@ -77,4 +76,4 @@ endef
+ tags:
+ 	find . -name '*.c' -o -name '*.h' | xargs ctags
+ 
+-.PHONY: tags $(SUB_DIRS)
++.PHONY: tags $(SUB_DIRS) emit_tests
+diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
+index 2b95e44d20ff9..a284fa874a9f1 100644
+--- a/tools/testing/selftests/powerpc/pmu/Makefile
++++ b/tools/testing/selftests/powerpc/pmu/Makefile
+@@ -30,13 +30,14 @@ override define RUN_TESTS
+ 	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+ endef
+ 
+-DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
+-override define EMIT_TESTS
+-	$(DEFAULT_EMIT_TESTS)
++emit_tests:
++	for TEST in $(TEST_GEN_PROGS); do \
++		BASENAME_TEST=`basename $$TEST`;	\
++		echo "$(COLLECTION):$$BASENAME_TEST";	\
++	done
+ 	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ 	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ 	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+-endef
+ 
+ DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
+ override define INSTALL_RULE
+@@ -64,4 +65,4 @@ sampling_tests:
+ event_code_tests:
+ 	TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+ 
+-.PHONY: all run_tests ebb sampling_tests event_code_tests
++.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-10-05 14:07 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-10-05 14:07 UTC (permalink / raw)
  To: gentoo-commits

commit:     d33625c328ad5ceea02a40310cf7b849022ef7bf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  5 14:04:01 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct  5 14:07:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d33625c3

Select BLK_DEV_BSG only if SCSI is enabled, as BLK_DEV_BSG depends on it.

Thanks, Ancient.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index bd7b76ca..d215166c 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -122,7 +122,7 @@
 +	depends on GENTOO_LINUX && GENTOO_LINUX_UDEV
 +
 +	select AUTOFS_FS
-+	select BLK_DEV_BSG
++	select BLK_DEV_BSG if SCSI
 +	select BPF_SYSCALL
 +	select CGROUP_BPF
 +	select CGROUPS


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-23 11:08 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-23 11:08 UTC (permalink / raw)
  To: gentoo-commits

commit:     6cbe2eab5cac5d19329f52e8d63c493e90d05a2c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 23 11:08:14 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 23 11:08:14 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6cbe2eab

Actually remove the redundant patch this time

Removed:
1515_selinux-fix-handling-of-empty-opts.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1515_selinux-fix-handling-of-empty-opts.patch | 51 ---------------------------
 1 file changed, 51 deletions(-)

diff --git a/1515_selinux-fix-handling-of-empty-opts.patch b/1515_selinux-fix-handling-of-empty-opts.patch
deleted file mode 100644
index 10336ec5..00000000
--- a/1515_selinux-fix-handling-of-empty-opts.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-selinux: fix handling of empty opts in selinux_fs_context_submount()
-
-selinux_set_mnt_opts() relies on the fact that the mount options pointer
-is always NULL when all options are unset (specifically in its
-!selinux_initialized() branch. However, the new
-selinux_fs_context_submount() hook breaks this rule by allocating a new
-structure even if no options are set. That causes any submount created
-before a SELinux policy is loaded to be rejected in
-selinux_set_mnt_opts().
-
-Fix this by making selinux_fs_context_submount() leave fc->security
-set to NULL when there are no options to be copied from the reference
-superblock.
-
-Reported-by: Adam Williamson <awilliam@xxxxxxxxxx>
-Link: https://bugzilla.redhat.com/show_bug.cgi?id=2236345
-Fixes: d80a8f1b58c2 ("vfs, security: Fix automount superblock LSM init problem, preventing NFS sb sharing")
-Signed-off-by: Ondrej Mosnacek <omosnace@xxxxxxxxxx>
----
- security/selinux/hooks.c | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 10350534de6d6..2aa0e219d7217 100644
---- a/security/selinux/hooks.c
-+++ b/security/selinux/hooks.c
-@@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
- static int selinux_fs_context_submount(struct fs_context *fc,
- 				   struct super_block *reference)
- {
--	const struct superblock_security_struct *sbsec;
-+	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
- 	struct selinux_mnt_opts *opts;
- 
-+	/*
-+	 * Ensure that fc->security remains NULL when no options are set
-+	 * as expected by selinux_set_mnt_opts().
-+	 */
-+	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
-+		return 0;
-+
- 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
- 	if (!opts)
- 		return -ENOMEM;
- 
--	sbsec = selinux_superblock(reference);
- 	if (sbsec->flags & FSCONTEXT_MNT)
- 		opts->fscontext_sid = sbsec->sid;
- 	if (sbsec->flags & CONTEXT_MNT)
--- 
-2.41.0


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-23 11:06 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-23 11:06 UTC (permalink / raw)
  To: gentoo-commits

commit:     2ee3cdde869666f49ee1b4aad3035e4bec2c8cb5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 23 11:06:24 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 23 11:06:24 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2ee3cdde

Remove redundant patch

Removed:
1515_selinux-fix-handling-of-empty-opts.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/0000_README b/0000_README
index a2ae7329..46cf8e96 100644
--- a/0000_README
+++ b/0000_README
@@ -71,10 +71,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1515_selinux-fix-handling-of-empty-opts.patch
-From:   https://www.spinics.net/lists/linux-fsdevel/msg249428.html
-Desc:   selinux: fix handling of empty opts in selinux_fs_context_submount()
-
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-23 10:15 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-23 10:15 UTC (permalink / raw)
  To: gentoo-commits

commit:     f9b249413e10502317b5e0cae0b002ed24491cd5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 23 10:15:18 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 23 10:15:18 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f9b24941

Linux patch 6.5.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1004_linux-6.5.5.patch | 9194 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9198 insertions(+)

diff --git a/0000_README b/0000_README
index 37ce8d41..a2ae7329 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.5.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.4
 
+Patch:  1004_linux-6.5.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-6.5.5.patch b/1004_linux-6.5.5.patch
new file mode 100644
index 00000000..3ac002d7
--- /dev/null
+++ b/1004_linux-6.5.5.patch
@@ -0,0 +1,9194 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 8d3afeede10e4..fabaad3fd9c21 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -92,6 +92,8 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
++ memory.kmem.limit_in_bytes          This knob is deprecated and writing to
++                                     it will return -ENOTSUPP.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index bedd3a1d7b423..0ac452333eb4f 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -198,6 +198,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
++|                | Hip09 SMMU PMCG |                 |                             |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index beddccac32831..7545d2b0e7b71 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 054e9199f30db..dc0fb7a813715 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ 	hw->address &= ~alignment_mask;
+ 	hw->ctrl.len <<= offset;
+ 
+-	if (is_default_overflow_handler(bp)) {
++	if (uses_default_overflow_handler(bp)) {
+ 		/*
+ 		 * Mismatch breakpoints are required for single-stepping
+ 		 * breakpoints.
+@@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ 		 * Otherwise, insert a temporary mismatch breakpoint so that
+ 		 * we can single-step over the watchpoint trigger.
+ 		 */
+-		if (!is_default_overflow_handler(wp))
++		if (!uses_default_overflow_handler(wp))
+ 			continue;
+ step:
+ 		enable_single_step(wp, instruction_pointer(regs));
+@@ -811,7 +811,7 @@ step:
+ 		info->trigger = addr;
+ 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+ 		perf_bp_event(wp, regs);
+-		if (is_default_overflow_handler(wp))
++		if (uses_default_overflow_handler(wp))
+ 			enable_single_step(wp, instruction_pointer(regs));
+ 	}
+ 
+@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+ 			info->trigger = addr;
+ 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+ 			perf_bp_event(bp, regs);
+-			if (is_default_overflow_handler(bp))
++			if (uses_default_overflow_handler(bp))
+ 				enable_single_step(bp, addr);
+ 			goto unlock;
+ 		}
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index 46364b699cc30..5d07cf9e0044d 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -94,16 +94,28 @@ static void machine_crash_nonpanic_core(void *unused)
+ 	}
+ }
+ 
++static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) =
++	CSD_INIT(machine_crash_nonpanic_core, NULL);
++
+ void crash_smp_send_stop(void)
+ {
+ 	static int cpus_stopped;
+ 	unsigned long msecs;
++	call_single_data_t *csd;
++	int cpu, this_cpu = raw_smp_processor_id();
+ 
+ 	if (cpus_stopped)
+ 		return;
+ 
+ 	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
++	for_each_online_cpu(cpu) {
++		if (cpu == this_cpu)
++			continue;
++
++		csd = &per_cpu(cpu_stop_csd, cpu);
++		smp_call_function_single_async(cpu, csd);
++	}
++
+ 	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ 	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ 		mdelay(1);
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 059dfccdfe7c2..b51787b6561d7 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -12,6 +12,7 @@
+ #include <dt-bindings/iio/qcom,spmi-adc7-pmr735a.h>
+ #include <dt-bindings/input/gpio-keys.h>
+ #include <dt-bindings/input/input.h>
++#include <dt-bindings/leds/common.h>
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ 
+ #include "sc8280xp.dtsi"
+@@ -78,6 +79,21 @@
+ 		};
+ 	};
+ 
++	leds {
++		compatible = "gpio-leds";
++
++		led-camera-indicator {
++			label = "white:camera-indicator";
++			function = LED_FUNCTION_INDICATOR;
++			color = <LED_COLOR_ID_WHITE>;
++			gpios = <&tlmm 28 GPIO_ACTIVE_HIGH>;
++			linux,default-trigger = "none";
++			default-state = "off";
++			/* Reuse as a panic indicator until we get a "camera on" trigger */
++			panic-indicator;
++		};
++	};
++
+ 	pmic-glink {
+ 		compatible = "qcom,sc8280xp-pmic-glink", "qcom,pmic-glink";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+index 9f8a9ef398a26..de85086c65adc 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
++++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+@@ -79,7 +79,7 @@
+ 			reg = <0x0 0xffc40000 0x0 0xc0000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 		};
+ 
+ 		cmdline_mem: memory@ffd00000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
+index a7f4aeae9c1a5..7c58d1299a609 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
++++ b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
+@@ -52,7 +52,7 @@
+ 			reg = <0x0 0xffc40000 0x0 0xc0000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 		};
+ 
+ 		cmdline_mem: memory@ffd00000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 7cafb32fbb941..4b4ea156a92c5 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -673,7 +673,7 @@
+ 			reg = <0 0xffc00000 0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+index baafea53770bf..ae0ca48b89a59 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+@@ -222,7 +222,7 @@
+ 			reg = <0x0 0xffc00000 0x0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index b044cffb419e5..6b044eca7ad5a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -126,7 +126,7 @@
+ 			reg = <0x0 0xffc00000 0x0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
+index db2a1861bb978..35225632d70ad 100644
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
+ 		perf_bp_event(bp, regs);
+ 
+ 		/* Do we need to handle the stepping? */
+-		if (is_default_overflow_handler(bp))
++		if (uses_default_overflow_handler(bp))
+ 			step = 1;
+ unlock:
+ 		rcu_read_unlock();
+@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+ 			     struct pt_regs *regs)
+ {
+-	int step = is_default_overflow_handler(wp);
++	int step = uses_default_overflow_handler(wp);
+ 	struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+ 
+ 	info->trigger = addr;
+diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
+index 2add435ad0387..165e032d08647 100644
+--- a/arch/mips/cavium-octeon/octeon-usb.c
++++ b/arch/mips/cavium-octeon/octeon-usb.c
+@@ -243,11 +243,11 @@ static int dwc3_octeon_get_divider(void)
+ 	while (div < ARRAY_SIZE(clk_div)) {
+ 		uint64_t rate = octeon_get_io_clock_rate() / clk_div[div];
+ 		if (rate <= 300000000 && rate >= 150000000)
+-			break;
++			return div;
+ 		div++;
+ 	}
+ 
+-	return div;
++	return -EINVAL;
+ }
+ 
+ static int dwc3_octeon_config_power(struct device *dev, void __iomem *base)
+@@ -374,6 +374,10 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
+ 
+ 	/* Step 4b: Select controller clock frequency. */
+ 	div = dwc3_octeon_get_divider();
++	if (div < 0) {
++		dev_err(dev, "clock divider invalid\n");
++		return div;
++	}
+ 	val = dwc3_octeon_readq(uctl_ctl_reg);
+ 	val &= ~USBDRD_UCTL_CTL_H_CLKDIV_SEL;
+ 	val |= FIELD_PREP(USBDRD_UCTL_CTL_H_CLKDIV_SEL, div);
+diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
+index 44703f13985bf..969cb9fc960f8 100644
+--- a/arch/powerpc/platforms/pseries/ibmebus.c
++++ b/arch/powerpc/platforms/pseries/ibmebus.c
+@@ -460,6 +460,7 @@ static int __init ibmebus_bus_init(void)
+ 	if (err) {
+ 		printk(KERN_WARNING "%s: device_register returned %i\n",
+ 		       __func__, err);
++		put_device(&ibmebus_bus_device);
+ 		bus_unregister(&ibmebus_bus_type);
+ 
+ 		return err;
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index c08bb5c3b3857..b3b96ff46d193 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
+ 	kbuf.image = image;
+ 	kbuf.buf_min = lowest_paddr;
+ 	kbuf.buf_max = ULONG_MAX;
+-	kbuf.buf_align = PAGE_SIZE;
++
++	/*
++	 * Current riscv boot protocol requires 2MB alignment for
++	 * RV64 and 4MB alignment for RV32
++	 *
++	 */
++	kbuf.buf_align = PMD_SIZE;
+ 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ 	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
+ 	kbuf.top_down = false;
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 64bd7ac3e35d1..f8d0550e5d2af 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -176,6 +176,7 @@ static unsigned long setup_kernel_memory_layout(void)
+ 	unsigned long asce_limit;
+ 	unsigned long rte_size;
+ 	unsigned long pages;
++	unsigned long vsize;
+ 	unsigned long vmax;
+ 
+ 	pages = ident_map_size / PAGE_SIZE;
+@@ -183,11 +184,9 @@ static unsigned long setup_kernel_memory_layout(void)
+ 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
+ 
+ 	/* choose kernel address space layout: 4 or 3 levels. */
+-	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
+-	if (IS_ENABLED(CONFIG_KASAN) ||
+-	    vmalloc_size > _REGION2_SIZE ||
+-	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
+-		    _REGION2_SIZE) {
++	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size + MODULES_LEN;
++	vsize = size_add(vsize, vmalloc_size);
++	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
+ 		asce_limit = _REGION1_SIZE;
+ 		rte_size = _REGION2_SIZE;
+ 	} else {
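
The s390 hunk above replaces open-coded additions with size_add() from
<linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping, so an
oversized vmalloc_size cannot wrap vsize and silently select the smaller
address space layout. A userspace sketch of those semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of kernel size_add(): saturate on overflow rather than wrap. */
    static size_t sat_add(size_t a, size_t b)
    {
        size_t sum;

        if (__builtin_add_overflow(a, b, &sum))
            return SIZE_MAX;
        return sum;
    }

    int main(void)
    {
        size_t vsize = sat_add(SIZE_MAX - 16, 4096);  /* plain + would wrap */

        printf("saturated: %d\n", vsize == SIZE_MAX); /* prints 1 */
        return 0;
    }
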
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index bcc956c17872b..08f93b0401bbd 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -59,6 +59,14 @@ static void *alloc_pgt_page(void *context)
+ 		return NULL;
+ 	}
+ 
++	/* Consumed more tables than expected? */
++	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
++		debug_putstr("pgt_buf running low in " __FILE__ "\n");
++		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
++		debug_putaddr(pages->pgt_buf_offset);
++		debug_putaddr(pages->pgt_buf_size);
++	}
++
+ 	entry = pages->pgt_buf + pages->pgt_buf_offset;
+ 	pages->pgt_buf_offset += PAGE_SIZE;
+ 
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 9191280d9ea31..215d37f7dde8a 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -40,23 +40,40 @@
+ #ifdef CONFIG_X86_64
+ # define BOOT_STACK_SIZE	0x4000
+ 
++/*
++ * Used by decompressor's startup_32() to allocate page tables for identity
++ * mapping of the 4G of RAM in 4-level paging mode:
++ * - 1 level4 table;
++ * - 1 level3 table;
++ * - 4 level2 table that maps everything with 2M pages;
++ *
++ * The additional level5 table needed for 5-level paging is allocated from
++ * trampoline_32bit memory.
++ */
+ # define BOOT_INIT_PGT_SIZE	(6*4096)
+-# ifdef CONFIG_RANDOMIZE_BASE
++
+ /*
+- * Assuming all cross the 512GB boundary:
+- * 1 page for level4
+- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
+- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
+- * Total is 19 pages.
++ * Total number of page tables kernel_add_identity_map() can allocate,
++ * including page tables consumed by startup_32().
++ *
++ * Worst-case scenario:
++ *  - 5-level paging needs 1 level5 table;
++ *  - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
++ *    assuming all of them cross 256T boundary:
++ *    + 4*2 level4 table;
++ *    + 4*2 level3 table;
++ *    + 4*2 level2 table;
++ *  - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
++ *    + 1 level4 table;
++ *    + 1 level3 table;
++ *    + 1 level2 table;
++ * Total: 28 tables
++ *
++ * Add 4 spare table in case decompressor touches anything beyond what is
++ * accounted above. Warn if it happens.
+  */
+-#  ifdef CONFIG_X86_VERBOSE_BOOTUP
+-#   define BOOT_PGT_SIZE	(19*4096)
+-#  else /* !CONFIG_X86_VERBOSE_BOOTUP */
+-#   define BOOT_PGT_SIZE	(17*4096)
+-#  endif
+-# else /* !CONFIG_RANDOMIZE_BASE */
+-#  define BOOT_PGT_SIZE		BOOT_INIT_PGT_SIZE
+-# endif
++# define BOOT_PGT_SIZE_WARN	(28*4096)
++# define BOOT_PGT_SIZE		(32*4096)
+ 
+ #else /* !CONFIG_X86_64 */
+ # define BOOT_STACK_SIZE	0x1000
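
The new comment's worst-case accounting can be checked by hand: one level5
table, plus four mapped objects (kernel, boot_params, cmdline, randomized
kernel) each potentially crossing a 256T boundary and so needing two tables
at each of three levels, plus three tables for the first 2M, gives 28;
BOOT_PGT_SIZE then adds four spare pages. The same arithmetic as a tiny
self-checking C program:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int level5 = 1;          /* 5-level paging: one level5 table  */
        int kaslr  = 4 * 2 * 3;  /* 4 objects x 2 tables x 3 levels   */
        int video  = 3;          /* first 2M: one table per level     */
        int total  = level5 + kaslr + video;

        assert(total == 28);     /* BOOT_PGT_SIZE_WARN / 4096         */
        printf("%d tables worst case, %d reserved\n", total, total + 4);
        return 0;
    }
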
+diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
+index 97a3de7892d3f..5ff49fd67732e 100644
+--- a/arch/x86/include/asm/linkage.h
++++ b/arch/x86/include/asm/linkage.h
+@@ -8,6 +8,14 @@
+ #undef notrace
+ #define notrace __attribute__((no_instrument_function))
+ 
++#ifdef CONFIG_64BIT
++/*
++ * The generic version tends to create spurious ENDBR instructions under
++ * certain conditions.
++ */
++#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
++#endif
++
+ #ifdef CONFIG_X86_32
+ #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
+ #endif /* CONFIG_X86_32 */
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index 81b826d3b7530..f2c02e4469ccc 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -116,7 +116,7 @@ copy_user_generic(void *to, const void *from, unsigned long len)
+ 		"2:\n"
+ 		_ASM_EXTABLE_UA(1b, 2b)
+ 		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
+-		: : "memory", "rax", "r8", "r9", "r10", "r11");
++		: : "memory", "rax");
+ 	clac();
+ 	return len;
+ }
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 099d58d02a262..44843a492e69c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -720,13 +720,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+-	/*
+-	 * Do not patch out the default return thunks if those needed are the
+-	 * ones generated by the compiler.
+-	 */
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+-	    (x86_return_thunk == __x86_return_thunk))
+-		return;
++	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++		static_call_force_reinit();
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *dest = NULL, *addr = (void *)s + *s;
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 035a3db5330b0..356de955e78dd 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -24,6 +24,8 @@
+ #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
+ #define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
+ #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
++#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
++#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
+ #define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
+ 
+ #define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
+@@ -39,6 +41,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
+ #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
++#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
+ #define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
+ 
+ /* Protect the PCI config register pairs used for SMN. */
+@@ -56,6 +59,8 @@ static const struct pci_device_id amd_root_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
+ 	{}
+ };
+@@ -85,6 +90,8 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
+ 	{}
+ };
+@@ -106,6 +113,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
+ 	{}
+ };
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index d9384d5b4b8e9..35acc95c6dd59 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -1571,7 +1571,7 @@ static void __init build_socket_tables(void)
+ {
+ 	struct uv_gam_range_entry *gre = uv_gre_table;
+ 	int nums, numn, nump;
+-	int cpu, i, lnid;
++	int i, lnid, apicid;
+ 	int minsock = _min_socket;
+ 	int maxsock = _max_socket;
+ 	int minpnode = _min_pnode;
+@@ -1622,15 +1622,14 @@ static void __init build_socket_tables(void)
+ 
+ 	/* Set socket -> node values: */
+ 	lnid = NUMA_NO_NODE;
+-	for_each_possible_cpu(cpu) {
+-		int nid = cpu_to_node(cpu);
+-		int apicid, sockid;
++	for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
++		int nid = __apicid_to_node[apicid];
++		int sockid;
+ 
+-		if (lnid == nid)
++		if ((nid == NUMA_NO_NODE) || (lnid == nid))
+ 			continue;
+ 		lnid = nid;
+ 
+-		apicid = per_cpu(x86_cpu_to_apicid, cpu);
+ 		sockid = apicid >> uv_cpuid.socketid_shift;
+ 
+ 		if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)
+diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
+index c06bfc086565d..faa9f22998488 100644
+--- a/arch/x86/kernel/callthunks.c
++++ b/arch/x86/kernel/callthunks.c
+@@ -272,7 +272,6 @@ void __init callthunks_patch_builtin_calls(void)
+ 	pr_info("Setting up call depth tracking\n");
+ 	mutex_lock(&text_mutex);
+ 	callthunks_setup(&cs, &builtin_coretext);
+-	static_call_force_reinit();
+ 	thunks_initialized = true;
+ 	mutex_unlock(&text_mutex);
+ }
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 7d82f0bd449c7..747b83a373a2d 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -587,7 +587,6 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ }
+ 
+ 
+-#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
+ static inline int x86_sched_itmt_flags(void)
+ {
+ 	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
+@@ -611,7 +610,14 @@ static int x86_cluster_flags(void)
+ 	return cpu_cluster_flags() | x86_sched_itmt_flags();
+ }
+ #endif
+-#endif
++
++static int x86_die_flags(void)
++{
++	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
++	       return x86_sched_itmt_flags();
++
++	return 0;
++}
+ 
+ /*
+  * Set if a package/die has multiple NUMA nodes inside.
+@@ -653,7 +659,7 @@ static void __init build_sched_topology(void)
+ 	 */
+ 	if (!x86_has_numa_in_package) {
+ 		x86_topology[i++] = (struct sched_domain_topology_level){
+-			cpu_cpu_mask, SD_INIT_NAME(DIE)
++			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
+ 		};
+ 	}
+ 
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index 01c5de4c279b8..0a81aafed7f88 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -27,7 +27,7 @@
+  * NOTE! The calling convention is very intentionally the same as
+  * for 'rep movs', so that we can rewrite the function call with
+  * just a plain 'rep movs' on machines that have FSRM.  But to make
+- * it simpler for us, we can clobber rsi/rdi and rax/r8-r11 freely.
++ * it simpler for us, we can clobber rsi/rdi and rax freely.
+  */
+ SYM_FUNC_START(rep_movs_alternative)
+ 	cmpq $64,%rcx
+@@ -68,55 +68,24 @@ SYM_FUNC_START(rep_movs_alternative)
+ 	_ASM_EXTABLE_UA( 3b, .Lcopy_user_tail)
+ 
+ .Llarge:
+-0:	ALTERNATIVE "jmp .Lunrolled", "rep movsb", X86_FEATURE_ERMS
++0:	ALTERNATIVE "jmp .Llarge_movsq", "rep movsb", X86_FEATURE_ERMS
+ 1:	RET
+ 
+-        _ASM_EXTABLE_UA( 0b, 1b)
++	_ASM_EXTABLE_UA( 0b, 1b)
+ 
+-	.p2align 4
+-.Lunrolled:
+-10:	movq (%rsi),%r8
+-11:	movq 8(%rsi),%r9
+-12:	movq 16(%rsi),%r10
+-13:	movq 24(%rsi),%r11
+-14:	movq %r8,(%rdi)
+-15:	movq %r9,8(%rdi)
+-16:	movq %r10,16(%rdi)
+-17:	movq %r11,24(%rdi)
+-20:	movq 32(%rsi),%r8
+-21:	movq 40(%rsi),%r9
+-22:	movq 48(%rsi),%r10
+-23:	movq 56(%rsi),%r11
+-24:	movq %r8,32(%rdi)
+-25:	movq %r9,40(%rdi)
+-26:	movq %r10,48(%rdi)
+-27:	movq %r11,56(%rdi)
+-	addq $64,%rsi
+-	addq $64,%rdi
+-	subq $64,%rcx
+-	cmpq $64,%rcx
+-	jae .Lunrolled
+-	cmpl $8,%ecx
+-	jae .Lword
++.Llarge_movsq:
++	movq %rcx,%rax
++	shrq $3,%rcx
++	andl $7,%eax
++0:	rep movsq
++	movl %eax,%ecx
+ 	testl %ecx,%ecx
+ 	jne .Lcopy_user_tail
+ 	RET
+ 
+-	_ASM_EXTABLE_UA(10b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(11b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(12b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(13b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(14b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(15b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(16b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(17b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(20b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(21b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(22b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(23b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(24b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(25b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(26b, .Lcopy_user_tail)
+-	_ASM_EXTABLE_UA(27b, .Lcopy_user_tail)
++1:	leaq (%rax,%rcx,8),%rcx
++	jmp .Lcopy_user_tail
++
++	_ASM_EXTABLE_UA( 0b, 1b)
+ SYM_FUNC_END(rep_movs_alternative)
+ EXPORT_SYMBOL(rep_movs_alternative)
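
The replacement .Llarge_movsq path above does a classic count split: len/8
qwords go through "rep movsq" and the len%8 remainder falls through to the
byte tail (on a fault, "leaq (%rax,%rcx,8),%rcx" rebuilds the remaining
byte count from the unfinished qwords plus the tail). The same arithmetic
in plain C, as a sketch without the fault handling:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    static void copy_large(void *to, const void *from, size_t len)
    {
        size_t nquads = len >> 3;   /* shrq $3,%rcx */
        size_t tail   = len & 7;    /* andl $7,%eax */

        assert(nquads * 8 + tail == len);
        memcpy(to, from, nquads * 8);                  /* rep movsq   */
        memcpy((char *)to + nquads * 8,
               (const char *)from + nquads * 8, tail); /* byte tail   */
    }

    int main(void)
    {
        char src[77] = "payload", dst[77];

        copy_large(dst, src, sizeof(src));
        return memcmp(src, dst, sizeof(src));  /* 0 on success */
    }
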
+diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
+index 1451e0c4ae22a..235bbda6fc823 100644
+--- a/arch/x86/lib/putuser.S
++++ b/arch/x86/lib/putuser.S
+@@ -56,7 +56,6 @@ SYM_FUNC_END(__put_user_1)
+ EXPORT_SYMBOL(__put_user_1)
+ 
+ SYM_FUNC_START(__put_user_nocheck_1)
+-	ENDBR
+ 	ASM_STAC
+ 2:	movb %al,(%_ASM_CX)
+ 	xor %ecx,%ecx
+@@ -76,7 +75,6 @@ SYM_FUNC_END(__put_user_2)
+ EXPORT_SYMBOL(__put_user_2)
+ 
+ SYM_FUNC_START(__put_user_nocheck_2)
+-	ENDBR
+ 	ASM_STAC
+ 4:	movw %ax,(%_ASM_CX)
+ 	xor %ecx,%ecx
+@@ -96,7 +94,6 @@ SYM_FUNC_END(__put_user_4)
+ EXPORT_SYMBOL(__put_user_4)
+ 
+ SYM_FUNC_START(__put_user_nocheck_4)
+-	ENDBR
+ 	ASM_STAC
+ 6:	movl %eax,(%_ASM_CX)
+ 	xor %ecx,%ecx
+@@ -119,7 +116,6 @@ SYM_FUNC_END(__put_user_8)
+ EXPORT_SYMBOL(__put_user_8)
+ 
+ SYM_FUNC_START(__put_user_nocheck_8)
+-	ENDBR
+ 	ASM_STAC
+ 9:	mov %_ASM_AX,(%_ASM_CX)
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index c2a29be35c01b..08aa0f25f12a0 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
+ # optimization flags.
+ KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+ 
++# When LTO is enabled, llvm emits many text sections, which is not supported
++# by kexec. Remove -flto=* flags.
++KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
++
+ # When linking purgatory.ro with -r unresolved symbols are not checked,
+ # also link a purgatory.chk binary without -r to check for unresolved symbols.
+ PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 953f08354c8c3..c21bc81a790ff 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4402,6 +4402,7 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
+ 				       int new_nr_hw_queues)
+ {
+ 	struct blk_mq_tags **new_tags;
++	int i;
+ 
+ 	if (set->nr_hw_queues >= new_nr_hw_queues)
+ 		goto done;
+@@ -4416,6 +4417,16 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
+ 		       sizeof(*set->tags));
+ 	kfree(set->tags);
+ 	set->tags = new_tags;
++
++	for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
++		if (!__blk_mq_alloc_map_and_rqs(set, i)) {
++			while (--i >= set->nr_hw_queues)
++				__blk_mq_free_map_and_rqs(set, i);
++			return -ENOMEM;
++		}
++		cond_resched();
++	}
++
+ done:
+ 	set->nr_hw_queues = new_nr_hw_queues;
+ 	return 0;
+@@ -4704,7 +4715,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ {
+ 	struct request_queue *q;
+ 	LIST_HEAD(head);
+-	int prev_nr_hw_queues;
++	int prev_nr_hw_queues = set->nr_hw_queues;
++	int i;
+ 
+ 	lockdep_assert_held(&set->tag_list_lock);
+ 
+@@ -4731,7 +4743,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 		blk_mq_sysfs_unregister_hctxs(q);
+ 	}
+ 
+-	prev_nr_hw_queues = set->nr_hw_queues;
+ 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
+ 		goto reregister;
+ 
+@@ -4767,6 +4778,10 @@ switch_back:
+ 
+ 	list_for_each_entry(q, &set->tag_list, tag_set_list)
+ 		blk_mq_unfreeze_queue(q);
++
++	/* Free the excess tags when nr_hw_queues shrink. */
++	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
++		__blk_mq_free_map_and_rqs(set, i);
+ }
+ 
+ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
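
Two idioms carry the blk-mq change above: on grow, allocate maps and
requests for each new queue index and unwind exactly what this call added
if one allocation fails; on shrink, free the excess entries only after the
queues are unfrozen. The grow-with-rollback idiom in plain C (illustrative,
not the kernel code):

    #include <stdlib.h>

    static int grow(void **slots, int old_n, int new_n)
    {
        int i;

        for (i = old_n; i < new_n; i++) {
            slots[i] = malloc(64);  /* __blk_mq_alloc_map_and_rqs() stand-in */
            if (!slots[i]) {
                while (--i >= old_n) {  /* unwind only our additions */
                    free(slots[i]);
                    slots[i] = NULL;
                }
                return -12;     /* -ENOMEM */
            }
        }
        return 0;
    }

    int main(void)
    {
        void *slots[8] = { 0 };
        int rc = grow(slots, 2, 8);

        for (int i = 2; i < 8; i++)
            free(slots[i]);
        return rc;
    }
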
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index 1b0f76ba3eb5e..59260aefed280 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	 * cipher name.
+ 	 */
+ 	if (!strncmp(cipher_name, "ecb(", 4)) {
+-		unsigned len;
++		int len;
+ 
+-		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+-		if (len < 2 || len >= sizeof(ecb_name))
++		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
++		if (len < 2)
+ 			goto err_free_inst;
+ 
+ 		if (ecb_name[len - 1] != ')')
+diff --git a/crypto/xts.c b/crypto/xts.c
+index 09be909a6a1aa..548b302c6c6a0 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	 * cipher name.
+ 	 */
+ 	if (!strncmp(cipher_name, "ecb(", 4)) {
+-		unsigned len;
++		int len;
+ 
+-		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+-		if (len < 2 || len >= sizeof(ctx->name))
++		len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
++		if (len < 2)
+ 			goto err_free_inst;
+ 
+ 		if (ctx->name[len - 1] != ')')
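
The lrw/xts conversions above work because of a return-value difference:
strlcpy() returns the length of the source string, so truncation had to be
caught with "len >= sizeof(buf)", while strscpy() returns the number of
characters actually copied, or -E2BIG on truncation, so the single
"len < 2" test now covers both "too short" and "truncated". A userspace
model of the strscpy() contract:

    #include <stdio.h>
    #include <string.h>

    /* Models kernel strscpy(): strlen of the result, or -E2BIG (-7)
     * when the source does not fit. */
    static int my_strscpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size == 0)
            return -7;
        if (len >= size) {
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return -7;          /* truncated */
        }
        memcpy(dst, src, len + 1);
        return (int)len;
    }

    int main(void)
    {
        char name[8];

        printf("%d\n", my_strscpy(name, "aes)", sizeof(name)));          /* 4  */
        printf("%d\n", my_strscpy(name, "averylongone)", sizeof(name))); /* -7 */
        return 0;
    }
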
+diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
+index 09029fe545f14..39e31030e5f49 100644
+--- a/drivers/acpi/acpica/psopcode.c
++++ b/drivers/acpi/acpica/psopcode.c
+@@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
+ 
+ /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
+ 			 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
+-			 AML_FLAGS_EXEC_0A_0T_1R),
++			 AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE),
+ 
+ /* ACPI 5.0 opcodes */
+ 
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index 56d887323ae52..6496ff5a6ba20 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1708,7 +1708,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
+ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ 	/* HiSilicon Hip08 Platform */
+ 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+-	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
++	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
++	/* HiSilicon Hip09 Platform */
++	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 18cc08c858cf2..442396f6ed1f9 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -445,6 +445,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"),
+ 		},
+ 	},
++	{
++	 /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
++	 .callback = video_detect_force_native,
++	 /* Lenovo Ideapad Z470 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
++		},
++	},
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
+ 	 .callback = video_detect_force_native,
+@@ -486,6 +495,24 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"),
+ 		},
+ 	},
++	{
++	 /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */
++	 .callback = video_detect_force_native,
++	 /* Apple iMac12,1 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"),
++		},
++	},
++	{
++	 /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */
++	 .callback = video_detect_force_native,
++	 /* Apple iMac12,2 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
++		},
++	},
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
+ 	 .callback = video_detect_force_native,
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 60cc4605169c5..60835953ebfc4 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -113,6 +113,12 @@ static void lpi_device_get_constraints_amd(void)
+ 		union acpi_object *package = &out_obj->package.elements[i];
+ 
+ 		if (package->type == ACPI_TYPE_PACKAGE) {
++			if (lpi_constraints_table) {
++				acpi_handle_err(lps0_device_handle,
++						"Duplicate constraints list\n");
++				goto free_acpi_buffer;
++			}
++
+ 			lpi_constraints_table = kcalloc(package->package.count,
+ 							sizeof(*lpi_constraints_table),
+ 							GFP_KERNEL);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index c1e85e0ed43e5..7907b09fc27eb 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1883,6 +1883,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	else
+ 		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+ 
++	if (!(hpriv->cap & HOST_CAP_PART))
++		host->flags |= ATA_HOST_NO_PART;
++
++	if (!(hpriv->cap & HOST_CAP_SSC))
++		host->flags |= ATA_HOST_NO_SSC;
++
++	if (!(hpriv->cap2 & HOST_CAP2_SDS))
++		host->flags |= ATA_HOST_NO_DEVSLP;
++
+ 	if (pi.flags & ATA_FLAG_EM)
+ 		ahci_reset_em(host);
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 06aec35f88f2c..a6212f6d37966 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1256,6 +1256,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+ 	return sprintf(buf, "%d\n", emp->blink_policy);
+ }
+ 
++static void ahci_port_clear_pending_irq(struct ata_port *ap)
++{
++	struct ahci_host_priv *hpriv = ap->host->private_data;
++	void __iomem *port_mmio = ahci_port_base(ap);
++	u32 tmp;
++
++	/* clear SError */
++	tmp = readl(port_mmio + PORT_SCR_ERR);
++	dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
++	writel(tmp, port_mmio + PORT_SCR_ERR);
++
++	/* clear port IRQ */
++	tmp = readl(port_mmio + PORT_IRQ_STAT);
++	dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
++	if (tmp)
++		writel(tmp, port_mmio + PORT_IRQ_STAT);
++
++	writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
++}
++
+ static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ 			   int port_no, void __iomem *mmio,
+ 			   void __iomem *port_mmio)
+@@ -1270,18 +1290,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ 	if (rc)
+ 		dev_warn(dev, "%s (%d)\n", emsg, rc);
+ 
+-	/* clear SError */
+-	tmp = readl(port_mmio + PORT_SCR_ERR);
+-	dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
+-	writel(tmp, port_mmio + PORT_SCR_ERR);
+-
+-	/* clear port IRQ */
+-	tmp = readl(port_mmio + PORT_IRQ_STAT);
+-	dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+-	if (tmp)
+-		writel(tmp, port_mmio + PORT_IRQ_STAT);
+-
+-	writel(1 << port_no, mmio + HOST_IRQ_STAT);
++	ahci_port_clear_pending_irq(ap);
+ 
+ 	/* mark esata ports */
+ 	tmp = readl(port_mmio + PORT_CMD);
+@@ -1602,6 +1611,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
+ 	tf.status = ATA_BUSY;
+ 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ 
++	ahci_port_clear_pending_irq(ap);
++
+ 	rc = sata_link_hardreset(link, timing, deadline, online,
+ 				 ahci_check_ready);
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 04db0f2c683a7..79d02eb4e4797 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4935,11 +4935,8 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
+ 		 * timeout using the policy 0xD. For these commands, invoke EH
+ 		 * to get the command sense data.
+ 		 */
+-		if (qc->result_tf.status & ATA_SENSE &&
+-		    ((ata_is_ncq(qc->tf.protocol) &&
+-		      dev->flags & ATA_DFLAG_CDL_ENABLED) ||
+-		     (!ata_is_ncq(qc->tf.protocol) &&
+-		      ata_id_sense_reporting_enabled(dev->id)))) {
++		if (qc->flags & ATA_QCFLAG_HAS_CDL &&
++		    qc->result_tf.status & ATA_SENSE) {
+ 			/*
+ 			 * Tell SCSI EH to not overwrite scmd->result even if
+ 			 * this command is finished with result SAM_STAT_GOOD.
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index 85e279a12f62c..09833f4d2079f 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -396,10 +396,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ 	case ATA_LPM_MED_POWER_WITH_DIPM:
+ 	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
+ 	case ATA_LPM_MIN_POWER:
+-		if (ata_link_nr_enabled(link) > 0)
+-			/* no restrictions on LPM transitions */
++		if (ata_link_nr_enabled(link) > 0) {
++			/* assume no restrictions on LPM transitions */
+ 			scontrol &= ~(0x7 << 8);
+-		else {
++
++			/*
++			 * If the controller does not support partial, slumber,
++			 * or devsleep, then disallow these transitions.
++			 */
++			if (link->ap->host->flags & ATA_HOST_NO_PART)
++				scontrol |= (0x1 << 8);
++
++			if (link->ap->host->flags & ATA_HOST_NO_SSC)
++				scontrol |= (0x2 << 8);
++
++			if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
++				scontrol |= (0x4 << 8);
++		} else {
+ 			/* empty port, power off */
+ 			scontrol &= ~0xf;
+ 			scontrol |= (0x1 << 2);
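
The new scontrol logic above maps the ATA_HOST_NO_* flags onto the SATA
SControl IPM field (bits 11:8), where, per the SATA spec, 0x1 forbids
transitions to Partial, 0x2 to Slumber, and 0x4 to DevSleep. A compact
sketch of that mapping (flag values here are illustrative, not libata's):

    #include <stdio.h>

    #define ATA_HOST_NO_PART    (1u << 0)
    #define ATA_HOST_NO_SSC     (1u << 1)
    #define ATA_HOST_NO_DEVSLP  (1u << 2)

    static unsigned int lpm_scontrol(unsigned int scontrol,
                                     unsigned int host_flags)
    {
        scontrol &= ~(0x7u << 8);          /* assume no restrictions */

        if (host_flags & ATA_HOST_NO_PART)
            scontrol |= 0x1u << 8;
        if (host_flags & ATA_HOST_NO_SSC)
            scontrol |= 0x2u << 8;
        if (host_flags & ATA_HOST_NO_DEVSLP)
            scontrol |= 0x4u << 8;
        return scontrol;
    }

    int main(void)
    {
        printf("0x%x\n",
               lpm_scontrol(0, ATA_HOST_NO_PART | ATA_HOST_NO_DEVSLP));
        return 0;   /* prints 0x500 */
    }
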
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e685acc5cacd9..dfdfb72d350fe 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -625,9 +625,24 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 	{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 
+ 	/* Additional Realtek 8723AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index c95fa4335fee2..9766dbf607f97 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1525,6 +1525,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index b95963095729a..f4c4c027b062d 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -512,10 +512,17 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 	int rc;
+ 	u32 ordinal;
+ 	unsigned long dur;
++	unsigned int try;
+ 
+-	rc = tpm_tis_send_data(chip, buf, len);
+-	if (rc < 0)
+-		return rc;
++	for (try = 0; try < TPM_RETRY; try++) {
++		rc = tpm_tis_send_data(chip, buf, len);
++		if (rc >= 0)
++			/* Data transfer done successfully */
++			break;
++		else if (rc != -EIO)
++			/* Data transfer failed, not recoverable */
++			return rc;
++	}
+ 
+ 	rc = tpm_tis_verify_crc(priv, len, buf);
+ 	if (rc < 0) {
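
The tpm_tis change above wraps the data transfer in a bounded retry that
treats only -EIO as recoverable; any other error still fails immediately.
The shape of that loop as a runnable sketch (TPM_RETRY's value and the
fail-twice stub are assumptions for illustration):

    #include <stdio.h>

    #define TPM_RETRY  50   /* assumed bound, mirroring tpm.h */
    #define EIO        5

    static int tis_send_data_stub(unsigned int attempt)
    {
        return attempt < 2 ? -EIO : 0;  /* fail twice, then succeed */
    }

    static int send_with_retry(void)
    {
        int rc = -EIO;
        unsigned int try;

        for (try = 0; try < TPM_RETRY; try++) {
            rc = tis_send_data_stub(try);
            if (rc >= 0)
                break;          /* transfer done */
            else if (rc != -EIO)
                return rc;      /* not recoverable */
        }
        return rc;
    }

    int main(void)
    {
        printf("rc = %d\n", send_with_retry());  /* rc = 0 */
        return 0;
    }
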
+diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
+index 7a8d402f05be1..9af280735cbaa 100644
+--- a/drivers/comedi/Kconfig
++++ b/drivers/comedi/Kconfig
+@@ -67,7 +67,6 @@ config COMEDI_TEST
+ 
+ config COMEDI_PARPORT
+ 	tristate "Parallel port support"
+-	depends on HAS_IOPORT
+ 	help
+ 	  Enable support for the standard parallel port.
+ 	  A cheap and easy way to get a few more digital I/O lines. Steal
+@@ -80,7 +79,6 @@ config COMEDI_PARPORT
+ config COMEDI_SSV_DNP
+ 	tristate "SSV Embedded Systems DIL/Net-PC support"
+ 	depends on X86_32 || COMPILE_TEST
+-	depends on HAS_IOPORT
+ 	help
+ 	  Enable support for SSV Embedded Systems DIL/Net-PC
+ 
+@@ -91,7 +89,6 @@ endif # COMEDI_MISC_DRIVERS
+ 
+ menuconfig COMEDI_ISA_DRIVERS
+ 	bool "Comedi ISA and PC/104 drivers"
+-	depends on ISA
+ 	help
+ 	  Enable comedi ISA and PC/104 drivers to be built
+ 
+@@ -103,8 +100,7 @@ if COMEDI_ISA_DRIVERS
+ 
+ config COMEDI_PCL711
+ 	tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
+ 
+@@ -165,9 +161,8 @@ config COMEDI_PCL730
+ 
+ config COMEDI_PCL812
+ 	tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
+ 	  ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
+@@ -178,9 +173,8 @@ config COMEDI_PCL812
+ 
+ config COMEDI_PCL816
+ 	tristate "Advantech PCL-814 and PCL-816 ISA card support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Advantech PCL-814 and PCL-816 ISA cards
+ 
+@@ -189,9 +183,8 @@ config COMEDI_PCL816
+ 
+ config COMEDI_PCL818
+ 	tristate "Advantech PCL-718 and PCL-818 ISA card support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Advantech PCL-818 ISA cards
+ 	  PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
+@@ -210,7 +203,7 @@ config COMEDI_PCM3724
+ 
+ config COMEDI_AMPLC_DIO200_ISA
+ 	tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
+-	depends on COMEDI_AMPLC_DIO200
++	select COMEDI_AMPLC_DIO200
+ 	help
+ 	  Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
+ 	  PC272E ISA DIO boards
+@@ -262,8 +255,7 @@ config COMEDI_DAC02
+ 
+ config COMEDI_DAS16M1
+ 	tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
+@@ -273,7 +265,7 @@ config COMEDI_DAS16M1
+ 
+ config COMEDI_DAS08_ISA
+ 	tristate "DAS-08 compatible ISA and PC/104 card support"
+-	depends on COMEDI_DAS08
++	select COMEDI_DAS08
+ 	help
+ 	  Enable support for Keithley Metrabyte/ComputerBoards DAS08
+ 	  and compatible ISA and PC/104 cards:
+@@ -286,9 +278,8 @@ config COMEDI_DAS08_ISA
+ 
+ config COMEDI_DAS16
+ 	tristate "DAS-16 compatible ISA and PC/104 card support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for Keithley Metrabyte/ComputerBoards DAS16
+@@ -305,8 +296,7 @@ config COMEDI_DAS16
+ 
+ config COMEDI_DAS800
+ 	tristate "DAS800 and compatible ISA card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
+ 	  Keithley Metrabyte DAS-800, DAS-801, DAS-802
+@@ -318,9 +308,8 @@ config COMEDI_DAS800
+ 
+ config COMEDI_DAS1800
+ 	tristate "DAS1800 and compatible ISA card support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for DAS1800 and compatible ISA cards
+ 	  Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
+@@ -334,8 +323,7 @@ config COMEDI_DAS1800
+ 
+ config COMEDI_DAS6402
+ 	tristate "DAS6402 and compatible ISA card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for DAS6402 and compatible ISA cards
+ 	  Computerboards, Keithley Metrabyte DAS6402 and compatibles
+@@ -414,8 +402,7 @@ config COMEDI_FL512
+ 
+ config COMEDI_AIO_AIO12_8
+ 	tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
+@@ -469,9 +456,8 @@ config COMEDI_ADQ12B
+ 
+ config COMEDI_NI_AT_A2150
+ 	tristate "NI AT-A2150 ISA card support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_ISADMA if ISA_DMA_API
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for National Instruments AT-A2150 cards
+ 
+@@ -480,8 +466,7 @@ config COMEDI_NI_AT_A2150
+ 
+ config COMEDI_NI_AT_AO
+ 	tristate "NI AT-AO-6/10 EISA card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for National Instruments AT-AO-6/10 cards
+ 
+@@ -512,7 +497,7 @@ config COMEDI_NI_ATMIO16D
+ 
+ config COMEDI_NI_LABPC_ISA
+ 	tristate "NI Lab-PC and compatibles ISA support"
+-	depends on COMEDI_NI_LABPC
++	select COMEDI_NI_LABPC
+ 	help
+ 	  Enable support for National Instruments Lab-PC and compatibles
+ 	  Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
+@@ -576,7 +561,7 @@ endif # COMEDI_ISA_DRIVERS
+ 
+ menuconfig COMEDI_PCI_DRIVERS
+ 	tristate "Comedi PCI drivers"
+-	depends on PCI && HAS_IOPORT
++	depends on PCI
+ 	help
+ 	  Enable support for comedi PCI drivers.
+ 
+@@ -725,8 +710,7 @@ config COMEDI_ADL_PCI8164
+ 
+ config COMEDI_ADL_PCI9111
+ 	tristate "ADLink PCI-9111HR support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for ADlink PCI9111 cards
+ 
+@@ -736,7 +720,7 @@ config COMEDI_ADL_PCI9111
+ config COMEDI_ADL_PCI9118
+ 	tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
+ 	depends on HAS_DMA
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
+ 
+@@ -745,8 +729,7 @@ config COMEDI_ADL_PCI9118
+ 
+ config COMEDI_ADV_PCI1710
+ 	tristate "Advantech PCI-171x and PCI-1731 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
+ 	  PCI-1713 and PCI-1731
+@@ -790,8 +773,7 @@ config COMEDI_ADV_PCI1760
+ 
+ config COMEDI_ADV_PCI_DIO
+ 	tristate "Advantech PCI DIO card support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for Advantech PCI DIO cards
+@@ -804,7 +786,7 @@ config COMEDI_ADV_PCI_DIO
+ 
+ config COMEDI_AMPLC_DIO200_PCI
+ 	tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
+-	depends on COMEDI_AMPLC_DIO200
++	select COMEDI_AMPLC_DIO200
+ 	help
+ 	  Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
+ 	  and PCIe296 DIO boards.
+@@ -832,8 +814,7 @@ config COMEDI_AMPLC_PC263_PCI
+ 
+ config COMEDI_AMPLC_PCI224
+ 	tristate "Amplicon PCI224 and PCI234 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Amplicon PCI224 and PCI234 AO boards
+ 
+@@ -842,8 +823,7 @@ config COMEDI_AMPLC_PCI224
+ 
+ config COMEDI_AMPLC_PCI230
+ 	tristate "Amplicon PCI230 and PCI260 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
+@@ -862,7 +842,7 @@ config COMEDI_CONTEC_PCI_DIO
+ 
+ config COMEDI_DAS08_PCI
+ 	tristate "DAS-08 PCI support"
+-	depends on COMEDI_DAS08
++	select COMEDI_DAS08
+ 	help
+ 	  Enable support for PCI DAS-08 cards.
+ 
+@@ -949,8 +929,7 @@ config COMEDI_CB_PCIDAS64
+ 
+ config COMEDI_CB_PCIDAS
+ 	tristate "MeasurementComputing PCI-DAS support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
+@@ -974,8 +953,7 @@ config COMEDI_CB_PCIDDA
+ 
+ config COMEDI_CB_PCIMDAS
+ 	tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for ComputerBoards/MeasurementComputing PCI Migration
+@@ -995,8 +973,7 @@ config COMEDI_CB_PCIMDDA
+ 
+ config COMEDI_ME4000
+ 	tristate "Meilhaus ME-4000 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Meilhaus PCI data acquisition cards
+ 	  ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
+@@ -1054,7 +1031,7 @@ config COMEDI_NI_670X
+ 
+ config COMEDI_NI_LABPC_PCI
+ 	tristate "NI Lab-PC PCI-1200 support"
+-	depends on COMEDI_NI_LABPC
++	select COMEDI_NI_LABPC
+ 	help
+ 	  Enable support for National Instruments Lab-PC PCI-1200.
+ 
+@@ -1076,7 +1053,6 @@ config COMEDI_NI_PCIDIO
+ config COMEDI_NI_PCIMIO
+ 	tristate "NI PCI-MIO-E series and M series support"
+ 	depends on HAS_DMA
+-	depends on HAS_IOPORT
+ 	select COMEDI_NI_TIOCMD
+ 	select COMEDI_8255
+ 	help
+@@ -1098,8 +1074,7 @@ config COMEDI_NI_PCIMIO
+ 
+ config COMEDI_RTD520
+ 	tristate "Real Time Devices PCI4520/DM7520 support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for Real Time Devices PCI4520/DM7520
+ 
+@@ -1139,8 +1114,7 @@ if COMEDI_PCMCIA_DRIVERS
+ 
+ config COMEDI_CB_DAS16_CS
+ 	tristate "CB DAS16 series PCMCIA support"
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	help
+ 	  Enable support for the ComputerBoards/MeasurementComputing PCMCIA
+ 	  cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
+@@ -1150,7 +1124,7 @@ config COMEDI_CB_DAS16_CS
+ 
+ config COMEDI_DAS08_CS
+ 	tristate "CB DAS08 PCMCIA support"
+-	depends on COMEDI_DAS08
++	select COMEDI_DAS08
+ 	help
+ 	  Enable support for the ComputerBoards/MeasurementComputing DAS-08
+ 	  PCMCIA card
+@@ -1160,7 +1134,6 @@ config COMEDI_DAS08_CS
+ 
+ config COMEDI_NI_DAQ_700_CS
+ 	tristate "NI DAQCard-700 PCMCIA support"
+-	depends on HAS_IOPORT
+ 	help
+ 	  Enable support for the National Instruments PCMCIA DAQCard-700 DIO
+ 
+@@ -1169,7 +1142,6 @@ config COMEDI_NI_DAQ_700_CS
+ 
+ config COMEDI_NI_DAQ_DIO24_CS
+ 	tristate "NI DAQ-Card DIO-24 PCMCIA support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
+@@ -1179,7 +1151,7 @@ config COMEDI_NI_DAQ_DIO24_CS
+ 
+ config COMEDI_NI_LABPC_CS
+ 	tristate "NI DAQCard-1200 PCMCIA support"
+-	depends on COMEDI_NI_LABPC
++	select COMEDI_NI_LABPC
+ 	help
+ 	  Enable support for the National Instruments PCMCIA DAQCard-1200
+ 
+@@ -1188,7 +1160,6 @@ config COMEDI_NI_LABPC_CS
+ 
+ config COMEDI_NI_MIO_CS
+ 	tristate "NI DAQCard E series PCMCIA support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_NI_TIO
+ 	select COMEDI_8255
+ 	help
+@@ -1201,7 +1172,6 @@ config COMEDI_NI_MIO_CS
+ 
+ config COMEDI_QUATECH_DAQP_CS
+ 	tristate "Quatech DAQP PCMCIA data capture card support"
+-	depends on HAS_IOPORT
+ 	help
+ 	  Enable support for the Quatech DAQP PCMCIA data capture cards
+ 	  DAQP-208 and DAQP-308
+@@ -1278,14 +1248,12 @@ endif # COMEDI_USB_DRIVERS
+ 
+ config COMEDI_8254
+ 	tristate
+-	depends on HAS_IOPORT
+ 
+ config COMEDI_8255
+ 	tristate
+ 
+ config COMEDI_8255_SA
+ 	tristate "Standalone 8255 support"
+-	depends on HAS_IOPORT
+ 	select COMEDI_8255
+ 	help
+ 	  Enable support for 8255 digital I/O as a standalone driver.
+@@ -1317,7 +1285,7 @@ config COMEDI_KCOMEDILIB
+ 	  called kcomedilib.
+ 
+ config COMEDI_AMPLC_DIO200
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	tristate
+ 
+ config COMEDI_AMPLC_PC236
+@@ -1326,7 +1294,7 @@ config COMEDI_AMPLC_PC236
+ 
+ config COMEDI_DAS08
+ 	tristate
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 
+ config COMEDI_ISADMA
+@@ -1334,8 +1302,7 @@ config COMEDI_ISADMA
+ 
+ config COMEDI_NI_LABPC
+ 	tristate
+-	depends on HAS_IOPORT
+-	depends on COMEDI_8254
++	select COMEDI_8254
+ 	select COMEDI_8255
+ 
+ config COMEDI_NI_LABPC_ISADMA
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index a3104e35412c1..aa597cda0d887 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 		 * without actually having a link.
+ 		 */
+  create:
+-		device = kzalloc(sizeof(*device), GFP_KERNEL);
++		device = kzalloc(sizeof(*device), GFP_ATOMIC);
+ 		if (device == NULL)
+ 			break;
+ 
+diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
+index 88466b663482f..f40c815343812 100644
+--- a/drivers/firewire/core-topology.c
++++ b/drivers/firewire/core-topology.c
+@@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
+ {
+ 	struct fw_node *node;
+ 
+-	node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
++	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
+ 	if (node == NULL)
+ 		return NULL;
+ 
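
Both kzalloc() calls above sit on the bus-topology update path, which runs in
atomic context, so a sleeping GFP_KERNEL allocation was unsafe there;
GFP_ATOMIC never sleeps. The general shape of the rule, with a hypothetical
lock and payload:

	spin_lock_irqsave(&card->lock, flags);
	/*
	 * No sleeping while a spinlock is held: GFP_KERNEL may block in
	 * memory reclaim, GFP_ATOMIC cannot (at the cost of failing more
	 * readily under pressure, so the error path must stay cheap).
	 */
	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	/* ... populate node ... */
	spin_unlock_irqrestore(&card->lock, flags);
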
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 6dc950c1b6893..a3b86b86dc477 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1296,7 +1296,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+-bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
+ bool amdgpu_device_pcie_dynamic_switching_supported(void);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+ bool amdgpu_device_aspm_support_quirk(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fb78a8f475879..946d031d2520e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -127,7 +127,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ 	struct drm_gem_object *gobj;
+ 	struct amdgpu_bo *bo;
+ 	unsigned long size;
+-	int r;
+ 
+ 	gobj = drm_gem_object_lookup(p->filp, data->handle);
+ 	if (gobj == NULL)
+@@ -139,23 +138,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ 	drm_gem_object_put(gobj);
+ 
+ 	size = amdgpu_bo_size(bo);
+-	if (size != PAGE_SIZE || (data->offset + 8) > size) {
+-		r = -EINVAL;
+-		goto error_unref;
+-	}
++	if (size != PAGE_SIZE || data->offset > (size - 8))
++		return -EINVAL;
+ 
+-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+-		r = -EINVAL;
+-		goto error_unref;
+-	}
++	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
++		return -EINVAL;
+ 
+ 	*offset = data->offset;
+-
+ 	return 0;
+-
+-error_unref:
+-	amdgpu_bo_unref(&bo);
+-	return r;
+ }
+ 
+ static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
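
Note the reworked bounds test above: the old "(data->offset + 8) > size" does
arithmetic on the untrusted offset and can wrap, while the new
"data->offset > (size - 8)" keeps the arithmetic on the known-good side (safe
here because size was just checked to equal PAGE_SIZE). A standalone sketch of
the two forms:

	#include <stdbool.h>
	#include <stdint.h>

	/* Wraps: off + 8 becomes a small value when off > UINT64_MAX - 8. */
	static bool fence_fits_unsafe(uint64_t off, uint64_t size)
	{
		return off + 8 <= size;
	}

	/* No addition on the untrusted side; requires size >= 8. */
	static bool fence_fits(uint64_t off, uint64_t size)
	{
		return size >= 8 && off <= size - 8;
	}
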
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6e5e4603a51a1..2168dc92c6704 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1461,32 +1461,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
+-/*
+- * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+- * Disable S/G on such systems until we have a proper fix.
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+- */
+-bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+-{
+-	switch (amdgpu_sg_display) {
+-	case -1:
+-		break;
+-	case 0:
+-		return false;
+-	case 1:
+-		return true;
+-	default:
+-		return false;
+-	}
+-	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+-	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
+-		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+-		return false;
+-	}
+-	return true;
+-}
+-
+ /*
+  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+  * speed switching. Until we have confirmation from Intel that a specific host
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+index fceb3b384955a..f3b0aaf3ebc69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -138,6 +138,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ /**
+  * amdgpu_ih_ring_write - write IV to the ring buffer
+  *
++ * @adev: amdgpu_device pointer
+  * @ih: ih ring to write to
+  * @iv: the iv to write
+  * @num_dw: size of the iv in dw
+@@ -145,8 +146,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+  * Writes an IV to the ring buffer using the CPU and increment the wptr.
+  * Used for testing and delegating IVs to a software ring.
+  */
+-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
+-			  unsigned int num_dw)
++void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++			  const uint32_t *iv, unsigned int num_dw)
+ {
+ 	uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2;
+ 	unsigned int i;
+@@ -161,6 +162,9 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
+ 	if (wptr != READ_ONCE(ih->rptr)) {
+ 		wmb();
+ 		WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr));
++	} else if (adev->irq.retry_cam_enabled) {
++		dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n",
++			      wptr, ih->rptr);
+ 	}
+ }
+ 
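
The new branch reports overflow instead of dropping IVs silently: if wptr
lands exactly on rptr after the copy, the writer has wrapped onto the reader,
a condition that is now warned about (once) when the retry CAM is enabled. A
generic sketch of the free-space rule for a power-of-two ring (hypothetical
helper):

	#include <stdint.h>

	/*
	 * Free slots left for the writer. One slot is kept empty so that
	 * wptr == rptr always means "empty" and never "full".
	 */
	static uint32_t ring_space(uint32_t wptr, uint32_t rptr, uint32_t size)
	{
		return (rptr - wptr - 1) & (size - 1);	/* size: power of two */
	}
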
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index dd1c2eded6b9d..6c6184f0dbc17 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -27,6 +27,9 @@
+ /* Maximum number of IVs processed at once */
+ #define AMDGPU_IH_MAX_NUM_IVS	32
+ 
++#define IH_RING_SIZE	(256 * 1024)
++#define IH_SW_RING_SIZE	(8 * 1024)	/* enough for 256 CAM entries */
++
+ struct amdgpu_device;
+ struct amdgpu_iv_entry;
+ 
+@@ -97,8 +100,8 @@ struct amdgpu_ih_funcs {
+ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ 			unsigned ring_size, bool use_bus_addr);
+ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
+-			  unsigned int num_dw);
++void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++			  const uint32_t *iv, unsigned int num_dw);
+ int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
+ 					    struct amdgpu_ih_ring *ih);
+ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 5273decc5753b..fa6d0adcec206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -493,7 +493,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
+ 			 struct amdgpu_iv_entry *entry,
+ 			 unsigned int num_dw)
+ {
+-	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
++	amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
+ 	schedule_work(&adev->irq.ih_soft_work);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+index c6b4337eb20c3..10df731998b22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+@@ -81,7 +81,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ 		     unsigned int size)
+ {
+ 	struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+-						   GFP_KERNEL, true, 0);
++						   GFP_KERNEL, false, 0);
+ 
+ 	if (IS_ERR(sa)) {
+ 		*sa_bo = NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
+index 72b629a78c62c..d0fc62784e821 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
++++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
+@@ -134,7 +134,7 @@ static int aqua_vanjaram_xcp_sched_list_update(
+ 
+ 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ 		ring = adev->rings[i];
+-		if (!ring || !ring->sched.ready)
++		if (!ring || !ring->sched.ready || ring->no_scheduler)
+ 			continue;
+ 
+ 		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+index b02e1cef78a76..980b241200803 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+@@ -535,7 +535,7 @@ static int ih_v6_0_sw_init(void *handle)
+ 	 * use bus address for ih ring by psp bl */
+ 	use_bus_addr =
+ 		(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
+ 	if (r)
+ 		return r;
+ 
+@@ -548,7 +548,7 @@ static int ih_v6_0_sw_init(void *handle)
+ 	/* initialize ih control register offset */
+ 	ih_v6_0_init_register_offset(adev);
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+index eec13cb5bf758..b6a8478dabf43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+@@ -565,7 +565,7 @@ static int navi10_ih_sw_init(void *handle)
+ 		use_bus_addr = false;
+ 	else
+ 		use_bus_addr = true;
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
+ 	if (r)
+ 		return r;
+ 
+@@ -578,7 +578,7 @@ static int navi10_ih_sw_init(void *handle)
+ 	/* initialize ih control registers offset */
+ 	navi10_ih_init_register_offset(adev);
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 1e83db0c5438d..d364c6dd152c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -485,7 +485,7 @@ static int vega10_ih_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, true);
+ 	if (r)
+ 		return r;
+ 
+@@ -510,7 +510,7 @@ static int vega10_ih_sw_init(void *handle)
+ 	/* initialize ih control registers offset */
+ 	vega10_ih_init_register_offset(adev);
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index 4d719df376a72..544ee55a22da6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -539,7 +539,7 @@ static int vega20_ih_sw_init(void *handle)
+ 	    (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)))
+ 		use_bus_addr = false;
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
+ 	if (r)
+ 		return r;
+ 
+@@ -565,7 +565,7 @@ static int vega20_ih_sw_init(void *handle)
+ 	/* initialize ih control registers offset */
+ 	vega20_ih_init_register_offset(adev);
+ 
+-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, use_bus_addr);
++	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index d4c9ee3f99533..1282559d2b10c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1487,8 +1487,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+ 
+ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+ {
+-	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+-	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
++	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+ 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
+ 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3123ea2f4f30a..e0d556cf919f7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1272,11 +1272,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 
+ 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ 
+-	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
+-	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+-	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
+-	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
+-	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
++	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
++						   AMDGPU_GPU_PAGE_SHIFT);
++	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
++						  AMDGPU_GPU_PAGE_SHIFT);
++	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
++						 AMDGPU_GPU_PAGE_SHIFT);
++	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
++						AMDGPU_GPU_PAGE_SHIFT);
++	page_table_base.high_part = upper_32_bits(pt_base);
+ 	page_table_base.low_part = lower_32_bits(pt_base);
+ 
+ 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+@@ -1638,8 +1642,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		}
+ 		break;
+ 	}
+-	if (init_data.flags.gpu_vm_support)
+-		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
++	if (init_data.flags.gpu_vm_support &&
++	    (amdgpu_sg_display == 0))
++		init_data.flags.gpu_vm_support = false;
+ 
+ 	if (init_data.flags.gpu_vm_support)
+ 		adev->mode_info.gpu_vm_support = true;
+@@ -2328,14 +2333,62 @@ static int dm_late_init(void *handle)
+ 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+ }
+ 
++static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
++{
++	int ret;
++	u8 guid[16];
++	u64 tmp64;
++
++	mutex_lock(&mgr->lock);
++	if (!mgr->mst_primary)
++		goto out_fail;
++
++	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
++		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
++				 DP_MST_EN |
++				 DP_UP_REQ_EN |
++				 DP_UPSTREAM_IS_SRC);
++	if (ret < 0) {
++		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	/* Some hubs forget their guids after they resume */
++	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++	if (ret != 16) {
++		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	if (memchr_inv(guid, 0, 16) == NULL) {
++		tmp64 = get_jiffies_64();
++		memcpy(&guid[0], &tmp64, sizeof(u64));
++		memcpy(&guid[8], &tmp64, sizeof(u64));
++
++		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
++
++		if (ret != 16) {
++			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
++			goto out_fail;
++		}
++	}
++
++	memcpy(mgr->mst_primary->guid, guid, 16);
++
++out_fail:
++	mutex_unlock(&mgr->lock);
++}
++
+ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ {
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
+ 	struct drm_dp_mst_topology_mgr *mgr;
+-	int ret;
+-	bool need_hotplug = false;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -2357,18 +2410,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ 			if (!dp_is_lttpr_present(aconnector->dc_link))
+ 				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+ 
+-			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
+-			if (ret < 0) {
+-				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+-					aconnector->dc_link);
+-				need_hotplug = true;
+-			}
++			/* TODO: move resume_mst_branch_status() back into drm mst resume
++			 * once topology probing is pulled out of mst resume into a second
++			 * resume step; that second step should run after the old state
++			 * has been restored (i.e. drm_atomic_helper_resume()).
++			 */
++			resume_mst_branch_status(mgr);
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
+-
+-	if (need_hotplug)
+-		drm_kms_helper_hotplug_event(dev);
+ }
+ 
+ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+@@ -2762,7 +2812,8 @@ static int dm_resume(void *handle)
+ 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	struct dc_state *dc_state;
+-	int i, r, j;
++	int i, r, j, ret;
++	bool need_hotplug = false;
+ 
+ 	if (amdgpu_in_reset(adev)) {
+ 		dc_state = dm->cached_dc_state;
+@@ -2860,7 +2911,7 @@ static int dm_resume(void *handle)
+ 			continue;
+ 
+ 		/*
+-		 * this is the case when traversing through already created
++		 * this is the case when traversing through already created end sink
+ 		 * MST connectors, should be skipped
+ 		 */
+ 		if (aconnector && aconnector->mst_root)
+@@ -2920,6 +2971,27 @@ static int dm_resume(void *handle)
+ 
+ 	dm->cached_state = NULL;
+ 
++	/* Do MST topology probing after resuming cached state */
++	drm_connector_list_iter_begin(ddev, &iter);
++	drm_for_each_connector_iter(connector, &iter) {
++		aconnector = to_amdgpu_dm_connector(connector);
++		if (aconnector->dc_link->type != dc_connection_mst_branch ||
++		    aconnector->mst_root)
++			continue;
++
++		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
++
++		if (ret < 0) {
++			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
++					aconnector->dc_link);
++			need_hotplug = true;
++		}
++	}
++	drm_connector_list_iter_end(&iter);
++
++	if (need_hotplug)
++		drm_kms_helper_hotplug_event(ddev);
++
+ 	amdgpu_dm_irq_resume_late(adev);
+ 
+ 	amdgpu_dm_smu_write_watermarks_table(adev);
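
Taken together, the hunks above split MST resume in two: s3_handle_mst() now
only restores each branch's DPCD state (MST enable bits and GUID) via
resume_mst_branch_status(), while the heavyweight topology probe and the
hotplug fallback run at the end of dm_resume(), after the cached atomic state
has been reapplied. Condensed control flow, using the function names from the
patch:

	/* early, from s3_handle_mst(dev, false): */
	resume_mst_branch_status(mgr);	/* re-arm DP_MSTM_CTRL, fix up GUID */

	/* ... cached atomic state is restored here ... */

	/* late, at the end of dm_resume(): */
	ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
	if (ret < 0) {
		dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
					       aconnector->dc_link);
		need_hotplug = true;	/* fall back to full re-detection */
	}
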
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index cb992aca760dc..5fc78bf927bbc 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -802,7 +802,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
+ 					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+ 		else
+ 			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+-					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
++					clk_mgr_base->bw_params->max_memclk_mhz);
+ 	} else {
+ 		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ 				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+index 30c0644d4418f..be5a6d008b290 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+@@ -169,11 +169,23 @@ static void add_link_enc_assignment(
+ /* Return first available DIG link encoder. */
+ static enum engine_id find_first_avail_link_enc(
+ 		const struct dc_context *ctx,
+-		const struct dc_state *state)
++		const struct dc_state *state,
++		enum engine_id eng_id_requested)
+ {
+ 	enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+ 	int i;
+ 
++	if (eng_id_requested != ENGINE_ID_UNKNOWN) {
++
++		for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
++			eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
++			if (eng_id == eng_id_requested)
++				return eng_id;
++		}
++	}
++
++	eng_id = ENGINE_ID_UNKNOWN;
++
+ 	for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ 		eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
+ 		if (eng_id != ENGINE_ID_UNKNOWN)
+@@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign(
+ 		struct dc_stream_state *streams[],
+ 		uint8_t stream_count)
+ {
+-	enum engine_id eng_id = ENGINE_ID_UNKNOWN;
++	enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
+ 	int i;
+ 	int j;
+ 
+@@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign(
+ 		 * assigned to that endpoint.
+ 		 */
+ 		link_enc = get_link_enc_used_by_link(state, stream->link);
+-		if (link_enc == NULL)
+-			eng_id = find_first_avail_link_enc(stream->ctx, state);
++		if (link_enc == NULL) {
++
++			if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
++					stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
++				eng_id_req = stream->link->dpia_preferred_eng_id;
++
++			eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
++		}
+ 		else
+ 			eng_id =  link_enc->preferred_engine;
+ 
+@@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign(
+ 			DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
+ 					__func__,
+ 					assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
+-					assignment.ep_id.link_id.enum_id - 1,
++					assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
++							assignment.ep_id.link_id.enum_id :
++							assignment.ep_id.link_id.enum_id - 1,
+ 					assignment.eng_id);
+ 	}
+ 	for (i = 0; i < MAX_PIPES; i++) {
+@@ -413,7 +433,9 @@ void link_enc_cfg_link_encs_assign(
+ 			DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
+ 					__func__,
+ 					assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
+-					assignment.ep_id.link_id.enum_id - 1,
++					assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
++							assignment.ep_id.link_id.enum_id :
++							assignment.ep_id.link_id.enum_id - 1,
+ 					assignment.eng_id);
+ 	}
+ 
+@@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc(
+ 	if (stream)
+ 		link = stream->link;
+ 
+-	// dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+ 	return link;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 63948170fd6d9..81258392d44a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1479,6 +1479,7 @@ struct dc_link {
+ 	 * object creation.
+ 	 */
+ 	enum engine_id eng_id;
++	enum engine_id dpia_preferred_eng_id;
+ 
+ 	bool test_pattern_enabled;
+ 	union compliance_test_state compliance_test_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 6a9024aa32853..e79e1d690afa8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -1029,6 +1029,28 @@ static const struct dce_i2c_mask i2c_masks = {
+ 		I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
+ };
+ 
++/* ========================================================== */
++
++/*
++ * DPIA index | Preferred Encoder     |    Host Router
++ *   0        |      C                |       0
++ *   1        |      First Available  |       0
++ *   2        |      D                |       1
++ *   3        |      First Available  |       1
++ */
++/* ========================================================== */
++static const enum engine_id dpia_to_preferred_enc_id_table[] = {
++		ENGINE_ID_DIGC,
++		ENGINE_ID_DIGC,
++		ENGINE_ID_DIGD,
++		ENGINE_ID_DIGD
++};
++
++static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
++{
++	return dpia_to_preferred_enc_id_table[dpia_index];
++}
++
+ static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ 	struct dc_context *ctx,
+ 	uint32_t inst)
+@@ -1777,6 +1799,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.update_bw_bounding_box = dcn314_update_bw_bounding_box,
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn314_get_panel_config_defaults,
++	.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
+ };
+ 
+ static struct clock_source *dcn30_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index 61ceff6bc0b19..921f58c0c729b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -281,7 +281,8 @@ static void dccg32_set_dpstreamclk(
+ 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ 
+ 	/* set the dtbclk_p source */
+-	dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
++	/* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */
++	dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst);
+ 
+ 	/* enabled to select one of the DTBCLKs for pipe */
+ 	switch (dp_hpo_inst) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index 43016c462251f..9d996d5fc3ffa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -4135,7 +4135,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				}
+ 				if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH
+ 						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+-					if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
++					if (v->Output[k] == dm_hdmi) {
++						FMTBufferExceeded = true;
++					} else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
+ 						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ 						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+index 9010c47476e92..b763786bfcc85 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+@@ -4227,7 +4227,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
+ 				}
+ 				if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH
+ 						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+-					if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
++					if (v->Output[k] == dm_hdmi) {
++						FMTBufferExceeded = true;
++					} else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
+ 						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ 						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+index a50e7f4dce421..f74e5fc8218f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+@@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 	double TimeForFetchingMetaPTE = 0;
+ 	double TimeForFetchingRowInVBlank = 0;
+ 	double LinesToRequestPrefetchPixelData = 0;
++	double LinesForPrefetchBandwidth = 0;
+ 	unsigned int HostVMDynamicLevelsTrips;
+ 	double  trip_to_mem;
+ 	double  Tvm_trips;
+@@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule(
+ 			TimeForFetchingMetaPTE = Tvm_oto;
+ 			TimeForFetchingRowInVBlank = Tr0_oto;
+ 			*PrefetchBandwidth = prefetch_bw_oto;
++			/* Clamp to oto for bandwidth calculation */
++			LinesForPrefetchBandwidth = dst_y_prefetch_oto;
+ 		} else {
+ 			*DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ 			TimeForFetchingMetaPTE = Tvm_equ;
+ 			TimeForFetchingRowInVBlank = Tr0_equ;
+ 			*PrefetchBandwidth = prefetch_bw_equ;
++			/* Clamp to equ for bandwidth calculation */
++			LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ 		}
+ 
+ 		*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+@@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 		*DestinationLinesToRequestRowInVBlank =
+ 				dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
+ 
+-		LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
++		LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
+ 				*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
+ 
+ #ifdef __DML_VBA_DEBUG__
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 034610b74a37e..3752410bc987c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -65,6 +65,7 @@ struct resource_context;
+ struct clk_bw_params;
+ 
+ struct resource_funcs {
++	enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
+ 	void (*destroy)(struct resource_pool **pool);
+ 	void (*link_init)(struct dc_link *link);
+ 	struct panel_cntl*(*panel_cntl_create)(
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+index ac1c3e2e7c1d6..a1b800dceb65f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+@@ -783,6 +783,10 @@ static bool construct_dpia(struct dc_link *link,
+ 	/* Set dpia port index : 0 to number of dpia ports */
+ 	link->ddc_hw_inst = init_params->connector_index;
+ 
++	/* Assign DPIA preferred eng_id */
++	if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
++		link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
++
+ 	/* TODO: Create link encoder */
+ 
+ 	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
+index 73ec60757dbcb..9e253af69c7a1 100644
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -1009,7 +1009,7 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+ 	do {
+ 		u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+ 
+-		if (!(reg & DSIM_SFR_HEADER_FULL))
++		if (reg & DSIM_SFR_HEADER_EMPTY)
+ 			return 0;
+ 
+ 		if (!cond_resched())
+diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
+index 5641395fd310e..11445c50956e1 100644
+--- a/drivers/gpu/drm/bridge/tc358762.c
++++ b/drivers/gpu/drm/bridge/tc358762.c
+@@ -231,7 +231,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 1;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+-			  MIPI_DSI_MODE_LPM;
++			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE;
+ 
+ 	ret = tc358762_parse_dt(ctx);
+ 	if (ret < 0)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 1f470968ed14b..9271e47d66572 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -230,6 +230,7 @@ static const struct edid_quirk {
+ 
+ 	/* OSVR HDK and HDK2 VR Headsets */
+ 	EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
++	EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+index 4153f302de7c4..d19e796c20613 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	if (exynos_crtc->ops->atomic_disable)
+ 		exynos_crtc->ops->atomic_disable(exynos_crtc);
+ 
++	spin_lock_irq(&crtc->dev->event_lock);
+ 	if (crtc->state->event && !crtc->state->active) {
+-		spin_lock_irq(&crtc->dev->event_lock);
+ 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+-		spin_unlock_irq(&crtc->dev->event_lock);
+-
+ 		crtc->state->event = NULL;
+ 	}
++	spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ 
+ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 34a397adbd6b0..cc893c9730bff 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -3659,6 +3659,27 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
+ 	return map_aux_ch(devdata->i915, devdata->child.aux_channel);
+ }
+ 
++bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
++{
++	struct drm_i915_private *i915;
++	u8 aux_channel;
++	int count = 0;
++
++	if (!devdata || !devdata->child.aux_channel)
++		return false;
++
++	i915 = devdata->i915;
++	aux_channel = devdata->child.aux_channel;
++
++	list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
++		if (intel_bios_encoder_supports_dp(devdata) &&
++		    aux_channel == devdata->child.aux_channel)
++			count++;
++	}
++
++	return count > 1;
++}
++
+ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+ {
+ 	if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
+index 45fae97d97192..f60da533949d9 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.h
++++ b/drivers/gpu/drm/i915/display/intel_bios.h
+@@ -271,6 +271,7 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
++bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
+ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 9f40da20e88d2..cb55112d60657 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5503,8 +5503,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 	/*
+ 	 * VBT and straps are liars. Also check HPD as that seems
+ 	 * to be the most reliable piece of information available.
++	 *
++	 * ... except on devices that forgot to hook HPD up for eDP
++	 * (e.g. Acer Chromebook C710), so we'll check it only if multiple
++	 * ports are attempting to use the same AUX CH, according to VBT.
+ 	 */
+-	if (!intel_digital_port_connected(encoder)) {
++	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
++	    !intel_digital_port_connected(encoder)) {
+ 		/*
+ 		 * If this fails, presume the DPCD answer came
+ 		 * from some other port using the same AUX CH.
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index c58b775877a31..076aa54910571 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -847,7 +847,7 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ 		u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ 				 AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ 		if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+-			drm_err(mtk_dp->drm_dev,
++			dev_err(mtk_dp->dev,
+ 				"AUX Rx Aux hang, need SW reset\n");
+ 			return -EIO;
+ 		}
+@@ -2062,7 +2062,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		is_read = true;
+ 		break;
+ 	default:
+-		drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
++		dev_err(mtk_dp->dev, "invalid aux cmd = %d\n",
+ 			msg->request);
+ 		ret = -EINVAL;
+ 		goto err;
+@@ -2078,7 +2078,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 					     to_access, &msg->reply);
+ 
+ 		if (ret) {
+-			drm_info(mtk_dp->drm_dev,
++			dev_info(mtk_dp->dev,
+ 				 "Failed to do AUX transfer: %d\n", ret);
+ 			goto err;
+ 		}
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index a2513f7168238..f6c6147640173 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -2489,8 +2489,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ 	/* Quirk data */
+ 	adreno_gpu->info = info;
+ 
+-	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
+-		adreno_gpu->base.hw_apriv = true;
++	adreno_gpu->base.hw_apriv = !!(info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
+ 
+ 	a6xx_llc_slices_init(pdev, a6xx_gpu);
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 6e3c1368c5e15..b2283faa173ac 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -275,6 +275,7 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_512K,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ 		.init = a6xx_gpu_init,
+ 	}, {
+ 		.rev = ADRENO_REV(6, 1, 9, ANY_ID),
+@@ -286,6 +287,7 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_512K,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a615_zap.mdt",
+ 		.hwcg = a615_hwcg,
+@@ -299,6 +301,7 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_1M,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a630_zap.mdt",
+ 		.hwcg = a630_hwcg,
+@@ -312,6 +315,7 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_1M,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a640_zap.mdt",
+ 		.hwcg = a640_hwcg,
+@@ -325,6 +329,8 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_1M + SZ_128K,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
++			ADRENO_QUIRK_HAS_HW_APRIV,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a650_zap.mdt",
+ 		.hwcg = a650_hwcg,
+@@ -339,6 +345,8 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_1M + SZ_512K,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
++			ADRENO_QUIRK_HAS_HW_APRIV,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a660_zap.mdt",
+ 		.hwcg = a660_hwcg,
+@@ -351,6 +359,8 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_512K,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
++			ADRENO_QUIRK_HAS_HW_APRIV,
+ 		.init = a6xx_gpu_init,
+ 		.hwcg = a660_hwcg,
+ 		.address_space_size = SZ_16G,
+@@ -364,6 +374,7 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_2M,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a640_zap.mdt",
+ 		.hwcg = a640_hwcg,
+@@ -375,6 +386,8 @@ static const struct adreno_info gpulist[] = {
+ 		},
+ 		.gmem = SZ_4M,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
++		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
++			ADRENO_QUIRK_HAS_HW_APRIV,
+ 		.init = a6xx_gpu_init,
+ 		.zapfw = "a690_zap.mdt",
+ 		.hwcg = a690_hwcg,
+@@ -586,9 +599,9 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (config.rev.core >= 6)
+-		if (!adreno_has_gmu_wrapper(to_adreno_gpu(gpu)))
+-			priv->has_cached_coherent = true;
++	priv->has_cached_coherent =
++		!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT) &&
++		!adreno_has_gmu_wrapper(to_adreno_gpu(gpu));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+index 845019891ad19..129771563f3fd 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+@@ -32,6 +32,8 @@ enum {
+ #define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
+ #define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
+ #define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
++#define ADRENO_QUIRK_HAS_HW_APRIV		BIT(3)
++#define ADRENO_QUIRK_HAS_CACHED_COHERENT	BIT(4)
+ 
+ struct adreno_rev {
+ 	uint8_t  core;
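
The per-model checks (adreno_is_a650(), adreno_is_a660_family(), and the
"core >= 6" test in adreno_bind()) are replaced by two new capability bits
carried in the gpulist table, so each SKU declares HW_APRIV and
cached-coherent support in data rather than code. A minimal sketch of the
flag-test idiom, with hypothetical names:

	#include <stdbool.h>
	#include <stdint.h>

	#define QUIRK_HAS_HW_APRIV		(1ULL << 3)
	#define QUIRK_HAS_CACHED_COHERENT	(1ULL << 4)

	struct gpu_info {
		uint64_t quirks;	/* OR of QUIRK_* bits, set per SKU */
	};

	static bool has_hw_apriv(const struct gpu_info *info)
	{
		/* the bool return collapses the masked bit to 0 or 1 */
		return info->quirks & QUIRK_HAS_HW_APRIV;
	}
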
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index c87a57c9c592b..22dd8b4456855 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -123,7 +123,7 @@ int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
+ 		     unsigned int size, unsigned int align)
+ {
+ 	struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+-						   GFP_KERNEL, true, align);
++						   GFP_KERNEL, false, align);
+ 
+ 	if (IS_ERR(sa)) {
+ 		*sa_bo = NULL;
+diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
+index c5bb683e440c5..0187539ff5eaa 100644
+--- a/drivers/gpu/drm/tiny/gm12u320.c
++++ b/drivers/gpu/drm/tiny/gm12u320.c
+@@ -70,10 +70,10 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
+ #define READ_STATUS_SIZE		13
+ #define MISC_VALUE_SIZE			4
+ 
+-#define CMD_TIMEOUT			msecs_to_jiffies(200)
+-#define DATA_TIMEOUT			msecs_to_jiffies(1000)
+-#define IDLE_TIMEOUT			msecs_to_jiffies(2000)
+-#define FIRST_FRAME_TIMEOUT		msecs_to_jiffies(2000)
++#define CMD_TIMEOUT			200
++#define DATA_TIMEOUT			1000
++#define IDLE_TIMEOUT			2000
++#define FIRST_FRAME_TIMEOUT		2000
+ 
+ #define MISC_REQ_GET_SET_ECO_A		0xff
+ #define MISC_REQ_GET_SET_ECO_B		0x35
+@@ -389,7 +389,7 @@ static void gm12u320_fb_update_work(struct work_struct *work)
+ 	 * switches back to showing its logo.
+ 	 */
+ 	queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+-			   IDLE_TIMEOUT);
++			   msecs_to_jiffies(IDLE_TIMEOUT));
+ 
+ 	return;
+ err:
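
The timeouts above were stored pre-converted to jiffies, but some users pass
them straight to USB helpers whose timeout parameter is in milliseconds;
keeping the constants in milliseconds and converting only where a jiffies
value is consumed fixes the unit mismatch. Sketch of the convention
(hypothetical names around two real kernel APIs):

	#define CMD_TIMEOUT_MS	200	/* keep constants in one unit */

	/* usb_bulk_msg() takes its timeout in milliseconds ... */
	usb_bulk_msg(udev, pipe, buf, len, &actual_len, CMD_TIMEOUT_MS);

	/* ... delayed work takes jiffies: convert at the consuming site */
	queue_delayed_work(system_long_wq, &work,
			   msecs_to_jiffies(CMD_TIMEOUT_MS));
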
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 2e5acfeb76c81..5a416b39b8183 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -698,13 +698,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
+ 
+ 	if (time_left == 0) {
+ 		/*
+-		 * If timed out and bus is still busy in a multi master
+-		 * environment, attempt recovery at here.
++		 * In a multi-master setup, if a timeout occurs, attempt
++		 * recovery. But if the bus is idle, we still need to reset the
++		 * i2c controller to clear the remaining interrupts.
+ 		 */
+ 		if (bus->multi_master &&
+ 		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
+ 		     ASPEED_I2CD_BUS_BUSY_STS))
+ 			aspeed_i2c_recover_bus(bus);
++		else
++			aspeed_i2c_reset(bus);
+ 
+ 		/*
+ 		 * If timed out and the state is still pending, drop the pending
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 5fac448c28fda..1afbc4f7c6e73 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -28,6 +28,7 @@ static LIST_HEAD(icc_providers);
+ static int providers_count;
+ static bool synced_state;
+ static DEFINE_MUTEX(icc_lock);
++static DEFINE_MUTEX(icc_bw_lock);
+ static struct dentry *icc_debugfs_dir;
+ 
+ static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+@@ -631,7 +632,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 
+ 	old_avg = path->reqs[0].avg_bw;
+ 	old_peak = path->reqs[0].peak_bw;
+@@ -663,7 +664,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 		apply_constraints(path);
+ 	}
+ 
+-	mutex_unlock(&icc_lock);
++	mutex_unlock(&icc_bw_lock);
+ 
+ 	trace_icc_set_bw_end(path, ret);
+ 
+@@ -872,6 +873,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 
+ 	node->provider = provider;
+ 	list_add_tail(&node->node_list, &provider->nodes);
+@@ -900,6 +902,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 	node->avg_bw = 0;
+ 	node->peak_bw = 0;
+ 
++	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_node_add);
+@@ -1025,6 +1028,7 @@ void icc_sync_state(struct device *dev)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 	synced_state = true;
+ 	list_for_each_entry(p, &icc_providers, provider_list) {
+ 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
+@@ -1037,13 +1041,21 @@ void icc_sync_state(struct device *dev)
+ 			}
+ 		}
+ 	}
++	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_sync_state);
+ 
+ static int __init icc_init(void)
+ {
+-	struct device_node *root = of_find_node_by_path("/");
++	struct device_node *root;
++
++	/* Teach lockdep about lock ordering wrt. shrinker: */
++	fs_reclaim_acquire(GFP_KERNEL);
++	might_lock(&icc_bw_lock);
++	fs_reclaim_release(GFP_KERNEL);
++
++	root = of_find_node_by_path("/");
+ 
+ 	providers_count = of_count_icc_providers(root);
+ 	of_node_put(root);
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 0d93661f88d30..095b9b49aa825 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -214,6 +214,7 @@ struct dm_table {
+ 
+ 	/* a list of devices used by this table */
+ 	struct list_head devices;
++	struct rw_semaphore devices_lock;
+ 
+ 	/* events get handed up using this callback */
+ 	void (*event_fn)(void *data);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index f5ed729a8e0cd..21ebb6c39394b 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_table *table,
+ 	struct dm_dev_internal *dd;
+ 	struct dm_target_deps *deps;
+ 
++	down_read(&table->devices_lock);
++
+ 	deps = get_result_buffer(param, param_size, &len);
+ 
+ 	/*
+@@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_table *table,
+ 	needed = struct_size(deps, dev, count);
+ 	if (len < needed) {
+ 		param->flags |= DM_BUFFER_FULL_FLAG;
+-		return;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_table *table,
+ 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
+ 
+ 	param->data_size = param->data_start + needed;
++
++out:
++	up_read(&table->devices_lock);
+ }
+ 
+ static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 7d208b2b1a192..37b48f63ae6a5 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&t->devices);
++	init_rwsem(&t->devices_lock);
+ 
+ 	if (!num_targets)
+ 		num_targets = KEYS_PER_NODE;
+@@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
+ 	if (dev == disk_devt(t->md->disk))
+ 		return -EINVAL;
+ 
++	down_write(&t->devices_lock);
++
+ 	dd = find_device(&t->devices, dev);
+ 	if (!dd) {
+ 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+-		if (!dd)
+-			return -ENOMEM;
++		if (!dd) {
++			r = -ENOMEM;
++			goto unlock_ret_r;
++		}
+ 
+ 		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
+ 		if (r) {
+ 			kfree(dd);
+-			return r;
++			goto unlock_ret_r;
+ 		}
+ 
+ 		refcount_set(&dd->count, 1);
+@@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
+ 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
+ 		r = upgrade_mode(dd, mode, t->md);
+ 		if (r)
+-			return r;
++			goto unlock_ret_r;
+ 	}
+ 	refcount_inc(&dd->count);
+ out:
++	up_write(&t->devices_lock);
+ 	*result = dd->dm_dev;
+ 	return 0;
++
++unlock_ret_r:
++	up_write(&t->devices_lock);
++	return r;
+ }
+ EXPORT_SYMBOL(dm_get_device);
+ 
+@@ -419,9 +429,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
+ {
+ 	int found = 0;
+-	struct list_head *devices = &ti->table->devices;
++	struct dm_table *t = ti->table;
++	struct list_head *devices = &t->devices;
+ 	struct dm_dev_internal *dd;
+ 
++	down_write(&t->devices_lock);
++
+ 	list_for_each_entry(dd, devices, list) {
+ 		if (dd->dm_dev == d) {
+ 			found = 1;
+@@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
+ 	}
+ 	if (!found) {
+ 		DMERR("%s: device %s not in table devices list",
+-		      dm_device_name(ti->table->md), d->name);
+-		return;
++		      dm_device_name(t->md), d->name);
++		goto unlock_ret;
+ 	}
+ 	if (refcount_dec_and_test(&dd->count)) {
+-		dm_put_table_device(ti->table->md, d);
++		dm_put_table_device(t->md, d);
+ 		list_del(&dd->list);
+ 		kfree(dd);
+ 	}
++
++unlock_ret:
++	up_write(&t->devices_lock);
+ }
+ EXPORT_SYMBOL(dm_put_device);
+ 
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index f0f118ab20fa2..64a1f306c96c1 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -715,24 +715,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
+ 	rcu_read_unlock();
+ }
+ 
+-static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
+-					int *srcu_idx, blk_opf_t bio_opf)
+-{
+-	if (bio_opf & REQ_NOWAIT)
+-		return dm_get_live_table_fast(md);
+-	else
+-		return dm_get_live_table(md, srcu_idx);
+-}
+-
+-static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
+-					 blk_opf_t bio_opf)
+-{
+-	if (bio_opf & REQ_NOWAIT)
+-		dm_put_live_table_fast(md);
+-	else
+-		dm_put_live_table(md, srcu_idx);
+-}
+-
+ static char *_dm_claim_ptr = "I belong to device-mapper";
+ 
+ /*
+@@ -1833,9 +1815,8 @@ static void dm_submit_bio(struct bio *bio)
+ 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
+ 	int srcu_idx;
+ 	struct dm_table *map;
+-	blk_opf_t bio_opf = bio->bi_opf;
+ 
+-	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
++	map = dm_get_live_table(md, &srcu_idx);
+ 
+ 	/* If suspended, or map not yet available, queue this IO for later */
+ 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+@@ -1851,7 +1832,7 @@ static void dm_submit_bio(struct bio *bio)
+ 
+ 	dm_split_and_process_bio(md, map, bio);
+ out:
+-	dm_put_live_table_bio(md, srcu_idx, bio_opf);
++	dm_put_live_table(md, srcu_idx);
+ }
+ 
+ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2a4a3d3039fae..78d51dddf3a00 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -798,14 +798,14 @@ void mddev_unlock(struct mddev *mddev)
+ 	} else
+ 		mutex_unlock(&mddev->reconfig_mutex);
+ 
++	md_wakeup_thread(mddev->thread);
++	wake_up(&mddev->sb_wait);
++
+ 	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
+ 		list_del_init(&rdev->same_set);
+ 		kobject_del(&rdev->kobj);
+ 		export_rdev(rdev, mddev);
+ 	}
+-
+-	md_wakeup_thread(mddev->thread);
+-	wake_up(&mddev->sb_wait);
+ }
+ EXPORT_SYMBOL_GPL(mddev_unlock);
+ 
+@@ -2452,7 +2452,8 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
+ 	if (test_bit(AutoDetected, &rdev->flags))
+ 		md_autodetect_dev(rdev->bdev->bd_dev);
+ #endif
+-	blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
++	blkdev_put(rdev->bdev,
++		   test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
+ 	rdev->bdev = NULL;
+ 	kobject_put(&rdev->kobj);
+ }
+@@ -3632,6 +3633,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
+ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
+ {
+ 	struct md_rdev *rdev;
++	struct md_rdev *holder;
+ 	sector_t size;
+ 	int err;
+ 
+@@ -3646,8 +3648,15 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
+ 	if (err)
+ 		goto out_clear_rdev;
+ 
++	if (super_format == -2) {
++		holder = &claim_rdev;
++	} else {
++		holder = rdev;
++		set_bit(Holder, &rdev->flags);
++	}
++
+ 	rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+-			super_format == -2 ? &claim_rdev : rdev, NULL);
++				       holder, NULL);
+ 	if (IS_ERR(rdev->bdev)) {
+ 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
+ 			MAJOR(newdev), MINOR(newdev));
+@@ -3684,7 +3693,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
+ 	return rdev;
+ 
+ out_blkdev_put:
+-	blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
++	blkdev_put(rdev->bdev, holder);
+ out_clear_rdev:
+ 	md_rdev_clear(rdev);
+ out_free_rdev:
+@@ -8205,7 +8214,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 	spin_unlock(&all_mddevs_lock);
+ 
+ 	if (to_put)
+-		mddev_put(mddev);
++		mddev_put(to_put);
+ 	return next_mddev;
+ 
+ }
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 1aef86bf3fc31..b0a0f5a5c7836 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -211,6 +211,9 @@ enum flag_bits {
+ 				 * check if there is collision between raid1
+ 				 * serial bios.
+ 				 */
++	Holder,			/* rdev is used as holder while opening
++				 * underlying disk exclusively.
++				 */
+ };
+ 
+ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index dd25832eb0452..a60acd72210aa 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1831,6 +1831,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
+ 	int number = rdev->raid_disk;
+ 	struct raid1_info *p = conf->mirrors + number;
+ 
++	if (unlikely(number >= conf->raid_disks))
++		goto abort;
++
+ 	if (rdev != p->rdev)
+ 		p = conf->mirrors + conf->raid_disks + number;
+ 
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 671fc0588e431..9af2c5596121c 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -413,7 +413,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
+ 				dev->height >> 1);
+ 		break;
+ 	default:
+-		BUG();
++		return -EINVAL; /* should not happen */
+ 	}
+ 	dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
+ 		buf, buf->vb.vb2_buf.index,
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index dc09fbdb062b0..ca51776a961fb 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -355,7 +355,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
+ 	void __iomem *const base = cio2->base;
+ 	u8 lanes, csi2bus = q->csi2.port;
+ 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
+-	struct cio2_csi2_timing timing;
++	struct cio2_csi2_timing timing = { 0 };
+ 	int i, r;
+ 
+ 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+index a605e80c7dc36..b0ca2b3a8a739 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+@@ -892,11 +892,13 @@ static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
+ 	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
+ 	if (ret != 0) {
+ 		dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
++		put_device(&comp_pdev->dev);
+ 		return -EINVAL;
+ 	}
+ 
+ 	comp->subsys_id = cmdq_reg.subsys;
+ 	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
++	put_device(&comp_pdev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
+index 60931367b82ca..48fc79cd40273 100644
+--- a/drivers/media/tuners/qt1010.c
++++ b/drivers/media/tuners/qt1010.c
+@@ -345,11 +345,12 @@ static int qt1010_init(struct dvb_frontend *fe)
+ 			else
+ 				valptr = &tmpval;
+ 
+-			BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1);
+-
+-			err = qt1010_init_meas1(priv, i2c_data[i+1].reg,
+-						i2c_data[i].reg,
+-						i2c_data[i].val, valptr);
++			if (i >= ARRAY_SIZE(i2c_data) - 1)
++				err = -EIO;
++			else
++				err = qt1010_init_meas1(priv, i2c_data[i + 1].reg,
++							i2c_data[i].reg,
++							i2c_data[i].val, valptr);
+ 			i++;
+ 			break;
+ 		}
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 1e9c8d01523be..33a2aa8907e65 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,6 +322,8 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			if (msg[0].len < 3 || msg[1].len < 1)
++				return -EOPNOTSUPP;
+ 			/* demod access via firmware interface */
+ 			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -381,6 +383,8 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			if (msg[0].len < 3)
++				return -EOPNOTSUPP;
+ 			/* demod access via firmware interface */
+ 			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -388,10 +392,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
+ 
+-			ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+-							         &msg[0].buf[3],
+-							         msg[0].len - 3)
+-					        : -EOPNOTSUPP;
++			ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3);
+ 		} else {
+ 			/* I2C write */
+ 			u8 buf[MAX_XFER_SIZE];
+diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
+index aa45b5d263f6b..a1235d0cce92f 100644
+--- a/drivers/media/usb/dvb-usb-v2/anysee.c
++++ b/drivers/media/usb/dvb-usb-v2/anysee.c
+@@ -202,7 +202,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 
+ 	while (i < num) {
+ 		if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+-			if (msg[i].len > 2 || msg[i+1].len > 60) {
++			if (msg[i].len != 2 || msg[i + 1].len > 60) {
+ 				ret = -EOPNOTSUPP;
+ 				break;
+ 			}
+diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
+index 2dcbb49d66dab..2410054ddb2c3 100644
+--- a/drivers/media/usb/dvb-usb-v2/az6007.c
++++ b/drivers/media/usb/dvb-usb-v2/az6007.c
+@@ -788,6 +788,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			if (az6007_xfer_debug)
+ 				printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n",
+ 				       addr, msgs[i].len);
++			if (msgs[i].len < 1) {
++				ret = -EIO;
++				goto err;
++			}
+ 			req = AZ6007_I2C_WR;
+ 			index = msgs[i].buf[0];
+ 			value = addr | (1 << 8);
+@@ -802,6 +806,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			if (az6007_xfer_debug)
+ 				printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n",
+ 				       addr, msgs[i].len);
++			if (msgs[i].len < 1) {
++				ret = -EIO;
++				goto err;
++			}
+ 			req = AZ6007_I2C_RD;
+ 			index = msgs[i].buf[0];
+ 			value = addr;
+diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
+index 0c434259c36f1..c71e7b93476de 100644
+--- a/drivers/media/usb/dvb-usb-v2/gl861.c
++++ b/drivers/media/usb/dvb-usb-v2/gl861.c
+@@ -120,7 +120,7 @@ static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 	} else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ 		   (msg[1].flags & I2C_M_RD)) {
+ 		/* I2C write + read */
+-		if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
++		if (msg[0].len != 1 || msg[1].len > sizeof(ctx->buf)) {
+ 			ret = -EOPNOTSUPP;
+ 			goto err;
+ 		}
+diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
+index 0827bf3d4e8c7..13604e6acdb83 100644
+--- a/drivers/media/usb/dvb-usb/af9005.c
++++ b/drivers/media/usb/dvb-usb/af9005.c
+@@ -422,6 +422,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 		if (ret == 0)
+ 			ret = 2;
+ 	} else {
++		if (msg[0].len < 2) {
++			ret = -EOPNOTSUPP;
++			goto unlock;
++		}
+ 		/* write one or more registers */
+ 		reg = msg[0].buf[0];
+ 		addr = msg[0].addr;
+@@ -431,6 +435,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			ret = 1;
+ 	}
+ 
++unlock:
+ 	mutex_unlock(&d->i2c_mutex);
+ 	return ret;
+ }
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 970b84c3f0b5a..b3bb1805829ad 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -128,6 +128,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 
+ 	switch (num) {
+ 	case 2:
++		if (msg[0].len < 1) {
++			num = -EOPNOTSUPP;
++			break;
++		}
+ 		/* read stv0299 register */
+ 		value = msg[0].buf[0];/* register */
+ 		for (i = 0; i < msg[1].len; i++) {
+@@ -139,6 +143,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 	case 1:
+ 		switch (msg[0].addr) {
+ 		case 0x68:
++			if (msg[0].len < 2) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			/* write to stv0299 register */
+ 			buf6[0] = 0x2a;
+ 			buf6[1] = msg[0].buf[0];
+@@ -148,6 +156,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			break;
+ 		case 0x60:
+ 			if (msg[0].flags == 0) {
++				if (msg[0].len < 4) {
++					num = -EOPNOTSUPP;
++					break;
++				}
+ 			/* write to tuner pll */
+ 				buf6[0] = 0x2c;
+ 				buf6[1] = 5;
+@@ -159,6 +171,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 				dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ 						buf6, 7, DW210X_WRITE_MSG);
+ 			} else {
++				if (msg[0].len < 1) {
++					num = -EOPNOTSUPP;
++					break;
++				}
+ 			/* read from tuner */
+ 				dw210x_op_rw(d->udev, 0xb5, 0, 0,
+ 						buf6, 1, DW210X_READ_MSG);
+@@ -166,12 +182,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			}
+ 			break;
+ 		case (DW2102_RC_QUERY):
++			if (msg[0].len < 2) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ 					buf6, 2, DW210X_READ_MSG);
+ 			msg[0].buf[0] = buf6[0];
+ 			msg[0].buf[1] = buf6[1];
+ 			break;
+ 		case (DW2102_VOLTAGE_CTRL):
++			if (msg[0].len < 1) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			buf6[0] = 0x30;
+ 			buf6[1] = msg[0].buf[0];
+ 			dw210x_op_rw(d->udev, 0xb2, 0, 0,
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 75e427f124b28..cadd4a820c033 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -496,6 +496,7 @@ config HISI_HIKEY_USB
+ config OPEN_DICE
+ 	tristate "Open Profile for DICE driver"
+ 	depends on OF_RESERVED_MEM
++	depends on HAS_IOMEM
+ 	help
+ 	  This driver exposes a DICE reserved memory region to userspace via
+ 	  a character device. The memory region contains Compound Device
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index eebf94604a7fd..cddecc1e1ac2f 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -171,8 +171,8 @@
+ #define ESDHC_FLAG_HS400		BIT(9)
+ /*
+  * The IP has errata ERR010450
+- * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
+- * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
++ * uSDHC: At 1.8V due to the I/O timing limit, for SDR mode, SD card
++ * clock can't exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
+  */
+ #define ESDHC_FLAG_ERR010450		BIT(10)
+ /* The IP supports HS400ES mode */
+@@ -961,7 +961,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
+ 		| ESDHC_CLOCK_MASK);
+ 	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ 
+-	if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
++	if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
++	    (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) {
+ 		unsigned int max_clock;
+ 
+ 		max_clock = imx_data->is_ddr ? 45000000 : 150000000;
+diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
+index 656dd80a0be79..58968c1e7d2f8 100644
+--- a/drivers/mtd/spi-nor/atmel.c
++++ b/drivers/mtd/spi-nor/atmel.c
+@@ -48,9 +48,11 @@ static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
+ 	.is_locked = at25fs_nor_is_locked,
+ };
+ 
+-static void at25fs_nor_late_init(struct spi_nor *nor)
++static int at25fs_nor_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->locking_ops = &at25fs_nor_locking_ops;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups at25fs_nor_fixups = {
+@@ -149,9 +151,11 @@ static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
+ 	.is_locked = atmel_nor_is_global_protected,
+ };
+ 
+-static void atmel_nor_global_protection_late_init(struct spi_nor *nor)
++static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->locking_ops = &atmel_nor_global_protection_ops;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 55f4a902b8be9..72973cfb1d201 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2898,16 +2898,23 @@ static void spi_nor_init_fixup_flags(struct spi_nor *nor)
+  * SFDP standard, or where SFDP tables are not defined at all.
+  * Will replace the spi_nor_manufacturer_init_params() method.
+  */
+-static void spi_nor_late_init_params(struct spi_nor *nor)
++static int spi_nor_late_init_params(struct spi_nor *nor)
+ {
+ 	struct spi_nor_flash_parameter *params = nor->params;
++	int ret;
+ 
+ 	if (nor->manufacturer && nor->manufacturer->fixups &&
+-	    nor->manufacturer->fixups->late_init)
+-		nor->manufacturer->fixups->late_init(nor);
++	    nor->manufacturer->fixups->late_init) {
++		ret = nor->manufacturer->fixups->late_init(nor);
++		if (ret)
++			return ret;
++	}
+ 
+-	if (nor->info->fixups && nor->info->fixups->late_init)
+-		nor->info->fixups->late_init(nor);
++	if (nor->info->fixups && nor->info->fixups->late_init) {
++		ret = nor->info->fixups->late_init(nor);
++		if (ret)
++			return ret;
++	}
+ 
+ 	/* Default method kept for backward compatibility. */
+ 	if (!params->set_4byte_addr_mode)
+@@ -2925,6 +2932,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
+ 
+ 	if (nor->info->n_banks > 1)
+ 		params->bank_size = div64_u64(params->size, nor->info->n_banks);
++
++	return 0;
+ }
+ 
+ /**
+@@ -3083,9 +3092,7 @@ static int spi_nor_init_params(struct spi_nor *nor)
+ 		spi_nor_init_params_deprecated(nor);
+ 	}
+ 
+-	spi_nor_late_init_params(nor);
+-
+-	return 0;
++	return spi_nor_late_init_params(nor);
+ }
+ 
+ /** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index 4fb5ff09c63a9..2453bd5743ac9 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -377,6 +377,7 @@ struct spi_nor_otp {
+  *			than reading the status register to indicate they
+  *			are ready for a new command
+  * @locking_ops:	SPI NOR locking methods.
++ * @priv:		flash's private data.
+  */
+ struct spi_nor_flash_parameter {
+ 	u64				bank_size;
+@@ -405,6 +406,7 @@ struct spi_nor_flash_parameter {
+ 	int (*ready)(struct spi_nor *nor);
+ 
+ 	const struct spi_nor_locking_ops *locking_ops;
++	void *priv;
+ };
+ 
+ /**
+@@ -431,7 +433,7 @@ struct spi_nor_fixups {
+ 			 const struct sfdp_parameter_header *bfpt_header,
+ 			 const struct sfdp_bfpt *bfpt);
+ 	int (*post_sfdp)(struct spi_nor *nor);
+-	void (*late_init)(struct spi_nor *nor);
++	int (*late_init)(struct spi_nor *nor);
+ };
+ 
+ /**
+diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
+index 400e2b42f45af..accdf7aa2bfde 100644
+--- a/drivers/mtd/spi-nor/issi.c
++++ b/drivers/mtd/spi-nor/issi.c
+@@ -29,7 +29,7 @@ static const struct spi_nor_fixups is25lp256_fixups = {
+ 	.post_bfpt = is25lp256_post_bfpt_fixups,
+ };
+ 
+-static void pm25lv_nor_late_init(struct spi_nor *nor)
++static int pm25lv_nor_late_init(struct spi_nor *nor)
+ {
+ 	struct spi_nor_erase_map *map = &nor->params->erase_map;
+ 	int i;
+@@ -38,6 +38,8 @@ static void pm25lv_nor_late_init(struct spi_nor *nor)
+ 	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ 		if (map->erase_type[i].size == 4096)
+ 			map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups pm25lv_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
+index 04888258e8914..eb149e517c1fe 100644
+--- a/drivers/mtd/spi-nor/macronix.c
++++ b/drivers/mtd/spi-nor/macronix.c
+@@ -110,10 +110,12 @@ static void macronix_nor_default_init(struct spi_nor *nor)
+ 	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ }
+ 
+-static void macronix_nor_late_init(struct spi_nor *nor)
++static int macronix_nor_late_init(struct spi_nor *nor)
+ {
+ 	if (!nor->params->set_4byte_addr_mode)
+ 		nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups macronix_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
+index 4b919756a2055..28c9c14a4b293 100644
+--- a/drivers/mtd/spi-nor/micron-st.c
++++ b/drivers/mtd/spi-nor/micron-st.c
+@@ -429,7 +429,7 @@ static void micron_st_nor_default_init(struct spi_nor *nor)
+ 	nor->params->quad_enable = NULL;
+ }
+ 
+-static void micron_st_nor_late_init(struct spi_nor *nor)
++static int micron_st_nor_late_init(struct spi_nor *nor)
+ {
+ 	struct spi_nor_flash_parameter *params = nor->params;
+ 
+@@ -438,6 +438,8 @@ static void micron_st_nor_late_init(struct spi_nor *nor)
+ 
+ 	if (!params->set_4byte_addr_mode)
+ 		params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups micron_st_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index 15f9a80c10b9b..4fbaa6fba45a6 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -4,14 +4,18 @@
+  * Copyright (C) 2014, Freescale Semiconductor, Inc.
+  */
+ 
++#include <linux/bitfield.h>
++#include <linux/device.h>
+ #include <linux/mtd/spi-nor.h>
+ 
+ #include "core.h"
+ 
+ /* flash_info mfr_flag. Used to clear sticky prorietary SR bits. */
+ #define USE_CLSR	BIT(0)
++#define USE_CLPEF	BIT(1)
+ 
+ #define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
++#define SPINOR_OP_CLPEF		0x82	/* Clear program/erase failure flags */
+ #define SPINOR_OP_RD_ANY_REG			0x65	/* Read any register */
+ #define SPINOR_OP_WR_ANY_REG			0x71	/* Write any register */
+ #define SPINOR_REG_CYPRESS_VREG			0x00800000
+@@ -25,6 +29,7 @@
+ #define SPINOR_REG_CYPRESS_CFR2			0x3
+ #define SPINOR_REG_CYPRESS_CFR2V					\
+ 	(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
++#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK	GENMASK(3, 0)
+ #define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24	0xb
+ #define SPINOR_REG_CYPRESS_CFR2_ADRBYT		BIT(7)
+ #define SPINOR_REG_CYPRESS_CFR3			0x4
+@@ -57,22 +62,32 @@
+ 		   SPI_MEM_OP_DUMMY(ndummy, 0),				\
+ 		   SPI_MEM_OP_DATA_IN(1, buf, 0))
+ 
+-#define SPANSION_CLSR_OP						\
+-	SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),			\
++#define SPANSION_OP(opcode)						\
++	SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0),				\
+ 		   SPI_MEM_OP_NO_ADDR,					\
+ 		   SPI_MEM_OP_NO_DUMMY,					\
+ 		   SPI_MEM_OP_NO_DATA)
+ 
++/**
++ * struct spansion_nor_params - Spansion private parameters.
++ * @clsr:	Clear Status Register or Clear Program and Erase Failure Flag
++ *		opcode.
++ */
++struct spansion_nor_params {
++	u8 clsr;
++};
++
+ /**
+  * spansion_nor_clear_sr() - Clear the Status Register.
+  * @nor:	pointer to 'struct spi_nor'.
+  */
+ static void spansion_nor_clear_sr(struct spi_nor *nor)
+ {
++	const struct spansion_nor_params *priv_params = nor->params->priv;
+ 	int ret;
+ 
+ 	if (nor->spimem) {
+-		struct spi_mem_op op = SPANSION_CLSR_OP;
++		struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
+ 
+ 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+ 
+@@ -148,8 +163,18 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+ 	int ret;
+ 	u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ 
++	op = (struct spi_mem_op)
++		CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
++					  SPINOR_REG_CYPRESS_CFR2V, 0, buf);
++
++	ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
++	if (ret)
++		return ret;
++
+ 	/* Use 24 dummy cycles for memory array reads. */
+-	*buf = SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24;
++	*buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
++	*buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
++			   SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
+ 	op = (struct spi_mem_op)
+ 		CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ 					  SPINOR_REG_CYPRESS_CFR2V, 1, buf);
+@@ -528,9 +553,11 @@ static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
+-static void s25fs256t_late_init(struct spi_nor *nor)
++static int s25fs256t_late_init(struct spi_nor *nor)
+ {
+ 	cypress_nor_ecc_init(nor);
++
++	return 0;
+ }
+ 
+ static struct spi_nor_fixups s25fs256t_fixups = {
+@@ -586,7 +613,7 @@ static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+ 	return cypress_nor_get_page_size(nor);
+ }
+ 
+-static void s25hx_t_late_init(struct spi_nor *nor)
++static int s25hx_t_late_init(struct spi_nor *nor)
+ {
+ 	struct spi_nor_flash_parameter *params = nor->params;
+ 
+@@ -598,6 +625,8 @@ static void s25hx_t_late_init(struct spi_nor *nor)
+ 	/* Replace ready() with multi die version */
+ 	if (params->n_dice)
+ 		params->ready = cypress_nor_sr_ready_and_clear;
++
++	return 0;
+ }
+ 
+ static struct spi_nor_fixups s25hx_t_fixups = {
+@@ -665,10 +694,12 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ 	return 0;
+ }
+ 
+-static void s28hx_t_late_init(struct spi_nor *nor)
++static int s28hx_t_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+ 	cypress_nor_ecc_init(nor);
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups s28hx_t_fixups = {
+@@ -792,47 +823,54 @@ static const struct flash_info spansion_nor_parts[] = {
+ 		FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ 	{ "s25fs256t",  INFO6(0x342b19, 0x0f0890, 0, 0)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s25fs256t_fixups },
+ 	{ "s25hl512t",  INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ 		PARSE_SFDP
+-		MFR_FLAGS(USE_CLSR)
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "s25hl01gt",  INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ 		PARSE_SFDP
+-		MFR_FLAGS(USE_CLSR)
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "s25hl02gt",  INFO6(0x342a1c, 0x0f0090, 0, 0)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		FLAGS(NO_CHIP_ERASE)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "s25hs512t",  INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ 		PARSE_SFDP
+-		MFR_FLAGS(USE_CLSR)
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "s25hs01gt",  INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ 		PARSE_SFDP
+-		MFR_FLAGS(USE_CLSR)
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "s25hs02gt",  INFO6(0x342b1c, 0x0f0090, 0, 0)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		FLAGS(NO_CHIP_ERASE)
+ 		.fixups = &s25hx_t_fixups },
+ 	{ "cy15x104q",  INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
+ 		FLAGS(SPI_NOR_NO_ERASE) },
+ 	{ "s28hl512t",   INFO(0x345a1a,      0, 256 * 1024, 256)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s28hx_t_fixups,
+ 	},
+ 	{ "s28hl01gt",   INFO(0x345a1b,      0, 256 * 1024, 512)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s28hx_t_fixups,
+ 	},
+ 	{ "s28hs512t",   INFO(0x345b1a,      0, 256 * 1024, 256)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s28hx_t_fixups,
+ 	},
+ 	{ "s28hs01gt",   INFO(0x345b1b,      0, 256 * 1024, 512)
+ 		PARSE_SFDP
++		MFR_FLAGS(USE_CLPEF)
+ 		.fixups = &s28hx_t_fixups,
+ 	},
+ };
+@@ -876,17 +914,35 @@ static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
+ 	return !(nor->bouncebuf[0] & SR_WIP);
+ }
+ 
+-static void spansion_nor_late_init(struct spi_nor *nor)
++static int spansion_nor_late_init(struct spi_nor *nor)
+ {
+-	if (nor->params->size > SZ_16M) {
++	struct spi_nor_flash_parameter *params = nor->params;
++	struct spansion_nor_params *priv_params;
++	u8 mfr_flags = nor->info->mfr_flags;
++
++	if (params->size > SZ_16M) {
+ 		nor->flags |= SNOR_F_4B_OPCODES;
+ 		/* No small sector erase for 4-byte command set */
+ 		nor->erase_opcode = SPINOR_OP_SE;
+ 		nor->mtd.erasesize = nor->info->sector_size;
+ 	}
+ 
+-	if (nor->info->mfr_flags & USE_CLSR)
+-		nor->params->ready = spansion_nor_sr_ready_and_clear;
++	if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
++		priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
++					   GFP_KERNEL);
++		if (!priv_params)
++			return -ENOMEM;
++
++		if (mfr_flags & USE_CLSR)
++			priv_params->clsr = SPINOR_OP_CLSR;
++		else if (mfr_flags & USE_CLPEF)
++			priv_params->clsr = SPINOR_OP_CLPEF;
++
++		params->priv = priv_params;
++		params->ready = spansion_nor_sr_ready_and_clear;
++	}
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups spansion_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
+index 688eb20c763e9..09fdc7023e09a 100644
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -49,9 +49,11 @@ static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
+ 	.is_locked = sst26vf_nor_is_locked,
+ };
+ 
+-static void sst26vf_nor_late_init(struct spi_nor *nor)
++static int sst26vf_nor_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->locking_ops = &sst26vf_nor_locking_ops;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups sst26vf_nor_fixups = {
+@@ -203,10 +205,12 @@ out:
+ 	return ret;
+ }
+ 
+-static void sst_nor_late_init(struct spi_nor *nor)
++static int sst_nor_late_init(struct spi_nor *nor)
+ {
+ 	if (nor->info->mfr_flags & SST_WRITE)
+ 		nor->mtd._write = sst_nor_write;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups sst_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
+index 63ba8e3a96f51..cd99c9a1c5688 100644
+--- a/drivers/mtd/spi-nor/winbond.c
++++ b/drivers/mtd/spi-nor/winbond.c
+@@ -217,7 +217,7 @@ static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
+ 	.is_locked = spi_nor_otp_is_locked_sr2,
+ };
+ 
+-static void winbond_nor_late_init(struct spi_nor *nor)
++static int winbond_nor_late_init(struct spi_nor *nor)
+ {
+ 	struct spi_nor_flash_parameter *params = nor->params;
+ 
+@@ -233,6 +233,8 @@ static void winbond_nor_late_init(struct spi_nor *nor)
+ 	 * from BFPT, if any.
+ 	 */
+ 	params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups winbond_nor_fixups = {
+diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
+index 7175de8aa336a..00d53eae5ee83 100644
+--- a/drivers/mtd/spi-nor/xilinx.c
++++ b/drivers/mtd/spi-nor/xilinx.c
+@@ -155,10 +155,12 @@ static int xilinx_nor_setup(struct spi_nor *nor,
+ 	return 0;
+ }
+ 
+-static void xilinx_nor_late_init(struct spi_nor *nor)
++static int xilinx_nor_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->setup = xilinx_nor_setup;
+ 	nor->params->ready = xilinx_nor_sr_ready;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups xilinx_nor_fixups = {
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index a5c5036dfb943..e626de33e735d 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -185,10 +185,10 @@ config CAN_SLCAN
+ 
+ config CAN_SUN4I
+ 	tristate "Allwinner A10 CAN controller"
+-	depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
++	depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
+ 	help
+ 	  Say Y here if you want to use CAN controller found on Allwinner
+-	  A10/A20 SoCs.
++	  A10/A20/D1 SoCs.
+ 
+ 	  To compile this driver as a module, choose M here: the module will
+ 	  be called sun4i_can.
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 0827830bbf28c..c508a328e38d4 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -91,6 +91,8 @@
+ #define SUN4I_REG_BUF12_ADDR	0x0070	/* CAN Tx/Rx Buffer 12 */
+ #define SUN4I_REG_ACPC_ADDR	0x0040	/* CAN Acceptance Code 0 */
+ #define SUN4I_REG_ACPM_ADDR	0x0044	/* CAN Acceptance Mask 0 */
++#define SUN4I_REG_ACPC_ADDR_D1	0x0028	/* CAN Acceptance Code 0 on the D1 */
++#define SUN4I_REG_ACPM_ADDR_D1	0x002C	/* CAN Acceptance Mask 0 on the D1 */
+ #define SUN4I_REG_RBUF_RBACK_START_ADDR	0x0180	/* CAN transmit buffer start */
+ #define SUN4I_REG_RBUF_RBACK_END_ADDR	0x01b0	/* CAN transmit buffer end */
+ 
+@@ -205,9 +207,11 @@
+  * struct sun4ican_quirks - Differences between SoC variants.
+  *
+  * @has_reset: SoC needs reset deasserted.
++ * @acp_offset: Offset of ACPC and ACPM registers
+  */
+ struct sun4ican_quirks {
+ 	bool has_reset;
++	int acp_offset;
+ };
+ 
+ struct sun4ican_priv {
+@@ -216,6 +220,7 @@ struct sun4ican_priv {
+ 	struct clk *clk;
+ 	struct reset_control *reset;
+ 	spinlock_t cmdreg_lock;	/* lock for concurrent cmd register writes */
++	int acp_offset;
+ };
+ 
+ static const struct can_bittiming_const sun4ican_bittiming_const = {
+@@ -338,8 +343,8 @@ static int sun4i_can_start(struct net_device *dev)
+ 	}
+ 
+ 	/* set filters - we accept all */
+-	writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR);
+-	writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR);
++	writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset);
++	writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset);
+ 
+ 	/* clear error counters and error code capture */
+ 	writel(0, priv->base + SUN4I_REG_ERRC_ADDR);
+@@ -768,10 +773,17 @@ static const struct ethtool_ops sun4ican_ethtool_ops = {
+ 
+ static const struct sun4ican_quirks sun4ican_quirks_a10 = {
+ 	.has_reset = false,
++	.acp_offset = 0,
+ };
+ 
+ static const struct sun4ican_quirks sun4ican_quirks_r40 = {
+ 	.has_reset = true,
++	.acp_offset = 0,
++};
++
++static const struct sun4ican_quirks sun4ican_quirks_d1 = {
++	.has_reset = true,
++	.acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR),
+ };
+ 
+ static const struct of_device_id sun4ican_of_match[] = {
+@@ -784,6 +796,9 @@ static const struct of_device_id sun4ican_of_match[] = {
+ 	}, {
+ 		.compatible = "allwinner,sun8i-r40-can",
+ 		.data = &sun4ican_quirks_r40
++	}, {
++		.compatible = "allwinner,sun20i-d1-can",
++		.data = &sun4ican_quirks_d1
+ 	}, {
+ 		/* sentinel */
+ 	},
+@@ -870,6 +885,7 @@ static int sun4ican_probe(struct platform_device *pdev)
+ 	priv->base = addr;
+ 	priv->clk = clk;
+ 	priv->reset = reset;
++	priv->acp_offset = quirks->acp_offset;
+ 	spin_lock_init(&priv->cmdreg_lock);
+ 
+ 	platform_set_drvdata(pdev, dev);
+@@ -907,4 +923,4 @@ module_platform_driver(sun4i_can_driver);
+ MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
+ MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
+ MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)");
++MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)");
+diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
+index b716adacd8159..7f6b69a523676 100644
+--- a/drivers/net/ethernet/atheros/alx/ethtool.c
++++ b/drivers/net/ethernet/atheros/alx/ethtool.c
+@@ -292,9 +292,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev,
+ 	spin_lock(&alx->stats_lock);
+ 
+ 	alx_update_hw_stats(hw);
+-	BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+-		     ALX_NUM_STATS * sizeof(u64));
+-	memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
++	BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64));
++	memcpy(data, &hw->stats, sizeof(hw->stats));
+ 
+ 	spin_unlock(&alx->stats_lock);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index 8f232c41a89e3..459e32f6adb50 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -331,6 +331,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	np = netdev_priv(netdev);
+ 	vsi = np->vsi;
+ 
++	if (!vsi || !ice_is_switchdev_running(vsi->back))
++		return NETDEV_TX_BUSY;
++
+ 	if (ice_is_reset_in_progress(vsi->back->state) ||
+ 	    test_bit(ICE_VF_DIS, vsi->back->state))
+ 		return NETDEV_TX_BUSY;
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 9275a672f90cb..73463ded42045 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -3817,7 +3817,7 @@ static void __exit ath10k_pci_exit(void)
+ module_exit(ath10k_pci_exit);
+ 
+ MODULE_AUTHOR("Qualcomm Atheros");
+-MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
++MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
+ /* QCA988x 2.0 firmware files */
+diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
+index b0067af685b16..3c482baacec10 100644
+--- a/drivers/net/wireless/ath/ath10k/usb.c
++++ b/drivers/net/wireless/ath/ath10k/usb.c
+@@ -1126,5 +1126,5 @@ static struct usb_driver ath10k_usb_driver = {
+ module_usb_driver(ath10k_usb_driver);
+ 
+ MODULE_AUTHOR("Atheros Communications, Inc.");
+-MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices");
++MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices");
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index ec40adc1cb235..a181563ec0851 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -1036,7 +1036,7 @@ static void ath11k_pci_exit(void)
+ 
+ module_exit(ath11k_pci_exit);
+ 
+-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
++MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
+ /* firmware files */
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index ae1645d0f42a2..f933896f2a68d 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -1129,6 +1129,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+ 	struct ath12k_dp *dp = &ab->dp;
+ 	struct sk_buff *skb;
+ 	int i;
++	u32 pool_id, tx_spt_page;
+ 
+ 	if (!dp->spt_info)
+ 		return;
+@@ -1148,6 +1149,14 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+ 		dev_kfree_skb_any(skb);
+ 	}
+ 
++	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
++		if (!dp->spt_info->rxbaddr[i])
++			continue;
++
++		kfree(dp->spt_info->rxbaddr[i]);
++		dp->spt_info->rxbaddr[i] = NULL;
++	}
++
+ 	spin_unlock_bh(&dp->rx_desc_lock);
+ 
+ 	/* TX Descriptor cleanup */
+@@ -1170,6 +1179,21 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+ 		spin_unlock_bh(&dp->tx_desc_lock[i]);
+ 	}
+ 
++	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
++		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
++
++		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
++			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
++			if (!dp->spt_info->txbaddr[tx_spt_page])
++				continue;
++
++			kfree(dp->spt_info->txbaddr[tx_spt_page]);
++			dp->spt_info->txbaddr[tx_spt_page] = NULL;
++		}
++
++		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
++	}
++
+ 	/* unmap SPT pages */
+ 	for (i = 0; i < dp->num_spt_pages; i++) {
+ 		if (!dp->spt_info[i].vaddr)
+@@ -1343,6 +1367,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
+ 			return -ENOMEM;
+ 		}
+ 
++		dp->spt_info->rxbaddr[i] = &rx_descs[0];
++
+ 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ 			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ 			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+@@ -1368,8 +1394,10 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
+ 				return -ENOMEM;
+ 			}
+ 
++			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
++			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
++
+ 			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+-				tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ 				ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
+ 				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ 				tx_descs[j].pool_id = pool_id;
+diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
+index 7c5dafce5a68d..9aeda0321cd75 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.h
++++ b/drivers/net/wireless/ath/ath12k/dp.h
+@@ -289,6 +289,8 @@ struct ath12k_tx_desc_info {
+ struct ath12k_spt_info {
+ 	dma_addr_t paddr;
+ 	u64 *vaddr;
++	struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
++	struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
+ };
+ 
+ struct ath12k_reo_queue_ref {
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 45d88e35fc2eb..d165b24094ad5 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -2755,9 +2755,12 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ 	arg.scan_id = ATH12K_SCAN_ID;
+ 
+ 	if (req->ie_len) {
++		arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
++		if (!arg.extraie.ptr) {
++			ret = -ENOMEM;
++			goto exit;
++		}
+ 		arg.extraie.len = req->ie_len;
+-		arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+-		memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ 	}
+ 
+ 	if (req->n_ssids) {
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index e4f08a066ca10..fae5dfd6e9d70 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -1411,5 +1411,5 @@ static void ath12k_pci_exit(void)
+ 
+ module_exit(ath12k_pci_exit);
+ 
+-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices");
++MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 4928e4e916603..eebc5a65ce3b4 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -2162,12 +2162,6 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ 	if (arg->num_bssid)
+ 		len += sizeof(*bssid) * arg->num_bssid;
+ 
+-	len += TLV_HDR_SIZE;
+-	if (arg->extraie.len)
+-		extraie_len_with_pad =
+-			roundup(arg->extraie.len, sizeof(u32));
+-	len += extraie_len_with_pad;
+-
+ 	if (arg->num_hint_bssid)
+ 		len += TLV_HDR_SIZE +
+ 		       arg->num_hint_bssid * sizeof(*hint_bssid);
+@@ -2176,6 +2170,18 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ 		len += TLV_HDR_SIZE +
+ 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
+ 
++	len += TLV_HDR_SIZE;
++	if (arg->extraie.len)
++		extraie_len_with_pad =
++			roundup(arg->extraie.len, sizeof(u32));
++	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
++		len += extraie_len_with_pad;
++	} else {
++		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
++			    arg->extraie.len);
++		extraie_len_with_pad = 0;
++	}
++
+ 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ 	if (!skb)
+ 		return -ENOMEM;
+@@ -2265,7 +2271,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
+ 	ptr += TLV_HDR_SIZE;
+ 
+-	if (arg->extraie.len)
++	if (extraie_len_with_pad)
+ 		memcpy(ptr, arg->extraie.ptr,
+ 		       arg->extraie.len);
+ 
+@@ -3704,6 +3710,10 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
+ 	for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) {
+ 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
++
++		if (mode >= WMI_HOST_HW_MODE_MAX)
++			continue;
++
+ 		pref = soc->wmi_ab.preferred_hw_mode;
+ 
+ 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
+diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
+index 9cd12b20b18d8..9bfaadfa6c009 100644
+--- a/drivers/net/wireless/ath/ath9k/ahb.c
++++ b/drivers/net/wireless/ath/ath9k/ahb.c
+@@ -132,8 +132,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
+ 
+ 	ah = sc->sc_ah;
+ 	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+-	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+-		   hw_name, (unsigned long)mem, irq);
++	wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
++		   hw_name, mem, irq);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
+index af44b33814ddc..f03d792732da7 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.h
++++ b/drivers/net/wireless/ath/ath9k/mac.h
+@@ -115,8 +115,10 @@ struct ath_tx_status {
+ 	u8 qid;
+ 	u16 desc_id;
+ 	u8 tid;
+-	u32 ba_low;
+-	u32 ba_high;
++	struct_group(ba,
++		u32 ba_low;
++		u32 ba_high;
++	);
+ 	u32 evm0;
+ 	u32 evm1;
+ 	u32 evm2;
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index a09f9d223f3de..0633589b85c23 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -988,8 +988,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	sc->sc_ah->msi_reg = 0;
+ 
+ 	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
+-	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+-		   hw_name, (unsigned long)sc->mem, pdev->irq);
++	wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
++		   hw_name, sc->mem, pdev->irq);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index f6f2ab7a63ffc..42058368e6373 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -468,7 +468,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
+ 	isaggr = bf_isaggr(bf);
+ 	if (isaggr) {
+ 		seq_st = ts->ts_seqnum;
+-		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
++		memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3);
+ 	}
+ 
+ 	while (bf) {
+@@ -551,7 +551,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 	if (isaggr && txok) {
+ 		if (ts->ts_flags & ATH9K_TX_BA) {
+ 			seq_st = ts->ts_seqnum;
+-			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
++			memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3);
+ 		} else {
+ 			/*
+ 			 * AR5416 can become deaf/mute when BA
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
+index 237cbd5c5060b..f29ac6de71399 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.c
++++ b/drivers/net/wireless/ath/wil6210/txrx.c
+@@ -666,7 +666,7 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+ 	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+ 				      &s->tid_crypto_rx[tid];
+ 	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+-	const u8 *pn = (u8 *)&d->mac.pn_15_0;
++	const u8 *pn = (u8 *)&d->mac.pn;
+ 
+ 	if (!cc->key_set) {
+ 		wil_err_ratelimited(wil,
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
+index 1ae1bec1b97f1..689f68d89a440 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.h
++++ b/drivers/net/wireless/ath/wil6210/txrx.h
+@@ -343,8 +343,10 @@ struct vring_rx_mac {
+ 	u32 d0;
+ 	u32 d1;
+ 	u16 w4;
+-	u16 pn_15_0;
+-	u32 pn_47_16;
++	struct_group_attr(pn, __packed,
++		u16 pn_15_0;
++		u32 pn_47_16;
++	);
+ } __packed;
+ 
+ /* Rx descriptor - DMA part
+diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+index 201c8c35e0c9e..1ba1f21ebea26 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+@@ -548,7 +548,7 @@ static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ 	s = &wil->sta[cid];
+ 	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ 	cc = &c->key_id[key_id];
+-	pn = (u8 *)&st->ext.pn_15_0;
++	pn = (u8 *)&st->ext.pn;
+ 
+ 	if (!cc->key_set) {
+ 		wil_err_ratelimited(wil,
+diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
+index c736f7413a35f..ee90e225bb050 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
+@@ -330,8 +330,10 @@ struct wil_rx_status_extension {
+ 	u32 d0;
+ 	u32 d1;
+ 	__le16 seq_num; /* only lower 12 bits */
+-	u16 pn_15_0;
+-	u32 pn_47_16;
++	struct_group_attr(pn, __packed,
++		u16 pn_15_0;
++		u32 pn_47_16;
++	);
+ } __packed;
+ 
+ struct wil_rx_status_extended {
+diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
+index 009bca34ece30..447b51cff8f96 100644
+--- a/drivers/net/wireless/atmel/at76c50x-usb.c
++++ b/drivers/net/wireless/atmel/at76c50x-usb.c
+@@ -10,7 +10,7 @@
+  * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
+  * Copyright (c) 2010 Sebastian Smolorz <sesmo@gmx.net>
+  *
+- * This file is part of the Berlios driver for WLAN USB devices based on the
++ * This file is part of the Berlios driver for USB WLAN devices based on the
+  * Atmel AT76C503A/505/505A.
+  *
+  * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed
+@@ -143,7 +143,7 @@ static const struct usb_device_id dev_table[] = {
+ 	{ USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ 	/* Dynalink/Askey WLL013 (intersil) */
+ 	{ USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+-	/* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */
++	/* EZ connect 11Mpbs USB Wireless Adapter SMC2662W v1 */
+ 	{ USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ 	/* BenQ AWL300 */
+ 	{ USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+@@ -195,7 +195,7 @@ static const struct usb_device_id dev_table[] = {
+ 	{ USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) },
+ 	/* 3Com 3CRSHEW696 */
+ 	{ USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) },
+-	/* Siemens Santis ADSL WLAN USB adapter WLL 013 */
++	/* Siemens Santis ADSL USB WLAN adapter WLL 013 */
+ 	{ USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) },
+ 	/* Belkin F5D6050, version 2 */
+ 	{ USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) },
+@@ -238,7 +238,7 @@ static const struct usb_device_id dev_table[] = {
+ 	{ USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) },
+ 	/* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */
+ 	{ USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) },
+-	/* Corega WLAN USB Stick 11 */
++	/* Corega USB WLAN Stick 11 */
+ 	{ USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) },
+ 	/* Microstar MSI Box MS6978 */
+ 	{ USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) },
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 73c1fb3c0c5ec..bc83d2ba55c67 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -1132,12 +1132,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
+ 	else
+ 		sd_reg_ver_addr = SD_REG_VER;
+ 
+-	if (!iwl_trans_grab_nic_access(iwl_trans)) {
+-		IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
+-		ret = -EIO;
+-		goto out;
+-	}
+-
+ 	/* Enable access to peripheral registers */
+ 	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
+ 	val |= ENABLE_WFPM;
+@@ -1157,9 +1151,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
+ 		 iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
+ 		 iwl_trans->hw_wfpm_id);
+ 
+-	iwl_trans_release_nic_access(iwl_trans);
+-
+-out:
+ 	return ret;
+ }
+ 
+@@ -1351,6 +1342,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		if (ret)
+ 			goto out_free_trans;
+ 		if (iwl_trans_grab_nic_access(iwl_trans)) {
++			get_crf_id(iwl_trans);
+ 			/* all good */
+ 			iwl_trans_release_nic_access(iwl_trans);
+ 		} else {
+@@ -1360,7 +1352,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 
+ 	iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+-	get_crf_id(iwl_trans);
+ 
+ 	/*
+ 	 * The RF_ID is set to zero in blank OTP so read version to
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+index dd31929261ab9..866e0230df251 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+@@ -129,18 +129,18 @@ MODULE_FIRMWARE("orinoco_ezusb_fw");
+ 
+ #define USB_AVAYA8_VENDOR_ID     0x0D98
+ #define USB_AVAYAE_VENDOR_ID     0x0D9E
+-#define USB_AVAYA_WIRELESS_ID    0x0300 /* Avaya Wireless USB Card */
++#define USB_AVAYA_WIRELESS_ID    0x0300 /* Avaya USB Wireless Card */
+ 
+ #define USB_AGERE_VENDOR_ID      0x0D4E /* Agere Systems */
+-#define USB_AGERE_MODEL0801_ID   0x1000 /* Wireless USB Card Model 0801 */
+-#define USB_AGERE_MODEL0802_ID   0x1001 /* Wireless USB Card Model 0802 */
+-#define USB_AGERE_REBRANDED_ID   0x047A /* WLAN USB Card */
++#define USB_AGERE_MODEL0801_ID   0x1000 /* USB Wireless Card Model 0801 */
++#define USB_AGERE_MODEL0802_ID   0x1001 /* USB Wireless Card Model 0802 */
++#define USB_AGERE_REBRANDED_ID   0x047A /* USB WLAN Card */
+ 
+ #define USB_ELSA_VENDOR_ID       0x05CC
+ #define USB_ELSA_AIRLANCER_ID    0x3100 /* ELSA AirLancer USB-11 */
+ 
+ #define USB_LEGEND_VENDOR_ID     0x0E7C
+-#define USB_LEGEND_JOYNET_ID     0x0300 /* Joynet WLAN USB Card */
++#define USB_LEGEND_JOYNET_ID     0x0300 /* Joynet USB WLAN Card */
+ 
+ #define USB_SAMSUNG_VENDOR_ID    0x04E8
+ #define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
+@@ -154,7 +154,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw");
+ #define USB_FUJITSU_E1100_ID     0x1002 /* connect2AIR WLAN E-1100 USB */
+ 
+ #define USB_2WIRE_VENDOR_ID      0x1630
+-#define USB_2WIRE_WIRELESS_ID    0xff81 /* 2Wire Wireless USB adapter */
++#define USB_2WIRE_WIRELESS_ID    0xff81 /* 2Wire USB Wireless adapter */
+ 
+ 
+ #define EZUSB_REQUEST_FW_TRANS		0xA0
+diff --git a/drivers/net/wireless/legacy/rndis_wlan.c b/drivers/net/wireless/legacy/rndis_wlan.c
+index 712038d46bdb3..e7fea7ded6d5c 100644
+--- a/drivers/net/wireless/legacy/rndis_wlan.c
++++ b/drivers/net/wireless/legacy/rndis_wlan.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+ /*
+- * Driver for RNDIS based wireless USB devices.
++ * Driver for RNDIS based USB wireless devices.
+  *
+  * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
+  * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi>
+diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
+index 97bb87c3676bb..6c60621b6cccb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
+@@ -735,6 +735,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 	int ret;
+ 	u16 capab;
+ 	struct ieee80211_ht_cap *ht_cap;
++	unsigned int extra;
+ 	u8 radio, *pos;
+ 
+ 	capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+@@ -753,7 +754,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 
+ 	switch (action_code) {
+ 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+-		skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
++		/* See the layout of 'struct ieee80211_mgmt'. */
++		extra = sizeof(mgmt->u.action.u.tdls_discover_resp) +
++			sizeof(mgmt->u.action.category);
++		skb_put(skb, extra);
+ 		mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ 		mgmt->u.action.u.tdls_discover_resp.action_code =
+ 					      WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+@@ -762,8 +766,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 		mgmt->u.action.u.tdls_discover_resp.capability =
+ 							     cpu_to_le16(capab);
+ 		/* move back for addr4 */
+-		memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+-			sizeof(mgmt->u.action.u.tdls_discover_resp));
++		memmove(pos + ETH_ALEN, &mgmt->u.action, extra);
+ 		/* init address 4 */
+ 		eth_broadcast_addr(pos);
+ 
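The TDLS hunk reserves room for the one-byte action category in addition to the discover-response body, so the later memmove relocates the complete action header. A small sketch of the size arithmetic; the struct layout here is illustrative, not the real ieee80211_mgmt definition:

#include <stdio.h>
#include <stdint.h>

struct tdls_discover_resp {           /* illustrative, not the kernel layout */
	uint8_t  action_code;
	uint8_t  dialog_token;
	uint16_t capability;
} __attribute__((packed));

int main(void)
{
	uint8_t category;                 /* stands in for u.action.category */
	size_t extra = sizeof(struct tdls_discover_resp) + sizeof(category);

	printf("reserve %zu bytes, then memmove the same %zu\n", extra, extra);
	return 0;
}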
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
+index 6a0080f1d91c7..dd16acfd9735d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
+@@ -5,7 +5,7 @@ config MT7603E
+ 	depends on MAC80211
+ 	depends on PCI
+ 	help
+-	  This adds support for MT7603E wireless PCIe devices and the WLAN core
++	  This adds support for MT7603E PCIe wireless devices and the WLAN core
+ 	  on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2
+ 	  to 300Mbps PHY rate
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+index 30fba36ff46bb..1ab1439143f41 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+@@ -11,7 +11,7 @@ config MT7615E
+ 	depends on MAC80211
+ 	depends on PCI
+ 	help
+-	  This adds support for MT7615-based wireless PCIe devices,
++	  This adds support for MT7615-based PCIe wireless devices,
+ 	  which support concurrent dual-band operation at both 5GHz
+ 	  and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2
+ 	  MU-MIMO up to 4 users/group and 160MHz channels.
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
+index 7c88ed8b8f1e9..3ed888782a709 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
+@@ -10,7 +10,7 @@ config MT76x0U
+ 	depends on MAC80211
+ 	depends on USB
+ 	help
+-	  This adds support for MT7610U-based wireless USB 2.0 dongles,
++	  This adds support for MT7610U-based USB 2.0 wireless dongles,
+ 	  which comply with IEEE 802.11ac standards and support 1x1
+ 	  433Mbps PHY rate.
+ 
+@@ -22,7 +22,7 @@ config MT76x0E
+ 	depends on MAC80211
+ 	depends on PCI
+ 	help
+-	  This adds support for MT7610/MT7630-based wireless PCIe devices,
++	  This adds support for MT7610/MT7630-based PCIe wireless devices,
+ 	  which comply with IEEE 802.11ac standards and support 1x1
+ 	  433Mbps PHY rate.
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
+index 5fd4973e32dfb..482a32b70ddfe 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
+@@ -9,7 +9,7 @@ config MT76x2E
+ 	depends on MAC80211
+ 	depends on PCI
+ 	help
+-	  This adds support for MT7612/MT7602/MT7662-based wireless PCIe
++	  This adds support for MT7612/MT7602/MT7662-based PCIe wireless
+ 	  devices, which comply with IEEE 802.11ac standards and support
+ 	  2SS to 866Mbit/s PHY rate.
+ 
+@@ -22,7 +22,7 @@ config MT76x2U
+ 	depends on MAC80211
+ 	depends on USB
+ 	help
+-	  This adds support for MT7612U-based wireless USB 3.0 dongles,
++	  This adds support for MT7612U-based USB 3.0 wireless dongles,
+ 	  which comply with IEEE 802.11ac standards and support 2SS to
+ 	  866Mbit/s PHY rate.
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+index d710726d47bfd..3337cdfed0109 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+@@ -7,7 +7,7 @@ config MT7915E
+ 	depends on PCI
+ 	select RELAY
+ 	help
+-	  This adds support for MT7915-based wireless PCIe devices,
++	  This adds support for MT7915-based PCIe wireless devices,
+ 	  which support concurrent dual-band operation at both 5GHz
+ 	  and 2.4GHz IEEE 802.11ax 4x4:4SS 1024-QAM, 160MHz channels,
+ 	  OFDMA, spatial reuse and dual carrier modulation.
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 1675bf5204812..a671c601c5836 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -1180,6 +1180,10 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ 		return -EINVAL;
+ 
++	err = skb_cow_head(skb, MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE);
++	if (err)
++		return err;
++
+ 	if (!wcid)
+ 		wcid = &dev->mt76.global_wcid;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
+index 1afa2f662e473..bb44d4a5e2dc9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
+@@ -7,7 +7,7 @@ config MT7996E
+ 	depends on MAC80211
+ 	depends on PCI
+ 	help
+-	  This adds support for MT7996-based wireless PCIe devices,
++	  This adds support for MT7996-based PCIe wireless devices,
+ 	  which support concurrent tri-band operation at 6GHz, 5GHz,
+ 	  and 2.4GHz IEEE 802.11be 4x4:4SS 4096-QAM, 320MHz channels.
+ 
+diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
+index 4a8b962806707..4880fc053d9d3 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/Kconfig
++++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig
+@@ -4,4 +4,4 @@ config MT7601U
+ 	depends on MAC80211
+ 	depends on USB
+ 	help
+-	  This adds support for MT7601U-based wireless USB dongles.
++	  This adds support for MT7601U-based USB wireless dongles.
+diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig
+index 4e0be27a5e0eb..dd5fca480d7ef 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/Kconfig
++++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig
+@@ -3,7 +3,7 @@ config PLFXLC
+ 	tristate "pureLiFi X, XL, XC device support"
+ 	depends on CFG80211 && MAC80211 && USB
+ 	help
+-	   This option adds support for pureLiFi LiFi wireless USB
++	   This option adds support for pureLiFi LiFi USB wireless
+ 	   adapters. The pureLiFi X, XL, XC USB devices are based on
+ 	   802.11 OFDM PHY but uses light as the transmission medium.
+ 	   The driver supports common 802.11 encryption/authentication
+diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
+index dcccc290a7f52..d1fd66d44a7ed 100644
+--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
++++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
+@@ -170,7 +170,7 @@ config RT2800USB_RT35XX
+ config RT2800USB_RT3573
+ 	bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)"
+ 	help
+-	  This enables support for RT3573 chipset based wireless USB devices
++	  This enables support for RT3573 chipset based USB wireless devices
+ 	  in the rt2800usb driver.
+ 
+ config RT2800USB_RT53XX
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index c853e2f2d448f..c2ddb4d382af5 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -2183,10 +2183,12 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
+ 		release_firmware(wow_fw->firmware);
+ 
+ 	destroy_workqueue(rtwdev->tx_wq);
++	timer_delete_sync(&rtwdev->tx_report.purge_timer);
+ 	spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
+ 	skb_queue_purge(&rtwdev->tx_report.queue);
+-	skb_queue_purge(&rtwdev->coex.queue);
+ 	spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
++	skb_queue_purge(&rtwdev->coex.queue);
++	skb_queue_purge(&rtwdev->c2h_queue);
+ 
+ 	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
+ 				 build_list) {
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 44a8fff34cddf..2bfc0e822b8d0 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -1828,5 +1828,5 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
+ EXPORT_SYMBOL(rtw_pci_shutdown);
+ 
+ MODULE_AUTHOR("Realtek Corporation");
+-MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
++MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 875a61c9c80d4..c279a500b4bdb 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -930,5 +930,5 @@ void rtw_usb_disconnect(struct usb_interface *intf)
+ EXPORT_SYMBOL(rtw_usb_disconnect);
+ 
+ MODULE_AUTHOR("Realtek Corporation");
+-MODULE_DESCRIPTION("Realtek 802.11ac wireless USB driver");
++MODULE_DESCRIPTION("Realtek USB 802.11ac wireless driver");
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 9402f1a0caea8..3a4bfc44142b6 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -3939,5 +3939,5 @@ void rtw89_pci_remove(struct pci_dev *pdev)
+ EXPORT_SYMBOL(rtw89_pci_remove);
+ 
+ MODULE_AUTHOR("Realtek Corporation");
+-MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
++MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index f446fd0e8cd0d..23307c8baea21 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -582,9 +582,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
+ 		 */
+ 
+ 		/* Add vendor data */
+-		err = nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1);
+-		if (err)
+-			return err;
++		nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1);
++
+ 		/* Send the event - this will call nla_nest_end() */
+ 		cfg80211_vendor_event(skb, GFP_KERNEL);
+ 	}
+@@ -5626,14 +5625,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 	frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
+ 	frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+ 
++	if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) ||
++	    frame_data_len > IEEE80211_MAX_DATA_LEN)
++		goto err;
++
+ 	/* Allocate new skb here */
+ 	skb = alloc_skb(frame_data_len, GFP_KERNEL);
+ 	if (skb == NULL)
+ 		goto err;
+ 
+-	if (frame_data_len > IEEE80211_MAX_DATA_LEN)
+-		goto err;
+-
+ 	/* Copy the data */
+ 	skb_put_data(skb, frame_data, frame_data_len);
+ 
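The hwsim hunk moves the length check before the allocation and adds a lower bound of a minimal 3-address 802.11 header. A sketch of that validate-before-allocate ordering, with the bounds written as illustrative constants:

#include <stdio.h>
#include <stdlib.h>

#define HDR_3ADDR_LEN 24        /* sizeof(struct ieee80211_hdr_3addr) */
#define MAX_DATA_LEN  2304      /* IEEE80211_MAX_DATA_LEN */

static void *rx_frame(const void *data, size_t len)
{
	(void)data;
	if (len < HDR_3ADDR_LEN || len > MAX_DATA_LEN)
		return NULL;            /* dropped before any allocation */
	return malloc(len);
}

int main(void)
{
	printf("4-byte frame -> %p\n", rx_frame("abcd", 4));
	return 0;
}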
+diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
+index a85fe7e4c6d47..2814df1ecc78f 100644
+--- a/drivers/net/wireless/zydas/zd1201.c
++++ b/drivers/net/wireless/zydas/zd1201.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- *	Driver for ZyDAS zd1201 based wireless USB devices.
++ *	Driver for ZyDAS zd1201 based USB wireless devices.
+  *
+  *	Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org)
+  *
+@@ -23,8 +23,8 @@
+ #include "zd1201.h"
+ 
+ static const struct usb_device_id zd1201_table[] = {
+-	{USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */
+-	{USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
++	{USB_DEVICE(0x0586, 0x3400)}, /* Peabird USB Wireless Adapter */
++	{USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 USB Wireless Adapter */
+ 	{USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb  adapter */
+ 	{USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb  adapter */
+ 	{USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f3a01b79148cb..21783aa2ee8e1 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2245,25 +2245,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+ 	else
+ 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ 
+-	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+-		u32 crto;
+-
+-		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+-		if (ret) {
+-			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+-				ret);
+-			return ret;
+-		}
+-
+-		if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
+-			ctrl->ctrl_config |= NVME_CC_CRIME;
+-			timeout = NVME_CRTO_CRIMT(crto);
+-		} else {
+-			timeout = NVME_CRTO_CRWMT(crto);
+-		}
+-	} else {
+-		timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+-	}
++	if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
++		ctrl->ctrl_config |= NVME_CC_CRIME;
+ 
+ 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+@@ -2277,6 +2260,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+ 	if (ret)
+ 		return ret;
+ 
++	/* CAP value may change after initial CC write */
++	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
++	if (ret)
++		return ret;
++
++	timeout = NVME_CAP_TIMEOUT(ctrl->cap);
++	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
++		u32 crto, ready_timeout;
++
++		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
++		if (ret) {
++			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
++				ret);
++			return ret;
++		}
++
++		/*
++		 * CRTO should always be greater than or equal to CAP.TO, but some
++		 * devices are known to get this wrong. Use the larger of the
++		 * two values.
++		 */
++		if (ctrl->ctrl_config & NVME_CC_CRIME)
++			ready_timeout = NVME_CRTO_CRIMT(crto);
++		else
++			ready_timeout = NVME_CRTO_CRWMT(crto);
++
++		if (ready_timeout < timeout)
++			dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
++				      crto, ctrl->cap);
++		else
++			timeout = ready_timeout;
++	}
++
+ 	ctrl->ctrl_config |= NVME_CC_ENABLE;
+ 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ 	if (ret)
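The controller-enable hunk re-reads CAP after the first CC write and keeps the larger of CAP.TO and the CRTO-derived timeout, since some devices report CRTO smaller than CAP.TO. A minimal sketch of that selection, with the register field decoding simplified away:

#include <stdio.h>

static unsigned int pick_ready_timeout(unsigned int cap_to, unsigned int crto_t)
{
	if (crto_t < cap_to) {
		fprintf(stderr, "bad crto %u < cap.to %u\n", crto_t, cap_to);
		return cap_to;          /* keep the larger, safer value */
	}
	return crto_t;
}

int main(void)
{
	printf("%u\n", pick_ready_timeout(30, 10));  /* buggy device -> 30 */
	printf("%u\n", pick_ready_timeout(30, 500)); /* sane device  -> 500 */
	return 0;
}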
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 868aa4de2e4c4..cd92d7ddf5ed1 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -348,7 +348,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ 	while (length) {
+ 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+ 
+-		bvec_set_page(iov, sg_page(sg), sg->length,
++		bvec_set_page(iov, sg_page(sg), iov_len,
+ 				sg->offset + sg_offset);
+ 
+ 		length -= iov_len;
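The one-line nvmet-tcp fix passes the clamped per-segment length (iov_len) instead of the full scatterlist segment length. A tiny sketch of why the clamp matters for the final segment of a transfer:

#include <stdio.h>

int main(void)
{
	unsigned int length = 10, sg_len = 4096, sg_offset = 0;
	unsigned int iov_len = length < sg_len - sg_offset ?
			       length : sg_len - sg_offset;

	/* using sg_len here would overrun the 10-byte transfer */
	printf("bvec length = %u, not %u\n", iov_len, sg_len);
	return 0;
}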
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 27aaa2a6bf391..a18c20085e940 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -1040,6 +1040,7 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+ 
+ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+ 	.host_init = imx6_pcie_host_init,
++	.host_deinit = imx6_pcie_host_exit,
+ };
+ 
+ static const struct dw_pcie_ops dw_pcie_ops = {
+diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
+index 0c90583c078bf..1e9b44b8bba48 100644
+--- a/drivers/pci/controller/dwc/pcie-fu740.c
++++ b/drivers/pci/controller/dwc/pcie-fu740.c
+@@ -299,6 +299,7 @@ static int fu740_pcie_probe(struct platform_device *pdev)
+ 	pci->dev = dev;
+ 	pci->ops = &dw_pcie_ops;
+ 	pci->pp.ops = &fu740_pcie_host_ops;
++	pci->pp.num_vectors = MAX_MSI_IRQS;
+ 
+ 	/* SiFive specific region: mgmt */
+ 	afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index e718a816d4814..ad56df98b8e63 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -541,8 +541,23 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ 				     PCI_CLASS_BRIDGE_PCI))
+ 					continue;
+ 
+-				memset_io(base + PCI_IO_BASE, 0,
+-					  PCI_ROM_ADDRESS1 - PCI_IO_BASE);
++				/*
++				 * Temporarily disable the I/O range before updating
++				 * PCI_IO_BASE.
++				 */
++				writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
++				/* Update lower 16 bits of I/O base/limit */
++				writew(0x00f0, base + PCI_IO_BASE);
++				/* Update upper 16 bits of I/O base/limit */
++				writel(0, base + PCI_IO_BASE_UPPER16);
++
++				/* MMIO Base/Limit */
++				writel(0x0000fff0, base + PCI_MEMORY_BASE);
++
++				/* Prefetchable MMIO Base/Limit */
++				writel(0, base + PCI_PREF_LIMIT_UPPER32);
++				writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
++				writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 25a269d431e45..0e17c57ddb876 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -115,6 +115,7 @@
+ #define SMMU_PMCG_PA_SHIFT              12
+ 
+ #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
++#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
+ 
+ static int cpuhp_state_num;
+ 
+@@ -159,6 +160,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu)
+ 	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+ }
+ 
++static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
++				       struct perf_event *event, int idx);
++
++static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
++{
++	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
++	unsigned int idx;
++
++	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
++		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
++
++	smmu_pmu_enable(pmu);
++}
++
+ static inline void smmu_pmu_disable(struct pmu *pmu)
+ {
+ 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+@@ -167,6 +182,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu)
+ 	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+ }
+ 
++static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
++{
++	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
++	unsigned int idx;
++
++	/*
++	 * The global disable of the PMU sometimes fails to stop the counting.
++	 * Harden this by writing an invalid event type to each used counter
++	 * to forcibly stop counting.
++	 */
++	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
++		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
++
++	smmu_pmu_disable(pmu);
++}
++
+ static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
+ 					      u32 idx, u64 value)
+ {
+@@ -765,7 +796,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
+ 	switch (model) {
+ 	case IORT_SMMU_V3_PMCG_HISI_HIP08:
+ 		/* HiSilicon Erratum 162001800 */
+-		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
++		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
++		break;
++	case IORT_SMMU_V3_PMCG_HISI_HIP09:
++		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
+ 		break;
+ 	}
+ 
+@@ -890,6 +924,16 @@ static int smmu_pmu_probe(struct platform_device *pdev)
+ 	if (!dev->of_node)
+ 		smmu_pmu_get_acpi_options(smmu_pmu);
+ 
++	/*
++	 * For platforms that suffer from this quirk, the PMU disable sometimes
++	 * fails to stop the counters. This leads to inaccurate or erroneous
++	 * counting. Forcibly disable the counters with this quirk handler.
++	 */
++	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
++		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
++		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
++	}
++
+ 	/* Pick one CPU to be the preferred one to use */
+ 	smmu_pmu->on_cpu = raw_smp_processor_id();
+ 	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
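The quirk handlers above stop counting by programming an invalid event type into every used counter before the global disable. A sketch of that hardening loop, with the kernel's for_each_set_bit() open-coded over a plain bitmask:

#include <stdio.h>

#define NUM_COUNTERS   8
#define INVALID_EVTYPE 0xffff

static unsigned int evtyper[NUM_COUNTERS];

int main(void)
{
	unsigned long used = 0x05;      /* counters 0 and 2 in use */

	for (unsigned int idx = 0; idx < NUM_COUNTERS; idx++)
		if (used & (1UL << idx))
			evtyper[idx] = INVALID_EVTYPE;

	printf("evtyper[2] = 0x%x\n", evtyper[2]);
	return 0;
}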
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index c684aab407f86..e78b30521be25 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -28,6 +28,8 @@
+ #define CNTL_CLEAR_MASK		0xFFFFFFFD
+ #define CNTL_OVER_MASK		0xFFFFFFFE
+ 
++#define CNTL_CP_SHIFT		16
++#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
+ #define CNTL_CSV_SHIFT		24
+ #define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)
+ 
+@@ -35,6 +37,8 @@
+ #define EVENT_CYCLES_COUNTER	0
+ #define NUM_COUNTERS		4
+ 
++/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
++#define CYCLES_COUNTER_MASK	0x0FFFFFFF
+ #define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+ 
+ #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
+@@ -428,6 +432,17 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
+ 		writel(0, pmu->base + reg);
+ 		val = CNTL_EN | CNTL_CLEAR;
+ 		val |= FIELD_PREP(CNTL_CSV_MASK, config);
++
++		/*
++		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
++		 * We do this by initializing bits [23:16] of the counter value via the
++		 * COUNTER_CTRL Counter Parameter (CP) field.
++		 */
++		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
++			if (counter == EVENT_CYCLES_COUNTER)
++				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
++		}
++
+ 		writel(val, pmu->base + reg);
+ 	} else {
+ 		/* Disable counter */
+@@ -467,6 +482,12 @@ static void ddr_perf_event_update(struct perf_event *event)
+ 	int ret;
+ 
+ 	new_raw_count = ddr_perf_read_counter(pmu, counter);
++	/* Remove the bias applied in ddr_perf_counter_enable(). */
++	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
++		if (counter == EVENT_CYCLES_COUNTER)
++			new_raw_count &= CYCLES_COUNTER_MASK;
++	}
++
+ 	local64_add(new_raw_count, &event->count);
+ 
+ 	/*
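The i.MX8MP hunks preload the cycle counter with a bias so it overflows more often, then mask the bias back out on every read. A sketch of the unbias step, using the 0x0FFFFFFF mask from the hunk:

#include <stdio.h>
#include <stdint.h>

#define CYCLES_COUNTER_MASK 0x0FFFFFFFu

int main(void)
{
	uint32_t raw   = 0xF0000123u;   /* biased value read from hardware */
	uint32_t count = raw & CYCLES_COUNTER_MASK;

	printf("0x%08x -> 0x%08x\n", raw, count);
	return 0;
}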
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 500a61b093e47..356572452898d 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -327,8 +327,8 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
+ 		dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
+ 		blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
+ 		kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
+-		/* Begin orderly shutdown. Force shutdown after 1 second. */
+-		hw_protection_shutdown("CrOS EC Panic", 1000);
++		/* Begin orderly shutdown. EC will force reset after a short period. */
++		hw_protection_shutdown("CrOS EC Panic", -1);
+ 		/* Do not query for other events after a panic is reported */
+ 		return;
+ 	}
+diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
+index 62b71e8e3567a..ff1b70269ccbf 100644
+--- a/drivers/platform/x86/eeepc-laptop.c
++++ b/drivers/platform/x86/eeepc-laptop.c
+@@ -1394,7 +1394,7 @@ static int eeepc_acpi_add(struct acpi_device *device)
+ 	 * and machine-specific scripts find the fixed name convenient.  But
+ 	 * It's also good for us to exclude multiple instances because both
+ 	 * our hwmon and our wlan rfkill subdevice use global ACPI objects
+-	 * (the EC and the wlan PCI slot respectively).
++	 * (the EC and the PCI wlan slot respectively).
+ 	 */
+ 	result = eeepc_platform_init(eeepc);
+ 	if (result)
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 9a89636843693..e8d7eeeb21856 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -872,6 +872,7 @@ enum lpfc_irq_chann_mode {
+ enum lpfc_hba_bit_flags {
+ 	FABRIC_COMANDS_BLOCKED,
+ 	HBA_PCI_ERR,
++	MBX_TMO_ERR,
+ };
+ 
+ struct lpfc_hba {
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 7f9b221e7c34a..ea9b42225e629 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -6073,7 +6073,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 					    phba->hba_debugfs_root,
+ 					    phba,
+ 					    &lpfc_debugfs_op_multixripools);
+-		if (!phba->debug_multixri_pools) {
++		if (IS_ERR(phba->debug_multixri_pools)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "0527 Cannot create debugfs multixripools\n");
+ 			goto debug_failed;
+@@ -6085,7 +6085,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_cgn_buffer_op);
+-		if (!phba->debug_cgn_buffer) {
++		if (IS_ERR(phba->debug_cgn_buffer)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6527 Cannot create debugfs "
+ 					 "cgn_buffer\n");
+@@ -6098,7 +6098,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_rx_monitor_op);
+-		if (!phba->debug_rx_monitor) {
++		if (IS_ERR(phba->debug_rx_monitor)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6528 Cannot create debugfs "
+ 					 "rx_monitor\n");
+@@ -6111,7 +6111,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_debugfs_ras_log);
+-		if (!phba->debug_ras_log) {
++		if (IS_ERR(phba->debug_ras_log)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6148 Cannot create debugfs"
+ 					 " ras_log\n");
+@@ -6132,7 +6132,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_debugfs_op_lockstat);
+-		if (!phba->debug_lockstat) {
++		if (IS_ERR(phba->debug_lockstat)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "4610 Can't create debugfs lockstat\n");
+ 			goto debug_failed;
+@@ -6358,7 +6358,7 @@ nvmeio_off:
+ 		debugfs_create_file(name, 0644,
+ 				    vport->vport_debugfs_root,
+ 				    vport, &lpfc_debugfs_op_scsistat);
+-	if (!vport->debug_scsistat) {
++	if (IS_ERR(vport->debug_scsistat)) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 				 "4611 Cannot create debugfs scsistat\n");
+ 		goto debug_failed;
+@@ -6369,7 +6369,7 @@ nvmeio_off:
+ 		debugfs_create_file(name, 0644,
+ 				    vport->vport_debugfs_root,
+ 				    vport, &lpfc_debugfs_op_ioktime);
+-	if (!vport->debug_ioktime) {
++	if (IS_ERR(vport->debug_ioktime)) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 				 "0815 Cannot create debugfs ioktime\n");
+ 		goto debug_failed;
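The lpfc (and later qla2xxx) debugfs checks change because debugfs_create_file() reports failure as an ERR_PTR-encoded pointer, never NULL, so the old NULL tests could not fire. A self-contained sketch of the distinction; IS_ERR()/ERR_PTR() are re-implemented here only to keep it runnable:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

int main(void)
{
	void *dentry = ERR_PTR(-ENODEV);   /* simulated failed create */

	if (!dentry)
		puts("NULL check: never reached");
	if (IS_ERR(dentry))
		puts("IS_ERR check: failure detected");
	return 0;
}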
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 2bad9954c355f..6f6ef5235ee3b 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -9588,11 +9588,13 @@ void
+ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ {
+ 	LIST_HEAD(abort_list);
++	LIST_HEAD(cancel_list);
+ 	struct lpfc_hba  *phba = vport->phba;
+ 	struct lpfc_sli_ring *pring;
+ 	struct lpfc_iocbq *tmp_iocb, *piocb;
+ 	u32 ulp_command;
+ 	unsigned long iflags = 0;
++	bool mbx_tmo_err;
+ 
+ 	lpfc_fabric_abort_vport(vport);
+ 
+@@ -9614,15 +9616,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		spin_lock(&pring->ring_lock);
+ 
++	mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
+ 	/* First we need to issue aborts to outstanding cmds on txcmpl */
+ 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+-		if (piocb->cmd_flag & LPFC_IO_LIBDFC)
++		if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
+ 			continue;
+ 
+ 		if (piocb->vport != vport)
+ 			continue;
+ 
+-		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
++		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
+ 			continue;
+ 
+ 		/* On the ELS ring we can have ELS_REQUESTs or
+@@ -9641,8 +9644,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 			 */
+ 			if (phba->link_state == LPFC_LINK_DOWN)
+ 				piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
+-		}
+-		if (ulp_command == CMD_GEN_REQUEST64_CR)
++		} else if (ulp_command == CMD_GEN_REQUEST64_CR ||
++			   mbx_tmo_err)
+ 			list_add_tail(&piocb->dlist, &abort_list);
+ 	}
+ 
+@@ -9654,11 +9657,19 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ 		spin_lock_irqsave(&phba->hbalock, iflags);
+ 		list_del_init(&piocb->dlist);
+-		lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
++		if (mbx_tmo_err)
++			list_move_tail(&piocb->list, &cancel_list);
++		else
++			lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
++
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 	}
+-	/* Make sure HBA is alive */
+-	lpfc_issue_hb_tmo(phba);
++	if (!list_empty(&cancel_list))
++		lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
++				      IOERR_SLI_ABORTED);
++	else
++		/* Make sure HBA is alive */
++		lpfc_issue_hb_tmo(phba);
+ 
+ 	if (!list_empty(&abort_list))
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 3221a934066bb..ce9e4cdd6004c 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -7550,6 +7550,8 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
+ void
+ lpfc_reset_hba(struct lpfc_hba *phba)
+ {
++	int rc = 0;
++
+ 	/* If resets are disabled then set error state and return. */
+ 	if (!phba->cfg_enable_hba_reset) {
+ 		phba->link_state = LPFC_HBA_ERROR;
+@@ -7560,13 +7562,25 @@ lpfc_reset_hba(struct lpfc_hba *phba)
+ 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
+ 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ 	} else {
++		if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
++			/* Perform a PCI function reset to start from clean */
++			rc = lpfc_pci_function_reset(phba);
++			lpfc_els_flush_all_cmd(phba);
++		}
+ 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ 		lpfc_sli_flush_io_rings(phba);
+ 	}
+ 	lpfc_offline(phba);
+-	lpfc_sli_brdrestart(phba);
+-	lpfc_online(phba);
+-	lpfc_unblock_mgmt_io(phba);
++	clear_bit(MBX_TMO_ERR, &phba->bit_flags);
++	if (unlikely(rc)) {
++		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
++				"8888 PCI function reset failed rc %x\n",
++				rc);
++	} else {
++		lpfc_sli_brdrestart(phba);
++		lpfc_online(phba);
++		lpfc_unblock_mgmt_io(phba);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 58d10f8f75a78..4dfadf254a727 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -3935,6 +3935,8 @@ void lpfc_poll_eratt(struct timer_list *t)
+ 	uint64_t sli_intr, cnt;
+ 
+ 	phba = from_timer(phba, t, eratt_poll);
++	if (!(phba->hba_flag & HBA_SETUP))
++		return;
+ 
+ 	/* Here we will also keep track of interrupts per sec of the hba */
+ 	sli_intr = phba->sli.slistat.sli_intr;
+@@ -7693,7 +7695,9 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ 		spin_unlock_irq(&phba->hbalock);
+ 	} else {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"3161 Failure to post sgl to port.\n");
++				"3161 Failure to post sgl to port, status %x "
++				"blkcnt %d totalcnt %d postcnt %d\n",
++				status, block_cnt, total_cnt, post_cnt);
+ 		return -EIO;
+ 	}
+ 
+@@ -8478,6 +8482,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 			spin_unlock_irq(&phba->hbalock);
+ 		}
+ 	}
++	phba->hba_flag &= ~HBA_SETUP;
+ 
+ 	lpfc_sli4_dip(phba);
+ 
+@@ -9282,6 +9287,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+ 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
+ 	 * it to fail all outstanding SCSI IO.
+ 	 */
++	set_bit(MBX_TMO_ERR, &phba->bit_flags);
+ 	spin_lock_irq(&phba->pport->work_port_lock);
+ 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ 	spin_unlock_irq(&phba->pport->work_port_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 3554f6b077273..94abba57582d5 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2332,7 +2332,7 @@ struct megasas_instance {
+ 	u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
+ 	bool use_seqnum_jbod_fp;   /* Added for PD sequence */
+ 	bool smp_affinity_enable;
+-	spinlock_t crashdump_lock;
++	struct mutex crashdump_lock;
+ 
+ 	struct megasas_register_set __iomem *reg_set;
+ 	u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 050eed8e26846..c0d47141f6d38 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev,
+ 	struct megasas_instance *instance =
+ 		(struct megasas_instance *) shost->hostdata;
+ 	int val = 0;
+-	unsigned long flags;
+ 
+ 	if (kstrtoint(buf, 0, &val) != 0)
+ 		return -EINVAL;
+ 
+-	spin_lock_irqsave(&instance->crashdump_lock, flags);
++	mutex_lock(&instance->crashdump_lock);
+ 	instance->fw_crash_buffer_offset = val;
+-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++	mutex_unlock(&instance->crashdump_lock);
+ 	return strlen(buf);
+ }
+ 
+@@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev,
+ 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ 	unsigned long chunk_left_bytes;
+ 	unsigned long src_addr;
+-	unsigned long flags;
+ 	u32 buff_offset;
+ 
+-	spin_lock_irqsave(&instance->crashdump_lock, flags);
++	mutex_lock(&instance->crashdump_lock);
+ 	buff_offset = instance->fw_crash_buffer_offset;
+ 	if (!instance->crash_dump_buf ||
+ 		!((instance->fw_crash_state == AVAILABLE) ||
+ 		(instance->fw_crash_state == COPYING))) {
+ 		dev_err(&instance->pdev->dev,
+ 			"Firmware crash dump is not available\n");
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
+ 		dev_err(&instance->pdev->dev,
+ 			"Firmware crash dump offset is out of range\n");
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		return 0;
+ 	}
+ 
+@@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev,
+ 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ 		(buff_offset % dmachunk);
+ 	memcpy(buf, (void *)src_addr, size);
+-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++	mutex_unlock(&instance->crashdump_lock);
+ 
+ 	return size;
+ }
+@@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev,
+ 	struct megasas_instance *instance =
+ 		(struct megasas_instance *) shost->hostdata;
+ 	int val = 0;
+-	unsigned long flags;
+ 
+ 	if (kstrtoint(buf, 0, &val) != 0)
+ 		return -EINVAL;
+@@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev,
+ 	instance->fw_crash_state = val;
+ 
+ 	if ((val == COPIED) || (val == COPY_ERROR)) {
+-		spin_lock_irqsave(&instance->crashdump_lock, flags);
++		mutex_lock(&instance->crashdump_lock);
+ 		megasas_free_host_crash_buffer(instance);
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		if (val == COPY_ERROR)
+ 			dev_info(&instance->pdev->dev, "application failed to "
+ 				"copy Firmware crash dump\n");
+@@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+ 	init_waitqueue_head(&instance->int_cmd_wait_q);
+ 	init_waitqueue_head(&instance->abort_cmd_wait_q);
+ 
+-	spin_lock_init(&instance->crashdump_lock);
++	mutex_init(&instance->crashdump_lock);
+ 	spin_lock_init(&instance->mfi_pool_lock);
+ 	spin_lock_init(&instance->hba_lock);
+ 	spin_lock_init(&instance->stream_lock);
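The megaraid hunks replace a spinlock with a mutex around the crash-dump sysfs paths, which copy large buffers and so belong in sleepable context rather than under a lock with interrupts disabled. A sketch of the resulting pattern, with pthread_mutex standing in for the kernel mutex:

#include <pthread.h>
#include <string.h>
#include <stdio.h>

static pthread_mutex_t crashdump_lock = PTHREAD_MUTEX_INITIALIZER;
static char crash_buf[4096];

static size_t crash_buffer_show(char *out, size_t len)
{
	pthread_mutex_lock(&crashdump_lock);
	memcpy(out, crash_buf, len);       /* long copy, now sleepable */
	pthread_mutex_unlock(&crashdump_lock);
	return len;
}

int main(void)
{
	char page[64];
	printf("%zu bytes\n", crash_buffer_show(page, sizeof(page)));
	return 0;
}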
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 4995e1ef4e0e5..6ebd1336a8991 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -275,7 +275,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
+ 	return ret;
+ }
+ 
+-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
+ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+ 
+ /**
+@@ -296,13 +295,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+ 	pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
+ 		   pm8001_ha->chip->n_phy);
+ 
+-	/* Setup Interrupt */
+-	rc = pm8001_setup_irq(pm8001_ha);
+-	if (rc) {
+-		pm8001_dbg(pm8001_ha, FAIL,
+-			   "pm8001_setup_irq failed [ret: %d]\n", rc);
+-		goto err_out;
+-	}
+ 	/* Request Interrupt */
+ 	rc = pm8001_request_irq(pm8001_ha);
+ 	if (rc)
+@@ -1034,47 +1026,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
+ }
+ #endif
+ 
+-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
+-{
+-	struct pci_dev *pdev;
+-
+-	pdev = pm8001_ha->pdev;
+-
+-#ifdef PM8001_USE_MSIX
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+-		return pm8001_setup_msix(pm8001_ha);
+-	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+-#endif
+-	return 0;
+-}
+-
+ /**
+  * pm8001_request_irq - register interrupt
+  * @pm8001_ha: our ha struct.
+  */
+ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
+ {
+-	struct pci_dev *pdev;
++	struct pci_dev *pdev = pm8001_ha->pdev;
++#ifdef PM8001_USE_MSIX
+ 	int rc;
+ 
+-	pdev = pm8001_ha->pdev;
++	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
++		rc = pm8001_setup_msix(pm8001_ha);
++		if (rc) {
++			pm8001_dbg(pm8001_ha, FAIL,
++				   "pm8001_setup_irq failed [ret: %d]\n", rc);
++			return rc;
++		}
+ 
+-#ifdef PM8001_USE_MSIX
+-	if (pdev->msix_cap && pci_msi_enabled())
+-		return pm8001_request_msix(pm8001_ha);
+-	else {
+-		pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+-		goto intx;
++		if (pdev->msix_cap && pci_msi_enabled())
++			return pm8001_request_msix(pm8001_ha);
+ 	}
++
++	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+ #endif
+ 
+-intx:
+ 	/* initialize the INT-X interrupt */
+ 	pm8001_ha->irq_vector[0].irq_id = 0;
+ 	pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
+-	rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+-		pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
+-	return rc;
++
++	return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
++			   IRQF_SHARED, pm8001_ha->name,
++			   SHOST_TO_SAS_HA(pm8001_ha->shost));
+ }
+ 
+ /**
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index f060e593685de..a7a364760b800 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+ 
+ 	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+ 	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+-	if (!fp->dfs_rport_dir)
++	if (IS_ERR(fp->dfs_rport_dir))
+ 		return;
+ 	if (NVME_TARGET(vha->hw, fp))
+ 		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+@@ -708,14 +708,14 @@ create_nodes:
+ 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
+ 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+ 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+-		if (!ha->tgt.dfs_naqp) {
++		if (IS_ERR(ha->tgt.dfs_naqp)) {
+ 			ql_log(ql_log_warn, vha, 0xd011,
+ 			       "Unable to create debugFS naqp node.\n");
+ 			goto out;
+ 		}
+ 	}
+ 	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+-	if (!vha->dfs_rport_root) {
++	if (IS_ERR(vha->dfs_rport_root)) {
+ 		ql_log(ql_log_warn, vha, 0xd012,
+ 		       "Unable to create debugFS rports node.\n");
+ 		goto out;
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 0556969f6dc1b..a4a56ab0ba747 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -577,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport)
+ static inline struct qla_qpair *
+ qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
+ {
+-	int cpuid = smp_processor_id();
++	int cpuid = raw_smp_processor_id();
+ 
+ 	if (qpair->cpuid != cpuid &&
+ 	    ha->qp_cpu_map[cpuid]) {
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 1f42a413b5988..a7321e02c6412 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3817,7 +3817,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+ 	if (!ha->flags.fw_started)
+ 		return;
+ 
+-	if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
++	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
+ 		rsp->qpair->rcv_intr = 1;
+ 
+ 		if (!rsp->qpair->cpu_mapped)
+@@ -4308,7 +4308,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
+ 	}
+ 	ha = qpair->hw;
+ 
+-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
++	queue_work(ha->wq, &qpair->q_work);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -4334,7 +4334,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
+ 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
++	queue_work(ha->wq, &qpair->q_work);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 2b815a9928ea3..2ef2dbac0db27 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
+ 	} else if (ha->msix_count) {
+ 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+-			queue_work_on(smp_processor_id(), qla_tgt_wq,
+-			    &cmd->work);
++			queue_work(qla_tgt_wq, &cmd->work);
+ 		else
+ 			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+ 			    &cmd->work);
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 3b5ba4b47b3b2..68a0e6a2fb6e9 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+ 	cmd->trc_flags |= TRC_CMD_DONE;
+ 
+ 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
++	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ }
+ 
+ /*
+@@ -547,7 +547,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+ 	cmd->trc_flags |= TRC_DATA_IN;
+ 	cmd->cmd_in_wq = 1;
+ 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
++	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ }
+ 
+ static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
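The qla2xxx hunks drop queue_work_on(smp_processor_id(), ...) in favor of plain queue_work(), and switch hot-path CPU reads to raw_smp_processor_id(), since the caller may be preemptible and its CPU id stale by the time the work runs. A toy sketch of the two call shapes, with stubs that only model the placement difference:

#include <stdio.h>

static void queue_work(const char *wq)
{
	printf("queue on %s, workqueue picks the CPU\n", wq);
}

static void queue_work_on(int cpu, const char *wq)
{
	printf("queue on %s, pinned to CPU %d\n", wq, cpu);
}

int main(void)
{
	queue_work_on(3, "qla_tgt_wq"); /* old: pinned to a possibly stale CPU */
	queue_work("qla_tgt_wq");       /* new: unbound placement */
	return 0;
}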
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index 30d541612253e..cec2747235abf 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -83,6 +83,9 @@
+ #define SUN6I_XMIT_CNT_REG		0x34
+ 
+ #define SUN6I_BURST_CTL_CNT_REG		0x38
++#define SUN6I_BURST_CTL_CNT_STC_MASK		GENMASK(23, 0)
++#define SUN6I_BURST_CTL_CNT_DRM			BIT(28)
++#define SUN6I_BURST_CTL_CNT_QUAD_EN		BIT(29)
+ 
+ #define SUN6I_TXDATA_REG		0x200
+ #define SUN6I_RXDATA_REG		0x300
+@@ -90,6 +93,7 @@
+ struct sun6i_spi_cfg {
+ 	unsigned long		fifo_depth;
+ 	bool			has_clk_ctl;
++	u32			mode_bits;
+ };
+ 
+ struct sun6i_spi {
+@@ -266,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	unsigned int div, div_cdr1, div_cdr2, timeout;
+ 	unsigned int start, end, tx_time;
+ 	unsigned int trig_level;
+-	unsigned int tx_len = 0, rx_len = 0;
++	unsigned int tx_len = 0, rx_len = 0, nbits = 0;
+ 	bool use_dma;
+ 	int ret = 0;
+ 	u32 reg;
+@@ -418,13 +422,29 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
+ 
+ 	/* Setup the transfer now... */
+-	if (sspi->tx_buf)
++	if (sspi->tx_buf) {
+ 		tx_len = tfr->len;
++		nbits = tfr->tx_nbits;
++	} else if (tfr->rx_buf) {
++		nbits = tfr->rx_nbits;
++	}
++
++	switch (nbits) {
++	case SPI_NBITS_DUAL:
++		reg = SUN6I_BURST_CTL_CNT_DRM;
++		break;
++	case SPI_NBITS_QUAD:
++		reg = SUN6I_BURST_CTL_CNT_QUAD_EN;
++		break;
++	case SPI_NBITS_SINGLE:
++	default:
++		reg = FIELD_PREP(SUN6I_BURST_CTL_CNT_STC_MASK, tx_len);
++	}
+ 
+ 	/* Setup the counters */
++	sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, reg);
+ 	sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
+ 	sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
+-	sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, tx_len);
+ 
+ 	if (!use_dma) {
+ 		/* Fill the TX FIFO */
+@@ -623,7 +643,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
+ 	master->set_cs = sun6i_spi_set_cs;
+ 	master->transfer_one = sun6i_spi_transfer_one;
+ 	master->num_chipselect = 4;
+-	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
++	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
++			    sspi->cfg->mode_bits;
+ 	master->bits_per_word_mask = SPI_BPW_MASK(8);
+ 	master->dev.of_node = pdev->dev.of_node;
+ 	master->auto_runtime_pm = true;
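The sun6i hunks derive the transfer width from tx_nbits/rx_nbits and program either the dual (DRM) or quad (QUAD_EN) enable bit, falling back to the single-bit STC byte count. A sketch of that selection using the register bits from the hunk, with FIELD_PREP reduced to a mask:

#include <stdio.h>
#include <stdint.h>

#define BURST_CTL_CNT_DRM     (1u << 28)
#define BURST_CTL_CNT_QUAD_EN (1u << 29)

static uint32_t burst_ctl(unsigned int nbits, uint32_t tx_len)
{
	switch (nbits) {
	case 2:  return BURST_CTL_CNT_DRM;      /* SPI_NBITS_DUAL */
	case 4:  return BURST_CTL_CNT_QUAD_EN;  /* SPI_NBITS_QUAD */
	default: return tx_len & 0x00FFFFFFu;   /* single: STC byte count */
	}
}

int main(void)
{
	printf("quad:   0x%08x\n", burst_ctl(4, 16)); /* 0x20000000 */
	printf("single: 0x%08x\n", burst_ctl(1, 16)); /* 0x00000010 */
	return 0;
}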
+diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
+index 80e36d03c4e25..0e0ccef4871e9 100644
+--- a/drivers/staging/wlan-ng/prism2usb.c
++++ b/drivers/staging/wlan-ng/prism2usb.c
+@@ -11,45 +11,45 @@
+ 
+ static const struct usb_device_id usb_prism_tbl[] = {
+ 	PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
+-	PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"),
+-	PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps WLAN USB Adapter"),
++	PRISM_DEV(0x07aa, 0x0012, "Corega USB Wireless LAN Stick-11"),
++	PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps USB WLAN Adapter"),
+ 	PRISM_DEV(0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter"),
+-	PRISM_DEV(0x08de, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter"),
+-	PRISM_DEV(0x8086, 0x1111, "Intel PRO/Wireless 2011B LAN USB Adapter"),
++	PRISM_DEV(0x08de, 0x7a01, "PRISM25 USB IEEE 802.11 Mini Adapter"),
++	PRISM_DEV(0x8086, 0x1111, "Intel PRO/Wireless 2011B USB LAN Adapter"),
+ 	PRISM_DEV(0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter"),
+-	PRISM_DEV(0x045e, 0x006e, "Microsoft MN510 Wireless USB Adapter"),
++	PRISM_DEV(0x045e, 0x006e, "Microsoft MN510 USB Wireless Adapter"),
+ 	PRISM_DEV(0x0967, 0x0204, "Acer Warplink USB Adapter"),
+ 	PRISM_DEV(0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated"),
+-	PRISM_DEV(0x0cde, 0x0005, "Z-Com Xl735 Wireless 802.11b USB Adapter"),
+-	PRISM_DEV(0x413c, 0x8100, "Dell TrueMobile 1180 Wireless USB Adapter"),
+-	PRISM_DEV(0x0b3b, 0x1601, "ALLNET 0193 11Mbps WLAN USB Adapter"),
+-	PRISM_DEV(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 Wireless USB Adapter"),
+-	PRISM_DEV(0x0baf, 0x00eb, "USRobotics USR1120 Wireless USB Adapter"),
++	PRISM_DEV(0x0cde, 0x0005, "Z-Com Xl735 USB Wireless 802.11b Adapter"),
++	PRISM_DEV(0x413c, 0x8100, "Dell TrueMobile 1180 USB Wireless Adapter"),
++	PRISM_DEV(0x0b3b, 0x1601, "ALLNET 0193 11Mbps USB WLAN Adapter"),
++	PRISM_DEV(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 USB Wireless Adapter"),
++	PRISM_DEV(0x0baf, 0x00eb, "USRobotics USR1120 USB Wireless Adapter"),
+ 	PRISM_DEV(0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter"),
+ 	PRISM_DEV(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter"),
+ 	PRISM_DEV(0x0846, 0x4110, "NetGear MA111"),
+ 	PRISM_DEV(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter"),
+-	PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB Wireless USB Adapter"),
+-	PRISM_DEV(0x2001, 0x3700, "DWL-122 Wireless USB Adapter"),
+-	PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter"),
++	PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB USB Wireless Adapter"),
++	PRISM_DEV(0x2001, 0x3700, "DWL-122 USB Wireless Adapter"),
++	PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F USB Wireless Adapter"),
+ 	PRISM_DEV(0x50c2, 0x4013, "Averatec USB WLAN Adapter"),
+-	PRISM_DEV(0x2c02, 0x14ea, "Planex GW-US11H WLAN USB Adapter"),
+-	PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter"),
++	PRISM_DEV(0x2c02, 0x14ea, "Planex GW-US11H USB WLAN Adapter"),
++	PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 USB WLAN Adapter"),
+ 	PRISM_DEV(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter"),
+ 	PRISM_DEV(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter"),
+-	PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter"),
+-	PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter"),
++	PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan USB 802.11b Adapter"),
++	PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA USB 802.11b Adapter"),
+ 	PRISM_DEV(0x0bb2, 0x0302, "Ambit Microsystems Corp."),
+-	PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter"),
++	PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 USB 802.11b Adapter"),
+ 	PRISM_DEV(0x0543, 0x0f01,
+ 		  "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)"),
+ 	PRISM_DEV(0x067c, 0x1022,
+-		  "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter"),
++		  "Siemens SpeedStream 1022 11Mbps USB WLAN Adapter"),
+ 	PRISM_DEV(0x049f, 0x0033,
+ 		  "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter"),
+ 	{ } /* terminator */
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 5d0f51822414e..c142a67dc7cc2 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -533,102 +533,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
+ 	spin_lock_bh(&se_nacl->nacl_sess_lock);
+ 	se_sess = se_nacl->nacl_sess;
+ 	if (!se_sess) {
+-		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
++		rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
+ 			" Endpoint: %s\n", se_nacl->initiatorname);
+ 	} else {
+ 		sess = se_sess->fabric_sess_ptr;
+ 
+-		rb += sprintf(page+rb, "InitiatorName: %s\n",
++		rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
+ 			sess->sess_ops->InitiatorName);
+-		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
++		rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
+ 			sess->sess_ops->InitiatorAlias);
+ 
+-		rb += sprintf(page+rb,
++		rb += sysfs_emit_at(page, rb,
+ 			      "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+ 			      sess->sid, sess->isid, sess->tsih);
+-		rb += sprintf(page+rb, "SessionType: %s\n",
++		rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
+ 				(sess->sess_ops->SessionType) ?
+ 				"Discovery" : "Normal");
+-		rb += sprintf(page+rb, "Session State: ");
++		rb += sysfs_emit_at(page, rb, "Session State: ");
+ 		switch (sess->session_state) {
+ 		case TARG_SESS_STATE_FREE:
+-			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
+ 			break;
+ 		case TARG_SESS_STATE_ACTIVE:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
+ 			break;
+ 		case TARG_SESS_STATE_LOGGED_IN:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ 			break;
+ 		case TARG_SESS_STATE_FAILED:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
+ 			break;
+ 		case TARG_SESS_STATE_IN_CONTINUE:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ 			break;
+ 		default:
+-			rb += sprintf(page+rb, "ERROR: Unknown Session"
++			rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
+ 					" State!\n");
+ 			break;
+ 		}
+ 
+-		rb += sprintf(page+rb, "---------------------[iSCSI Session"
++		rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
+ 				" Values]-----------------------\n");
+-		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
++		rb += sysfs_emit_at(page, rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
+ 				"  :  MaxCmdSN  :     ITT    :     TTT\n");
+ 		max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
+-		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
++		rb += sysfs_emit_at(page, rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
+ 				"   0x%08x   0x%08x\n",
+ 			sess->cmdsn_window,
+ 			(max_cmd_sn - sess->exp_cmd_sn) + 1,
+ 			sess->exp_cmd_sn, max_cmd_sn,
+ 			sess->init_task_tag, sess->targ_xfer_tag);
+-		rb += sprintf(page+rb, "----------------------[iSCSI"
++		rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
+ 				" Connections]-------------------------\n");
+ 
+ 		spin_lock(&sess->conn_lock);
+ 		list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+-			rb += sprintf(page+rb, "CID: %hu  Connection"
++			rb += sysfs_emit_at(page, rb, "CID: %hu  Connection"
+ 					" State: ", conn->cid);
+ 			switch (conn->conn_state) {
+ 			case TARG_CONN_STATE_FREE:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_FREE\n");
+ 				break;
+ 			case TARG_CONN_STATE_XPT_UP:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_XPT_UP\n");
+ 				break;
+ 			case TARG_CONN_STATE_IN_LOGIN:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_IN_LOGIN\n");
+ 				break;
+ 			case TARG_CONN_STATE_LOGGED_IN:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_LOGGED_IN\n");
+ 				break;
+ 			case TARG_CONN_STATE_IN_LOGOUT:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_IN_LOGOUT\n");
+ 				break;
+ 			case TARG_CONN_STATE_LOGOUT_REQUESTED:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+ 				break;
+ 			case TARG_CONN_STATE_CLEANUP_WAIT:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_CLEANUP_WAIT\n");
+ 				break;
+ 			default:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"ERROR: Unknown Connection State!\n");
+ 				break;
+ 			}
+ 
+-			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
++			rb += sysfs_emit_at(page, rb, "   Address %pISc %s", &conn->login_sockaddr,
+ 				(conn->network_transport == ISCSI_TCP) ?
+ 				"TCP" : "SCTP");
+-			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
++			rb += sysfs_emit_at(page, rb, "  StatSN: 0x%08x\n",
+ 				conn->stat_sn);
+ 		}
+ 		spin_unlock(&sess->conn_lock);
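
The hunk above converts the iSCSI session stat output from open-coded sprintf(page+rb, ...) to sysfs_emit_at(), which clamps every write to the single PAGE_SIZE buffer that backs a sysfs/configfs read. A minimal sketch of the semantics it provides (illustrative, not the kernel's exact implementation):

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Append at offset 'at', refusing to write past the one-page buffer. */
static int emit_at_sketch(char *buf, int at, const char *fmt, ...)
{
	va_list args;
	int len;

	if (WARN_ON(at < 0 || at >= PAGE_SIZE))
		return 0;
	va_start(args, fmt);
	len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args);	/* truncates safely */
	va_end(args);
	return len;	/* callers accumulate: rb += emit_at_sketch(page, rb, ...); */
}
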
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 687adc9e086ca..0686882bcbda3 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+ 		percpu_ref_put(&cmd_cnt->refcnt);
+ 
+ 	percpu_ref_exit(&cmd_cnt->refcnt);
++	kfree(cmd_cnt);
+ }
+ EXPORT_SYMBOL_GPL(target_free_cmd_counter);
+ 
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+index 66afa9bea6bfe..71366a4cea22c 100644
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -1255,19 +1255,14 @@ static void cpm_uart_console_write(struct console *co, const char *s,
+ {
+ 	struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ 	unsigned long flags;
+-	int nolock = oops_in_progress;
+ 
+-	if (unlikely(nolock)) {
++	if (unlikely(oops_in_progress)) {
+ 		local_irq_save(flags);
+-	} else {
+-		spin_lock_irqsave(&pinfo->port.lock, flags);
+-	}
+-
+-	cpm_uart_early_write(pinfo, s, count, true);
+-
+-	if (unlikely(nolock)) {
++		cpm_uart_early_write(pinfo, s, count, true);
+ 		local_irq_restore(flags);
+ 	} else {
++		spin_lock_irqsave(&pinfo->port.lock, flags);
++		cpm_uart_early_write(pinfo, s, count, true);
+ 		spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ 	}
+ }
+diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
+index 884e2301237f4..1168dbeed2ce0 100644
+--- a/drivers/usb/cdns3/cdns3-plat.c
++++ b/drivers/usb/cdns3/cdns3-plat.c
+@@ -255,9 +255,10 @@ static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+ 	cdns3_set_platform_suspend(cdns->dev, false, false);
+ 
+ 	spin_lock_irqsave(&cdns->lock, flags);
+-	cdns_resume(cdns, !PMSG_IS_AUTO(msg));
++	cdns_resume(cdns);
+ 	cdns->in_lpm = false;
+ 	spin_unlock_irqrestore(&cdns->lock, flags);
++	cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
+ 	if (cdns->wakeup_pending) {
+ 		cdns->wakeup_pending = false;
+ 		enable_irq(cdns->wakeup_irq);
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index 7b151f5af3ccb..0725668ffea4c 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -208,8 +208,9 @@ static int __maybe_unused cdnsp_pci_resume(struct device *dev)
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&cdns->lock, flags);
+-	ret = cdns_resume(cdns, 1);
++	ret = cdns_resume(cdns);
+ 	spin_unlock_irqrestore(&cdns->lock, flags);
++	cdns_set_active(cdns, 1);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index dbcdf3b24b477..7b20d2d5c262e 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -522,9 +522,8 @@ int cdns_suspend(struct cdns *cdns)
+ }
+ EXPORT_SYMBOL_GPL(cdns_suspend);
+ 
+-int cdns_resume(struct cdns *cdns, u8 set_active)
++int cdns_resume(struct cdns *cdns)
+ {
+-	struct device *dev = cdns->dev;
+ 	enum usb_role real_role;
+ 	bool role_changed = false;
+ 	int ret = 0;
+@@ -556,15 +555,23 @@ int cdns_resume(struct cdns *cdns, u8 set_active)
+ 	if (cdns->roles[cdns->role]->resume)
+ 		cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
+ 
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cdns_resume);
++
++void cdns_set_active(struct cdns *cdns, u8 set_active)
++{
++	struct device *dev = cdns->dev;
++
+ 	if (set_active) {
+ 		pm_runtime_disable(dev);
+ 		pm_runtime_set_active(dev);
+ 		pm_runtime_enable(dev);
+ 	}
+ 
+-	return 0;
++	return;
+ }
+-EXPORT_SYMBOL_GPL(cdns_resume);
++EXPORT_SYMBOL_GPL(cdns_set_active);
+ #endif /* CONFIG_PM_SLEEP */
+ 
+ MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 2d332a788871e..4a4dbc2c15615 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -125,10 +125,13 @@ int cdns_init(struct cdns *cdns);
+ int cdns_remove(struct cdns *cdns);
+ 
+ #ifdef CONFIG_PM_SLEEP
+-int cdns_resume(struct cdns *cdns, u8 set_active);
++int cdns_resume(struct cdns *cdns);
+ int cdns_suspend(struct cdns *cdns);
++void cdns_set_active(struct cdns *cdns, u8 set_active);
+ #else /* CONFIG_PM_SLEEP */
+-static inline int cdns_resume(struct cdns *cdns, u8 set_active)
++static inline int cdns_resume(struct cdns *cdns)
++{ return 0; }
++static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
+ { return 0; }
+ static inline int cdns_suspend(struct cdns *cdns)
+ { return 0; }
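
The cdns_resume()/cdns_set_active() split above exists because the runtime-PM calls may sleep and therefore cannot run under the cdns->lock spinlock. Condensed from the hunks above (error handling elided, names as changed above), the resume paths now take this shape:

static int resume_sketch(struct cdns *cdns, pm_message_t msg)
{
	unsigned long flags;

	spin_lock_irqsave(&cdns->lock, flags);
	cdns_resume(cdns);			/* atomic: role detection and resume */
	spin_unlock_irqrestore(&cdns->lock, flags);

	/* Runtime-PM bookkeeping may sleep, so it runs after unlocking. */
	cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
	return 0;
}
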
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index f210b7489fd5b..78cfbe621272c 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -257,6 +257,7 @@ struct ci_hdrc {
+ 	bool				id_event;
+ 	bool				b_sess_valid_event;
+ 	bool				imx28_write_fix;
++	bool				has_portsc_pec_bug;
+ 	bool				supports_runtime_pm;
+ 	bool				in_lpm;
+ 	bool				wakeup_int;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 873539f9a2c0a..a96d8af935382 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -67,11 +67,13 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
+ 
+ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ 	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++		CI_HDRC_HAS_PORTSC_PEC_MISSED |
+ 		CI_HDRC_PMQOS,
+ };
+ 
+ static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
+-	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++		CI_HDRC_HAS_PORTSC_PEC_MISSED,
+ };
+ 
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 51994d655b821..500286a4576b5 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1045,6 +1045,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		CI_HDRC_IMX28_WRITE_FIX);
+ 	ci->supports_runtime_pm = !!(ci->platdata->flags &
+ 		CI_HDRC_SUPPORTS_RUNTIME_PM);
++	ci->has_portsc_pec_bug = !!(ci->platdata->flags &
++		CI_HDRC_HAS_PORTSC_PEC_MISSED);
+ 	platform_set_drvdata(pdev, ci);
+ 
+ 	ret = hw_device_init(ci, base);
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index ebe7400243b12..08af26b762a2d 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -151,6 +151,7 @@ static int host_start(struct ci_hdrc *ci)
+ 	ehci->has_hostpc = ci->hw_bank.lpm;
+ 	ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+ 	ehci->imx28_write_fix = ci->imx28_write_fix;
++	ehci->has_ci_pec_bug = ci->has_portsc_pec_bug;
+ 
+ 	priv = (struct ehci_ci_priv *)ehci->priv;
+ 	priv->reg_vbus = NULL;
+diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
+index 9c5dc1c1a68ea..4aae86b47edfc 100644
+--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
++++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
+@@ -1959,6 +1959,8 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
+ 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+ 		/* Get endpoint status */
+ 		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
++		if (pipe >= USB_MAX_ENDPOINTS)
++			goto stall;
+ 		struct qe_ep *target_ep = &udc->eps[pipe];
+ 		u16 usep;
+ 
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index a1930db0da1c3..802bfafb1012b 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -755,10 +755,14 @@ restart:
+ 
+ 	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+ 	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+-		if (likely ((status & STS_ERR) == 0))
++		if (likely ((status & STS_ERR) == 0)) {
+ 			INCR(ehci->stats.normal);
+-		else
++		} else {
++			/* Force to check port status */
++			if (ehci->has_ci_pec_bug)
++				status |= STS_PCD;
+ 			INCR(ehci->stats.error);
++		}
+ 		bh = 1;
+ 	}
+ 
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index efe30e3be22f7..1aee392e84927 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
+ 
+ 		if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
+ 				|| (ehci->reset_done[i] && time_after_eq(
+-					jiffies, ehci->reset_done[i]))) {
++					jiffies, ehci->reset_done[i]))
++				|| ehci_has_ci_pec_bug(ehci, temp)) {
+ 			if (i < 7)
+ 			    buf [0] |= 1 << (i + 1);
+ 			else
+@@ -875,6 +876,13 @@ int ehci_hub_control(
+ 		if (temp & PORT_PEC)
+ 			status |= USB_PORT_STAT_C_ENABLE << 16;
+ 
++		if (ehci_has_ci_pec_bug(ehci, temp)) {
++			status |= USB_PORT_STAT_C_ENABLE << 16;
++			ehci_info(ehci,
++				"PE is cleared by HW port:%d PORTSC:%08x\n",
++				wIndex + 1, temp);
++		}
++
+ 		if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){
+ 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ 
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index c5c7f87825493..1441e34007961 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -207,6 +207,7 @@ struct ehci_hcd {			/* one per controller */
+ 	unsigned		has_fsl_port_bug:1; /* FreeScale */
+ 	unsigned		has_fsl_hs_errata:1;	/* Freescale HS quirk */
+ 	unsigned		has_fsl_susp_errata:1;	/* NXP SUSP quirk */
++	unsigned		has_ci_pec_bug:1;	/* ChipIdea PEC bug */
+ 	unsigned		big_endian_mmio:1;
+ 	unsigned		big_endian_desc:1;
+ 	unsigned		big_endian_capbase:1;
+@@ -707,6 +708,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
+  */
+ #define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
+ 
++/*
++ * Some Freescale/NXP processors using ChipIdea IP have a bug in which
++ * disabling the port (PE is cleared) does not cause PEC to be asserted
++ * when frame babble is detected.
++ */
++#define ehci_has_ci_pec_bug(e, portsc) \
++	((e)->has_ci_pec_bug && ((e)->command & CMD_PSE) \
++	 && !(portsc & PORT_PEC) && !(portsc & PORT_PE))
++
+ /*
+  * While most USB host controllers implement their registers in
+  * little-endian format, a minority (celleb companion chip) implement
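
Restated as a standalone predicate, the ehci_has_ci_pec_bug() test above says: on an affected ChipIdea core, frame babble makes the hardware clear PE without latching PEC, so "periodic schedule on, PE clear, PEC clear" must be treated as a pending enable change. Bit positions follow the EHCI specification; this sketch is illustrative, not kernel code:

#include <stdbool.h>
#include <stdint.h>

#define PORT_PE		(1u << 2)	/* PORTSC: port enabled */
#define PORT_PEC	(1u << 3)	/* PORTSC: port enable/disable change */
#define CMD_PSE		(1u << 4)	/* USBCMD: periodic schedule enable */

static bool has_ci_pec_bug(bool quirky, uint32_t command, uint32_t portsc)
{
	return quirky && (command & CMD_PSE) &&
	       !(portsc & PORT_PEC) && !(portsc & PORT_PE);
}
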
+diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
+index 5e8edf3881c0d..61a88f68b458c 100644
+--- a/drivers/usb/typec/mux/intel_pmc_mux.c
++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
+@@ -117,6 +117,16 @@ enum {
+ 	  IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) &			\
+ 	 IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT)
+ 
++/* IOM port status register */
++#define IOM_PORT_STATUS_REGS(_offset_, _size_)	((_offset_) | (_size_))
++#define IOM_PORT_STATUS_REGS_SZ_MASK		BIT(0)
++#define IOM_PORT_STATUS_REGS_SZ_4		0
++#define IOM_PORT_STATUS_REGS_SZ_8		1
++#define IOM_PORT_STATUS_REGS_OFFSET(_d_)				\
++	((_d_) & ~IOM_PORT_STATUS_REGS_SZ_MASK)
++#define IOM_PORT_STATUS_REGS_SIZE(_d_)					\
++	(4 << ((_d_) & IOM_PORT_STATUS_REGS_SZ_MASK))
++
+ struct pmc_usb;
+ 
+ struct pmc_usb_port {
+@@ -145,6 +155,7 @@ struct pmc_usb {
+ 	struct acpi_device *iom_adev;
+ 	void __iomem *iom_base;
+ 	u32 iom_port_status_offset;
++	u8 iom_port_status_size;
+ 
+ 	struct dentry *dentry;
+ };
+@@ -160,7 +171,7 @@ static void update_port_status(struct pmc_usb_port *port)
+ 
+ 	port->iom_status = readl(port->pmc->iom_base +
+ 				 port->pmc->iom_port_status_offset +
+-				 port_num * sizeof(u32));
++				 port_num * port->pmc->iom_port_status_size);
+ }
+ 
+ static int sbu_orientation(struct pmc_usb_port *port)
+@@ -589,13 +600,16 @@ err_unregister_switch:
+ /* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
+ static const struct acpi_device_id iom_acpi_ids[] = {
+ 	/* TigerLake */
+-	{ "INTC1072", 0x560, },
++	{ "INTC1072", IOM_PORT_STATUS_REGS(0x560, IOM_PORT_STATUS_REGS_SZ_4) },
+ 
+ 	/* AlderLake */
+-	{ "INTC1079", 0x160, },
++	{ "INTC1079", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
+ 
+ 	/* Meteor Lake */
+-	{ "INTC107A", 0x160, },
++	{ "INTC107A", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
++
++	/* Lunar Lake */
++	{ "INTC10EA", IOM_PORT_STATUS_REGS(0x150, IOM_PORT_STATUS_REGS_SZ_8) },
+ 	{}
+ };
+ 
+@@ -615,7 +629,8 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
+ 	if (!adev)
+ 		return -ENODEV;
+ 
+-	pmc->iom_port_status_offset = (u32)dev_id->driver_data;
++	pmc->iom_port_status_offset = IOM_PORT_STATUS_REGS_OFFSET(dev_id->driver_data);
++	pmc->iom_port_status_size = IOM_PORT_STATUS_REGS_SIZE(dev_id->driver_data);
+ 
+ 	INIT_LIST_HEAD(&resource_list);
+ 	ret = acpi_dev_get_memory_resources(adev, &resource_list);
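
The IOM_PORT_STATUS_REGS() helpers above pack two facts into one driver_data word: every known status-register offset is a multiple of 0x10, so bit 0 is free to select a 4-byte (legacy) or 8-byte (Lunar Lake) per-port stride. A worked example with the Lunar Lake entry (standalone C, macro names shortened):

#include <stdio.h>

#define REGS(off, sz)	((off) | (sz))
#define SZ_MASK		0x1ul
#define OFFSET(d)	((d) & ~SZ_MASK)
#define SIZE(d)		(4u << ((d) & SZ_MASK))	/* 4 or 8 bytes */

int main(void)
{
	unsigned long lnl = REGS(0x150ul, 1);	/* INTC10EA: offset 0x150, SZ_8 */

	printf("offset=0x%lx stride=%u\n", OFFSET(lnl), SIZE(lnl));
	/* prints: offset=0x150 stride=8 */
	return 0;
}
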
+diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
+index 5d393f520fc2f..0b2993fef564b 100644
+--- a/drivers/usb/typec/tcpm/Kconfig
++++ b/drivers/usb/typec/tcpm/Kconfig
+@@ -79,6 +79,7 @@ config TYPEC_WCOVE
+ config TYPEC_QCOM_PMIC
+ 	tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
+ 	depends on ARCH_QCOM || COMPILE_TEST
++	depends on DRM || DRM=n
+ 	help
+ 	  A Type-C port and Power Delivery driver which aggregates two
+ 	  discrete pieces of silicon in the PM8150b PMIC block: the
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+index 9b467a346114e..273b4811b4ac8 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+@@ -17,6 +17,9 @@
+ #include <linux/usb/role.h>
+ #include <linux/usb/tcpm.h>
+ #include <linux/usb/typec_mux.h>
++
++#include <drm/drm_bridge.h>
++
+ #include "qcom_pmic_typec_pdphy.h"
+ #include "qcom_pmic_typec_port.h"
+ 
+@@ -33,6 +36,7 @@ struct pmic_typec {
+ 	struct pmic_typec_port	*pmic_typec_port;
+ 	bool			vbus_enabled;
+ 	struct mutex		lock;		/* VBUS state serialization */
++	struct drm_bridge	bridge;
+ };
+ 
+ #define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
+@@ -146,6 +150,35 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
+ 	return 0;
+ }
+ 
++#if IS_ENABLED(CONFIG_DRM)
++static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
++				     enum drm_bridge_attach_flags flags)
++{
++	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
++}
++
++static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
++	.attach = qcom_pmic_typec_attach,
++};
++
++static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
++{
++	tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
++#ifdef CONFIG_OF
++	tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
++#endif
++	tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
++	tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
++
++	return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
++}
++#else
++static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
++{
++	return 0;
++}
++#endif
++
+ static int qcom_pmic_typec_probe(struct platform_device *pdev)
+ {
+ 	struct pmic_typec *tcpm;
+@@ -208,6 +241,10 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
+ 	mutex_init(&tcpm->lock);
+ 	platform_set_drvdata(pdev, tcpm);
+ 
++	ret = qcom_pmic_typec_init_drm(tcpm);
++	if (ret)
++		return ret;
++
+ 	tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
+ 	if (!tcpm->tcpc.fwnode)
+ 		return -EINVAL;
+diff --git a/fs/attr.c b/fs/attr.c
+index d60dc1edb5268..87af68bb8ad21 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -394,9 +394,25 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		return error;
+ 
+ 	if ((ia_valid & ATTR_MODE)) {
+-		umode_t amode = attr->ia_mode;
++		/*
++		 * Don't allow changing the mode of symlinks:
++		 *
++		 * (1) The vfs doesn't take the mode of symlinks into account
++		 *     during permission checking.
++		 * (2) This has never worked correctly. Most major filesystems
++		 *     did return EOPNOTSUPP due to interactions with POSIX ACLs
++		 *     but did still updated the mode of the symlink.
++		 *     This inconsistency led system call wrapper providers such
++		 *     as libc to block changing the mode of symlinks with
++		 *     EOPNOTSUPP already.
++		 * (3) To even do this in the first place, one would have to use
++		 *     special file descriptors and go to quite some effort.
++		 */
++		if (S_ISLNK(inode->i_mode))
++			return -EOPNOTSUPP;
++
+ 		/* Flag setting protected by i_mutex */
+-		if (is_sxid(amode))
++		if (is_sxid(attr->ia_mode))
+ 			inode->i_flags &= ~S_NOSEC;
+ 	}
+ 
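
Seen from userspace, the rule the comment above describes looks like this (a sketch; glibc already rejects AT_SYMLINK_NOFOLLOW for fchmodat() with EOPNOTSUPP, and with this change the kernel gives the same uniform answer on every filesystem, including via the file-descriptor routes the comment alludes to):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	symlink("/etc/hostname", "demo-link");
	if (fchmodat(AT_FDCWD, "demo-link", 0644, AT_SYMLINK_NOFOLLOW) < 0)
		perror("fchmodat");	/* expected: Operation not supported */
	unlink("demo-link");
	return 0;
}
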
+diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
+index 54c1f8b8b0757..efdc76732faed 100644
+--- a/fs/autofs/waitq.c
++++ b/fs/autofs/waitq.c
+@@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi)
+ 		wq->status = -ENOENT; /* Magic is gone - report failure */
+ 		kfree(wq->name.name - wq->offset);
+ 		wq->name.name = NULL;
+-		wq->wait_ctr--;
+ 		wake_up_interruptible(&wq->queue);
++		if (!--wq->wait_ctr)
++			kfree(wq);
+ 		wq = nwq;
+ 	}
+ 	fput(sbi->pipe);	/* Close the pipe */
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 82324c327a502..5e7a19fca79c4 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -3014,8 +3014,16 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
+ 	btrfs_mark_buffer_dirty(leaf);
+ fail:
+ 	btrfs_release_path(path);
+-	/* We didn't update the block group item, need to revert @commit_used. */
+-	if (ret < 0) {
++	/*
++	 * We didn't update the block group item, need to revert commit_used
++	 * unless the block group item didn't exist yet - this is to prevent a
++	 * race with a concurrent insertion of the block group item, with
++	 * insert_block_group_item(), that happened just after we attempted to
++	 * update. In that case we would reset commit_used to 0 just after the
++	 * insertion set it to a value greater than 0 - if the block group later
++	 * ends up with 0 used bytes, we would incorrectly skip its update.
++	 */
++	if (ret < 0 && ret != -ENOENT) {
+ 		spin_lock(&cache->lock);
+ 		cache->commit_used = old_commit_used;
+ 		spin_unlock(&cache->lock);
+@@ -4273,6 +4281,17 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
+ 	struct btrfs_caching_control *caching_ctl;
+ 	struct rb_node *n;
+ 
++	if (btrfs_is_zoned(info)) {
++		if (info->active_meta_bg) {
++			btrfs_put_block_group(info->active_meta_bg);
++			info->active_meta_bg = NULL;
++		}
++		if (info->active_system_bg) {
++			btrfs_put_block_group(info->active_system_bg);
++			info->active_system_bg = NULL;
++		}
++	}
++
+ 	write_lock(&info->block_group_cache_lock);
+ 	while (!list_empty(&info->caching_block_groups)) {
+ 		caching_ctl = list_entry(info->caching_block_groups.next,
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 6d51db066503b..0f147240ce9b8 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1153,20 +1153,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
+ 		ret = __btrfs_commit_inode_delayed_items(trans, path,
+ 							 curr_node);
+ 		if (ret) {
+-			btrfs_release_delayed_node(curr_node);
+-			curr_node = NULL;
+ 			btrfs_abort_transaction(trans, ret);
+ 			break;
+ 		}
+ 
+ 		prev_node = curr_node;
+ 		curr_node = btrfs_next_delayed_node(curr_node);
++		/*
++		 * See the comment below about releasing path before releasing
++		 * node. If the commit of delayed items was successful the path
++		 * should always be released, but in case of an error, it may
++		 * point to locked extent buffers (a leaf at the very least).
++		 */
++		ASSERT(path->nodes[0] == NULL);
+ 		btrfs_release_delayed_node(prev_node);
+ 	}
+ 
++	/*
++	 * Release the path to avoid a potential deadlock and lockdep splat when
++	 * releasing the delayed node, as that requires taking the delayed node's
++	 * mutex. If another task starts running delayed items before we take
++	 * the mutex, it will first lock the mutex and then it may try to lock
++	 * the same btree path (leaf).
++	 */
++	btrfs_free_path(path);
++
+ 	if (curr_node)
+ 		btrfs_release_delayed_node(curr_node);
+-	btrfs_free_path(path);
+ 	trans->block_rsv = block_rsv;
+ 
+ 	return ret;
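
The two comments above encode a single lock-ordering rule: drop the btree path (and with it the leaf lock) before taking the delayed node's mutex, because concurrent item-runners acquire the mutex first and the leaf second. The shape of the rule, sketched with pthreads (purely illustrative):

#include <pthread.h>

static pthread_mutex_t leaf_lock = PTHREAD_MUTEX_INITIALIZER;	/* the "path" */
static pthread_mutex_t node_mutex = PTHREAD_MUTEX_INITIALIZER;	/* delayed node */

static void commit_then_release(void)
{
	pthread_mutex_lock(&leaf_lock);		/* held while items are committed */
	/* ... commit delayed items ... */
	pthread_mutex_unlock(&leaf_lock);	/* btrfs_free_path() analogue: drop first */

	pthread_mutex_lock(&node_mutex);	/* now safe: no AB-BA inversion */
	pthread_mutex_unlock(&node_mutex);	/* btrfs_release_delayed_node() analogue */
}
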
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 4494883a19abc..681594df7334f 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -525,6 +525,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		struct folio *folio)
+ {
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
++	struct btrfs_subpage_info *spi = fs_info->subpage_info;
+ 	struct btrfs_subpage *subpage;
+ 	struct extent_buffer *eb;
+ 	int cur_bit = 0;
+@@ -538,18 +539,19 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		btrfs_assert_tree_write_locked(eb);
+ 		return filemap_dirty_folio(mapping, folio);
+ 	}
++
++	ASSERT(spi);
+ 	subpage = folio_get_private(folio);
+ 
+-	ASSERT(subpage->dirty_bitmap);
+-	while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
++	for (cur_bit = spi->dirty_offset;
++	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
++	     cur_bit++) {
+ 		unsigned long flags;
+ 		u64 cur;
+-		u16 tmp = (1 << cur_bit);
+ 
+ 		spin_lock_irqsave(&subpage->lock, flags);
+-		if (!(tmp & subpage->dirty_bitmap)) {
++		if (!test_bit(cur_bit, subpage->bitmaps)) {
+ 			spin_unlock_irqrestore(&subpage->lock, flags);
+-			cur_bit++;
+ 			continue;
+ 		}
+ 		spin_unlock_irqrestore(&subpage->lock, flags);
+@@ -562,7 +564,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		btrfs_assert_tree_write_locked(eb);
+ 		free_extent_buffer(eb);
+ 
+-		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
++		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
+ 	}
+ 	return filemap_dirty_folio(mapping, folio);
+ }
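
The stride arithmetic above, worked through with illustrative numbers: with a 16KiB nodesize on 4KiB sectors, each extent buffer owns nodesize >> sectorsize_bits = 4 dirty bits, so the loop body advances cur_bit by three and the for-loop's cur_bit++ supplies the fourth:

#include <stdio.h>

int main(void)
{
	unsigned int nodesize = 16384, sectorsize_bits = 12;	/* illustrative */
	unsigned int bits_per_eb = nodesize >> sectorsize_bits;	/* 4 */

	/* body adds bits_per_eb - 1, the loop increment adds 1 */
	printf("net advance per eb: %u bits\n", (bits_per_eb - 1) + 1);
	return 0;
}
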
+@@ -2391,13 +2393,11 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+-	    memcmp(fs_info->fs_devices->metadata_uuid,
+-		   fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
++	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
++		   BTRFS_FSID_SIZE) != 0) {
+ 		btrfs_err(fs_info,
+ "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+-			fs_info->super_copy->metadata_uuid,
+-			fs_info->fs_devices->metadata_uuid);
++			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
+ 		ret = -EINVAL;
+ 	}
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index e5566827da17e..0917c5f39e3d0 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -402,11 +402,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
+ 		}
+ 	}
+ 
++	WARN_ON(1);
+ 	btrfs_print_leaf(eb);
+ 	btrfs_err(eb->fs_info,
+ 		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
+ 		  eb->start, (unsigned long)iref, type);
+-	WARN_ON(1);
+ 
+ 	return BTRFS_REF_TYPE_INVALID;
+ }
+@@ -869,6 +869,11 @@ again:
+ 		err = -ENOENT;
+ 		goto out;
+ 	} else if (WARN_ON(ret)) {
++		btrfs_print_leaf(path->nodes[0]);
++		btrfs_err(fs_info,
++"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
++			  bytenr, num_bytes, parent, root_objectid, owner,
++			  offset);
+ 		err = -EIO;
+ 		goto out;
+ 	}
+@@ -1079,13 +1084,13 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+ /*
+  * helper to update/remove inline back ref
+  */
+-static noinline_for_stack
+-void update_inline_extent_backref(struct btrfs_path *path,
++static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
+ 				  struct btrfs_extent_inline_ref *iref,
+ 				  int refs_to_mod,
+ 				  struct btrfs_delayed_extent_op *extent_op)
+ {
+ 	struct extent_buffer *leaf = path->nodes[0];
++	struct btrfs_fs_info *fs_info = leaf->fs_info;
+ 	struct btrfs_extent_item *ei;
+ 	struct btrfs_extent_data_ref *dref = NULL;
+ 	struct btrfs_shared_data_ref *sref = NULL;
+@@ -1098,18 +1103,33 @@ void update_inline_extent_backref(struct btrfs_path *path,
+ 
+ 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ 	refs = btrfs_extent_refs(leaf, ei);
+-	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
++	if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
++		struct btrfs_key key;
++		u32 extent_size;
++
++		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
++		if (key.type == BTRFS_METADATA_ITEM_KEY)
++			extent_size = fs_info->nodesize;
++		else
++			extent_size = key.offset;
++		btrfs_print_leaf(leaf);
++		btrfs_err(fs_info,
++	"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
++			  key.objectid, extent_size, refs_to_mod, refs);
++		return -EUCLEAN;
++	}
+ 	refs += refs_to_mod;
+ 	btrfs_set_extent_refs(leaf, ei, refs);
+ 	if (extent_op)
+ 		__run_delayed_extent_op(extent_op, leaf, ei);
+ 
++	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
+ 	/*
+-	 * If type is invalid, we should have bailed out after
+-	 * lookup_inline_extent_backref().
++	 * Function btrfs_get_extent_inline_ref_type() has already printed
++	 * error messages.
+ 	 */
+-	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
+-	ASSERT(type != BTRFS_REF_TYPE_INVALID);
++	if (unlikely(type == BTRFS_REF_TYPE_INVALID))
++		return -EUCLEAN;
+ 
+ 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
+ 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+@@ -1119,10 +1139,43 @@ void update_inline_extent_backref(struct btrfs_path *path,
+ 		refs = btrfs_shared_data_ref_count(leaf, sref);
+ 	} else {
+ 		refs = 1;
+-		BUG_ON(refs_to_mod != -1);
++		/*
++		 * For tree blocks we can only drop one ref for it, and tree
++		 * blocks should not have refs > 1.
++		 *
++		 * Furthermore if we're inserting a new inline backref, we
++		 * won't reach this path either. That would be
++		 * setup_inline_extent_backref().
++		 */
++		if (unlikely(refs_to_mod != -1)) {
++			struct btrfs_key key;
++
++			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
++
++			btrfs_print_leaf(leaf);
++			btrfs_err(fs_info,
++			"invalid refs_to_mod for tree block %llu, has %d expect -1",
++				  key.objectid, refs_to_mod);
++			return -EUCLEAN;
++		}
+ 	}
+ 
+-	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
++	if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
++		struct btrfs_key key;
++		u32 extent_size;
++
++		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
++		if (key.type == BTRFS_METADATA_ITEM_KEY)
++			extent_size = fs_info->nodesize;
++		else
++			extent_size = key.offset;
++		btrfs_print_leaf(leaf);
++		btrfs_err(fs_info,
++"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
++			  (unsigned long)iref, key.objectid, extent_size,
++			  refs_to_mod, refs);
++		return -EUCLEAN;
++	}
+ 	refs += refs_to_mod;
+ 
+ 	if (refs > 0) {
+@@ -1142,6 +1195,7 @@ void update_inline_extent_backref(struct btrfs_path *path,
+ 		btrfs_truncate_item(path, item_size, 1);
+ 	}
+ 	btrfs_mark_buffer_dirty(leaf);
++	return 0;
+ }
+ 
+ static noinline_for_stack
+@@ -1170,7 +1224,7 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
+ 				   bytenr, num_bytes, root_objectid, path->slots[0]);
+ 			return -EUCLEAN;
+ 		}
+-		update_inline_extent_backref(path, iref, refs_to_add, extent_op);
++		ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
+ 	} else if (ret == -ENOENT) {
+ 		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
+ 					    root_objectid, owner, offset,
+@@ -1190,7 +1244,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
+ 
+ 	BUG_ON(!is_data && refs_to_drop != 1);
+ 	if (iref)
+-		update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
++		ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
+ 	else if (is_data)
+ 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ 	else
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 90ad3006ef3a7..2ebc982e8eccb 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1877,11 +1877,10 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
+  * previous call.
+  * Return <0 for fatal error.
+  */
+-static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+-			  struct extent_buffer **eb_context)
++static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
+ {
++	struct writeback_control *wbc = ctx->wbc;
+ 	struct address_space *mapping = page->mapping;
+-	struct btrfs_block_group *cache = NULL;
+ 	struct extent_buffer *eb;
+ 	int ret;
+ 
+@@ -1908,7 +1907,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+ 		return 0;
+ 	}
+ 
+-	if (eb == *eb_context) {
++	if (eb == ctx->eb) {
+ 		spin_unlock(&mapping->private_lock);
+ 		return 0;
+ 	}
+@@ -1917,34 +1916,25 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+ 	if (!ret)
+ 		return 0;
+ 
+-	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+-		/*
+-		 * If for_sync, this hole will be filled with
+-		 * trasnsaction commit.
+-		 */
+-		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+-			ret = -EAGAIN;
+-		else
++	ctx->eb = eb;
++
++	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
++	if (ret) {
++		if (ret == -EBUSY)
+ 			ret = 0;
+ 		free_extent_buffer(eb);
+ 		return ret;
+ 	}
+ 
+-	*eb_context = eb;
+-
+ 	if (!lock_extent_buffer_for_io(eb, wbc)) {
+-		btrfs_revert_meta_write_pointer(cache, eb);
+-		if (cache)
+-			btrfs_put_block_group(cache);
+ 		free_extent_buffer(eb);
+ 		return 0;
+ 	}
+-	if (cache) {
+-		/*
+-		 * Implies write in zoned mode. Mark the last eb in a block group.
+-		 */
+-		btrfs_schedule_zone_finish_bg(cache, eb);
+-		btrfs_put_block_group(cache);
++	/* Implies write in zoned mode. */
++	if (ctx->zoned_bg) {
++		/* Mark the last eb in the block group. */
++		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
++		ctx->zoned_bg->meta_write_pointer += eb->len;
+ 	}
+ 	write_one_eb(eb, wbc);
+ 	free_extent_buffer(eb);
+@@ -1954,7 +1944,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+ int btree_write_cache_pages(struct address_space *mapping,
+ 				   struct writeback_control *wbc)
+ {
+-	struct extent_buffer *eb_context = NULL;
++	struct btrfs_eb_write_context ctx = { .wbc = wbc };
+ 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
+ 	int ret = 0;
+ 	int done = 0;
+@@ -1996,7 +1986,7 @@ retry:
+ 		for (i = 0; i < nr_folios; i++) {
+ 			struct folio *folio = fbatch.folios[i];
+ 
+-			ret = submit_eb_page(&folio->page, wbc, &eb_context);
++			ret = submit_eb_page(&folio->page, &ctx);
+ 			if (ret == 0)
+ 				continue;
+ 			if (ret < 0) {
+@@ -2057,6 +2047,9 @@ retry:
+ 		ret = 0;
+ 	if (!ret && BTRFS_FS_ERROR(fs_info))
+ 		ret = -EROFS;
++
++	if (ctx.zoned_bg)
++		btrfs_put_block_group(ctx.zoned_bg);
+ 	btrfs_zoned_meta_io_unlock(fs_info);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index c5fae3a7d911b..f61b7896320a1 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -94,6 +94,13 @@ struct extent_buffer {
+ #endif
+ };
+ 
++struct btrfs_eb_write_context {
++	struct writeback_control *wbc;
++	struct extent_buffer *eb;
++	/* Block group @eb resides in. Only used for zoned mode. */
++	struct btrfs_block_group *zoned_bg;
++};
++
+ /*
+  * Get the correct offset inside the page of extent buffer.
+  *
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index 203d2a2678287..1f2d331121064 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -766,6 +766,9 @@ struct btrfs_fs_info {
+ 	u64 data_reloc_bg;
+ 	struct mutex zoned_data_reloc_io_lock;
+ 
++	struct btrfs_block_group *active_meta_bg;
++	struct btrfs_block_group *active_system_bg;
++
+ 	u64 nr_global_roots;
+ 
+ 	spinlock_t zone_active_bgs_lock;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index a895d105464b6..d27b0d86b8e2c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1958,6 +1958,13 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
+ 				goto out_put;
+ 			}
+ 
++			/*
++			 * We don't need the path anymore, so release it and
++			 * avoid deadlocks and lockdep warnings in case
++			 * btrfs_iget() needs to look up the inode from its root
++			 * btree and lock the same leaf.
++			 */
++			btrfs_release_path(path);
+ 			temp_inode = btrfs_iget(sb, key2.objectid, root);
+ 			if (IS_ERR(temp_inode)) {
+ 				ret = PTR_ERR(temp_inode);
+@@ -1978,7 +1985,6 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
+ 				goto out_put;
+ 			}
+ 
+-			btrfs_release_path(path);
+ 			key.objectid = key.offset;
+ 			key.offset = (u64)-1;
+ 			dirid = key.objectid;
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index a629532283bc3..5b1aac3fc8e4a 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -635,7 +635,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
+ 			refcount_inc(&trans->use_count);
+ 		spin_unlock(&fs_info->trans_lock);
+ 
+-		ASSERT(trans);
++		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
+ 		if (trans) {
+ 			if (atomic_dec_and_test(&trans->pending_ordered))
+ 				wake_up(&trans->pending_wait);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6aa9bf3661ac8..51070c0d4141e 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -681,6 +681,14 @@ error_free_page:
+ 	return -EINVAL;
+ }
+ 
++u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
++{
++	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
++				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
++
++	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
++}
++
+ /*
+  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
+  * being created with a disk that has already completed its fsid change. Such
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index b8c51f16ba867..0f87057bb575f 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -749,5 +749,6 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
+ 
+ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
++u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);
+ 
+ #endif
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index d9e6df2da272c..f97e927499d7a 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -65,6 +65,9 @@
+ 
+ #define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
+ 
++static void wait_eb_writebacks(struct btrfs_block_group *block_group);
++static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
++
+ static inline bool sb_zone_is_full(const struct blk_zone *zone)
+ {
+ 	return (zone->cond == BLK_ZONE_COND_FULL) ||
+@@ -1758,41 +1761,121 @@ out:
+ 	}
+ }
+ 
+-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+-				    struct extent_buffer *eb,
+-				    struct btrfs_block_group **cache_ret)
++static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
++			       struct btrfs_block_group **active_bg)
+ {
+-	struct btrfs_block_group *cache;
+-	bool ret = true;
++	const struct writeback_control *wbc = ctx->wbc;
++	struct btrfs_block_group *block_group = ctx->zoned_bg;
++	struct btrfs_fs_info *fs_info = block_group->fs_info;
+ 
+-	if (!btrfs_is_zoned(fs_info))
++	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
+ 		return true;
+ 
+-	cache = btrfs_lookup_block_group(fs_info, eb->start);
+-	if (!cache)
+-		return true;
++	if (fs_info->treelog_bg == block_group->start) {
++		if (!btrfs_zone_activate(block_group)) {
++			int ret_fin = btrfs_zone_finish_one_bg(fs_info);
+ 
+-	if (cache->meta_write_pointer != eb->start) {
+-		btrfs_put_block_group(cache);
+-		cache = NULL;
+-		ret = false;
+-	} else {
+-		cache->meta_write_pointer = eb->start + eb->len;
+-	}
++			if (ret_fin != 1 || !btrfs_zone_activate(block_group))
++				return false;
++		}
++	} else if (*active_bg != block_group) {
++		struct btrfs_block_group *tgt = *active_bg;
+ 
+-	*cache_ret = cache;
++		/* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
++		lockdep_assert_held(&fs_info->zoned_meta_io_lock);
+ 
+-	return ret;
++		if (tgt) {
++			/*
++			 * If there is an unsent IO left in the allocated area,
++			 * we cannot wait for it, as doing so may cause a deadlock.
++			 */
++			if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
++				if (wbc->sync_mode == WB_SYNC_NONE ||
++				    (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
++					return false;
++			}
++
++			/* Pivot active metadata/system block group. */
++			btrfs_zoned_meta_io_unlock(fs_info);
++			wait_eb_writebacks(tgt);
++			do_zone_finish(tgt, true);
++			btrfs_zoned_meta_io_lock(fs_info);
++			if (*active_bg == tgt) {
++				btrfs_put_block_group(tgt);
++				*active_bg = NULL;
++			}
++		}
++		if (!btrfs_zone_activate(block_group))
++			return false;
++		if (*active_bg != block_group) {
++			ASSERT(*active_bg == NULL);
++			*active_bg = block_group;
++			btrfs_get_block_group(block_group);
++		}
++	}
++
++	return true;
+ }
+ 
+-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+-				     struct extent_buffer *eb)
++/*
++ * Check if @ctx->eb is aligned to the write pointer.
++ *
++ * Return:
++ *   0:        @ctx->eb is at the write pointer. You can write it.
++ *   -EAGAIN:  There is a hole. The caller should handle the case.
++ *   -EBUSY:   There is a hole, but the caller can just bail out.
++ */
++int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
++				   struct btrfs_eb_write_context *ctx)
+ {
+-	if (!btrfs_is_zoned(eb->fs_info) || !cache)
+-		return;
++	const struct writeback_control *wbc = ctx->wbc;
++	const struct extent_buffer *eb = ctx->eb;
++	struct btrfs_block_group *block_group = ctx->zoned_bg;
++
++	if (!btrfs_is_zoned(fs_info))
++		return 0;
++
++	if (block_group) {
++		if (block_group->start > eb->start ||
++		    block_group->start + block_group->length <= eb->start) {
++			btrfs_put_block_group(block_group);
++			block_group = NULL;
++			ctx->zoned_bg = NULL;
++		}
++	}
++
++	if (!block_group) {
++		block_group = btrfs_lookup_block_group(fs_info, eb->start);
++		if (!block_group)
++			return 0;
++		ctx->zoned_bg = block_group;
++	}
+ 
+-	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
+-	cache->meta_write_pointer = eb->start;
++	if (block_group->meta_write_pointer == eb->start) {
++		struct btrfs_block_group **tgt;
++
++		if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
++			return 0;
++
++		if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
++			tgt = &fs_info->active_system_bg;
++		else
++			tgt = &fs_info->active_meta_bg;
++		if (check_bg_is_active(ctx, tgt))
++			return 0;
++	}
++
++	/*
++	 * Since we may release fs_info->zoned_meta_io_lock, someone can already
++	 * start writing this eb. In that case, we can just bail out.
++	 */
++	if (block_group->meta_write_pointer > eb->start)
++		return -EBUSY;
++
++	/* If for_sync, this hole will be filled with transaction commit. */
++	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
++		return -EAGAIN;
++	return -EBUSY;
+ }
+ 
+ int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
+diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
+index 27322b926038c..74ec37a25808a 100644
+--- a/fs/btrfs/zoned.h
++++ b/fs/btrfs/zoned.h
+@@ -58,11 +58,8 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ 			    struct extent_buffer *eb);
+ bool btrfs_use_zone_append(struct btrfs_bio *bbio);
+ void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
+-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+-				    struct extent_buffer *eb,
+-				    struct btrfs_block_group **cache_ret);
+-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+-				     struct extent_buffer *eb);
++int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
++				   struct btrfs_eb_write_context *ctx);
+ int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
+ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
+ 				  u64 physical_start, u64 physical_pos);
+@@ -189,17 +186,10 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
+ {
+ }
+ 
+-static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+-			       struct extent_buffer *eb,
+-			       struct btrfs_block_group **cache_ret)
+-{
+-	return true;
+-}
+-
+-static inline void btrfs_revert_meta_write_pointer(
+-						struct btrfs_block_group *cache,
+-						struct extent_buffer *eb)
++static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
++						 struct btrfs_eb_write_context *ctx)
+ {
++	return 0;
+ }
+ 
+ static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index e028fafa04f38..996271473609a 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -32,10 +32,16 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	u64 storage_space, remaining_space, max_variable_size;
+ 	efi_status_t status;
+ 
+-	status = efivar_query_variable_info(attr, &storage_space, &remaining_space,
+-					    &max_variable_size);
+-	if (status != EFI_SUCCESS)
+-		return efi_status_to_err(status);
++	/* Some UEFI firmware does not implement QueryVariableInfo() */
++	storage_space = remaining_space = 0;
++	if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) {
++		status = efivar_query_variable_info(attr, &storage_space,
++						    &remaining_space,
++						    &max_variable_size);
++		if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED)
++			pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n",
++					    status);
++	}
+ 
+ 	/*
+ 	 * This is not a normal filesystem, so no point in pretending it has a block
+diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
+index 8906ba479aafb..89517937d36c4 100644
+--- a/fs/ext2/xattr.c
++++ b/fs/ext2/xattr.c
+@@ -742,10 +742,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
+ 			/* We need to allocate a new block */
+ 			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
+ 						EXT2_I(inode)->i_block_group);
+-			int block = ext2_new_block(inode, goal, &error);
++			ext2_fsblk_t block = ext2_new_block(inode, goal, &error);
+ 			if (error)
+ 				goto cleanup;
+-			ea_idebug(inode, "creating block %d", block);
++			ea_idebug(inode, "creating block %lu", block);
+ 
+ 			new_bh = sb_getblk(sb, block);
+ 			if (unlikely(!new_bh)) {
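
The int-to-ext2_fsblk_t change above matters on large filesystems: block numbers are unsigned long and can exceed INT_MAX, so storing one in an int sign-flips it before it ever reaches sb_getblk(). The failure mode, standalone:

#include <stdio.h>

int main(void)
{
	unsigned long block = 3000000000UL;	/* a valid block number > INT_MAX */
	int truncated = (int)block;		/* the old declaration's type */

	printf("%lu stored as %d\n", block, truncated);	/* negative garbage */
	return 0;
}
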
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a197ef71b7b02..bd7557d8dec41 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4223,12 +4223,13 @@ ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_
+ 
+ static inline void
+ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
+-			  ext4_lblk_t start, ext4_lblk_t end)
++			  ext4_lblk_t start, loff_t end)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ 	struct ext4_prealloc_space *tmp_pa;
+-	ext4_lblk_t tmp_pa_start, tmp_pa_end;
++	ext4_lblk_t tmp_pa_start;
++	loff_t tmp_pa_end;
+ 	struct rb_node *iter;
+ 
+ 	read_lock(&ei->i_prealloc_lock);
+@@ -4237,7 +4238,7 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
+ 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ 				  pa_node.inode_node);
+ 		tmp_pa_start = tmp_pa->pa_lstart;
+-		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
++		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
+ 
+ 		spin_lock(&tmp_pa->pa_lock);
+ 		if (tmp_pa->pa_deleted == 0)
+@@ -4259,14 +4260,14 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
+  */
+ static inline void
+ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
+-			  ext4_lblk_t *start, ext4_lblk_t *end)
++			  ext4_lblk_t *start, loff_t *end)
+ {
+ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ 	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
+ 	struct rb_node *iter;
+-	ext4_lblk_t new_start, new_end;
+-	ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1;
++	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
++	loff_t new_end, tmp_pa_end, left_pa_end = -1;
+ 
+ 	new_start = *start;
+ 	new_end = *end;
+@@ -4285,7 +4286,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
+ 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
+ 				  pa_node.inode_node);
+ 		tmp_pa_start = tmp_pa->pa_lstart;
+-		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
++		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
+ 
+ 		/* PA must not overlap original request */
+ 		spin_lock(&tmp_pa->pa_lock);
+@@ -4365,8 +4366,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
+ 	}
+ 
+ 	if (left_pa) {
+-		left_pa_end =
+-			left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len);
++		left_pa_end = pa_logical_end(sbi, left_pa);
+ 		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
+ 	}
+ 
+@@ -4405,8 +4405,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ 	struct ext4_super_block *es = sbi->s_es;
+ 	int bsbits, max;
+-	ext4_lblk_t end;
+-	loff_t size, start_off;
++	loff_t size, start_off, end;
+ 	loff_t orig_size __maybe_unused;
+ 	ext4_lblk_t start;
+ 
+@@ -4433,7 +4432,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 
+ 	/* first, let's learn actual file size
+ 	 * given current request is allocated */
+-	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
++	size = extent_logical_end(sbi, &ac->ac_o_ex);
+ 	size = size << bsbits;
+ 	if (size < i_size_read(ac->ac_inode))
+ 		size = i_size_read(ac->ac_inode);
+@@ -4767,7 +4766,6 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ 	struct ext4_locality_group *lg;
+ 	struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
+-	loff_t tmp_pa_end;
+ 	struct rb_node *iter;
+ 	ext4_fsblk_t goal_block;
+ 
+@@ -4863,9 +4861,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+ 	 * pa can possibly satisfy the request hence check if it overlaps
+ 	 * original logical start and stop searching if it doesn't.
+ 	 */
+-	tmp_pa_end = (loff_t)tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+-
+-	if (ac->ac_o_ex.fe_logical >= tmp_pa_end) {
++	if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
+ 		spin_unlock(&tmp_pa->pa_lock);
+ 		goto try_group_pa;
+ 	}
+@@ -5181,8 +5177,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 	pa = ac->ac_pa;
+ 
+ 	if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
+-		int new_bex_start;
+-		int new_bex_end;
++		struct ext4_free_extent ex = {
++			.fe_logical = ac->ac_g_ex.fe_logical,
++			.fe_len = ac->ac_orig_goal_len,
++		};
++		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+ 
+ 		/* we can't allocate as much as normalizer wants.
+ 		 * so, found space must get proper lstart
+@@ -5201,29 +5200,23 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 		 *    still cover original start
+ 		 * 3. Else, keep the best ex at start of original request.
+ 		 */
+-		new_bex_end = ac->ac_g_ex.fe_logical +
+-			EXT4_C2B(sbi, ac->ac_orig_goal_len);
+-		new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+-		if (ac->ac_o_ex.fe_logical >= new_bex_start)
+-			goto adjust_bex;
++		ex.fe_len = ac->ac_b_ex.fe_len;
+ 
+-		new_bex_start = ac->ac_g_ex.fe_logical;
+-		new_bex_end =
+-			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+-		if (ac->ac_o_ex.fe_logical < new_bex_end)
++		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
++		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
+ 			goto adjust_bex;
+ 
+-		new_bex_start = ac->ac_o_ex.fe_logical;
+-		new_bex_end =
+-			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++		ex.fe_logical = ac->ac_g_ex.fe_logical;
++		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
++			goto adjust_bex;
+ 
++		ex.fe_logical = ac->ac_o_ex.fe_logical;
+ adjust_bex:
+-		ac->ac_b_ex.fe_logical = new_bex_start;
++		ac->ac_b_ex.fe_logical = ex.fe_logical;
+ 
+ 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+-		BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
+-				      EXT4_C2B(sbi, ac->ac_orig_goal_len)));
++		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
+ 	}
+ 
+ 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
+@@ -5770,7 +5763,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
+ 
+ 	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
+ 	inode_pa_eligible = true;
+-	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
++	size = extent_logical_end(sbi, &ac->ac_o_ex);
+ 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
+ 		>> bsbits;
+ 
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index df6b5e7c22741..d7aeb5da7d867 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -233,6 +233,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+ 		(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
+ }
+ 
++static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
++					struct ext4_free_extent *fex)
++{
++	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
++	return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
++}
++
++static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
++				    struct ext4_prealloc_space *pa)
++{
++	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
++	return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
++}
++
+ typedef int (*ext4_mballoc_query_range_fn)(
+ 	struct super_block		*sb,
+ 	ext4_group_t			agno,
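
The two helpers above fix a 32-bit wraparound: pa_lstart and fe_logical are ext4_lblk_t (u32), so start + len can pass 2^32 - 1 and silently wrap when computed in the same type; widening to loff_t first keeps the true end. The failure mode, standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lstart = 0xfffff000u;	/* logical start near the u32 limit */
	uint32_t len = 0x2000;		/* extent length in blocks */

	uint32_t wrapped = lstart + len;		/* old ext4_lblk_t math */
	int64_t correct = (int64_t)lstart + len;	/* the loff_t fix */

	printf("wrapped=0x%x correct=0x%llx\n",
	       wrapped, (unsigned long long)correct);
	return 0;
}
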
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 6bcc3770ee19f..46a2ddd9259ee 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -343,17 +343,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+ 						   struct buffer_head *bh)
+ {
+ 	struct ext4_dir_entry_tail *t;
++	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+ 
+ #ifdef PARANOID
+ 	struct ext4_dir_entry *d, *top;
+ 
+ 	d = (struct ext4_dir_entry *)bh->b_data;
+ 	top = (struct ext4_dir_entry *)(bh->b_data +
+-		(EXT4_BLOCK_SIZE(inode->i_sb) -
+-		 sizeof(struct ext4_dir_entry_tail)));
+-	while (d < top && d->rec_len)
++		(blocksize - sizeof(struct ext4_dir_entry_tail)));
++	while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
+ 		d = (struct ext4_dir_entry *)(((void *)d) +
+-		    le16_to_cpu(d->rec_len));
++		    ext4_rec_len_from_disk(d->rec_len, blocksize));
+ 
+ 	if (d != top)
+ 		return NULL;
+@@ -364,7 +364,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+ #endif
+ 
+ 	if (t->det_reserved_zero1 ||
+-	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
++	    (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
++	     sizeof(struct ext4_dir_entry_tail)) ||
+ 	    t->det_reserved_zero2 ||
+ 	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
+ 		return NULL;
+@@ -445,13 +446,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+ 	struct ext4_dir_entry *dp;
+ 	struct dx_root_info *root;
+ 	int count_offset;
++	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
++	unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
+ 
+-	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
++	if (rlen == blocksize)
+ 		count_offset = 8;
+-	else if (le16_to_cpu(dirent->rec_len) == 12) {
++	else if (rlen == 12) {
+ 		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
+-		if (le16_to_cpu(dp->rec_len) !=
+-		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
++		if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
+ 			return NULL;
+ 		root = (struct dx_root_info *)(((void *)dp + 12));
+ 		if (root->reserved_zero ||
+@@ -1315,6 +1317,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 	unsigned int buflen = bh->b_size;
+ 	char *base = bh->b_data;
+ 	struct dx_hash_info h = *hinfo;
++	int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
+ 
+ 	if (ext4_has_metadata_csum(dir->i_sb))
+ 		buflen -= sizeof(struct ext4_dir_entry_tail);
+@@ -1335,11 +1338,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 			map_tail--;
+ 			map_tail->hash = h.hash;
+ 			map_tail->offs = ((char *) de - base)>>2;
+-			map_tail->size = le16_to_cpu(de->rec_len);
++			map_tail->size = ext4_rec_len_from_disk(de->rec_len,
++								blocksize);
+ 			count++;
+ 			cond_resched();
+ 		}
+-		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
++		de = ext4_next_entry(de, blocksize);
+ 	}
+ 	return count;
+ }
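
The repeated le16_to_cpu() -> ext4_rec_len_from_disk() substitutions above matter once blocks reach 64KiB: a full-block rec_len no longer fits in 16 bits, so ext4 encodes it on disk. A simplified sketch of the decode rule (endianness conversion omitted; the kernel gates the encoded path on PAGE_SIZE at compile time):

#include <stdint.h>

static unsigned int rec_len_from_disk(uint16_t len, unsigned int blocksize)
{
	if (blocksize < 65536)
		return len;			/* small blocks: stored verbatim */
	if (len == 65535 || len == 0)
		return blocksize;		/* whole-block sentinel values */
	return (len & 65532) | ((len & 3) << 16);	/* low 2 bits carry bits 16-17 */
}
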
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 7d2f70708f37d..794fda5bd9bc6 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -927,7 +927,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
+ 			 * the end of this data range, not the end of the folio.
+ 			 */
+ 			*punch_start_byte = min_t(loff_t, end_byte,
+-					folio_next_index(folio) << PAGE_SHIFT);
++					folio_pos(folio) + folio_size(folio));
+ 		}
+ 
+ 		/* move offset to start of next folio in range */
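
The one-line iomap fix above avoids a truncated shift: folio_next_index() returns a pgoff_t, which is 32 bits on 32-bit kernels, so shifting it by PAGE_SHIFT before widening drops the high bits for offsets past 4GiB, while folio_pos() + folio_size() widens first. The failure, in miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int next_index = 0x100001u;	/* page index just past 4GiB */

	uint64_t bad  = next_index << 12;		/* 32-bit shift wraps to 0x1000 */
	uint64_t good = (uint64_t)next_index << 12;	/* widen first: 0x100001000 */

	printf("bad=0x%llx good=0x%llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
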
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index a14a0f18a4c40..88afd108c2dd2 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -269,6 +269,7 @@ int dbUnmount(struct inode *ipbmap, int mounterror)
+ 
+ 	/* free the memory for the in-memory bmap. */
+ 	kfree(bmp);
++	JFS_SBI(ipbmap->i_sb)->bmap = NULL;
+ 
+ 	return (0);
+ }
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 390cbfce391fc..6fb28572cb2c6 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -193,6 +193,7 @@ int diUnmount(struct inode *ipimap, int mounterror)
+ 	 * free in-memory control structure
+ 	 */
+ 	kfree(imap);
++	JFS_IP(ipimap)->i_imap = NULL;
+ 
+ 	return (0);
+ }
+diff --git a/fs/locks.c b/fs/locks.c
+index df8b26a425248..a552bdb6badc0 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1301,6 +1301,7 @@ retry:
+  out:
+ 	spin_unlock(&ctx->flc_lock);
+ 	percpu_up_read(&file_rwsem);
++	trace_posix_lock_inode(inode, request, error);
+ 	/*
+ 	 * Free any unused locks.
+ 	 */
+@@ -1309,7 +1310,6 @@ retry:
+ 	if (new_fl2)
+ 		locks_free_lock(new_fl2);
+ 	locks_dispose_list(&dispose);
+-	trace_posix_lock_inode(inode, request, error);
+ 
+ 	return error;
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 5ae670807449b..7fe97c95938b8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1024,8 +1024,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			     rename->rn_tname, rename->rn_tnamelen);
+ 	if (status)
+ 		return status;
+-	set_change_info(&rename->rn_sinfo, &cstate->current_fh);
+-	set_change_info(&rename->rn_tinfo, &cstate->save_fh);
++	set_change_info(&rename->rn_sinfo, &cstate->save_fh);
++	set_change_info(&rename->rn_tinfo, &cstate->current_fh);
+ 	return nfs_ok;
+ }
+ 
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 568f743a5584a..986d37a4c2750 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -617,7 +617,8 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
+ 	if (err)
+ 		return err;
+ 
+-	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
++	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
++	    (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
+ 		/*
+ 		 * Copy the fileattr inode flags that are the source of already
+ 		 * copied i_flags
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 21245b00722af..1d80583d72eda 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -19,7 +19,6 @@ struct ovl_aio_req {
+ 	struct kiocb iocb;
+ 	refcount_t ref;
+ 	struct kiocb *orig_iocb;
+-	struct fd fd;
+ };
+ 
+ static struct kmem_cache *ovl_aio_request_cachep;
+@@ -277,7 +276,7 @@ static rwf_t ovl_iocb_to_rwf(int ifl)
+ static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
+ {
+ 	if (refcount_dec_and_test(&aio_req->ref)) {
+-		fdput(aio_req->fd);
++		fput(aio_req->iocb.ki_filp);
+ 		kmem_cache_free(ovl_aio_request_cachep, aio_req);
+ 	}
+ }
+@@ -342,10 +341,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 		if (!aio_req)
+ 			goto out;
+ 
+-		aio_req->fd = real;
+ 		real.flags = 0;
+ 		aio_req->orig_iocb = iocb;
+-		kiocb_clone(&aio_req->iocb, iocb, real.file);
++		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
+ 		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ 		refcount_set(&aio_req->ref, 2);
+ 		ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
+@@ -413,10 +411,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 		/* Pacify lockdep, same trick as done in aio_write() */
+ 		__sb_writers_release(file_inode(real.file)->i_sb,
+ 				     SB_FREEZE_WRITE);
+-		aio_req->fd = real;
+ 		real.flags = 0;
+ 		aio_req->orig_iocb = iocb;
+-		kiocb_clone(&aio_req->iocb, iocb, real.file);
++		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
+ 		aio_req->iocb.ki_flags = ifl;
+ 		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ 		refcount_set(&aio_req->ref, 2);
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 57ac8aa4a724d..65e8bfa483b19 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -556,6 +556,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+  */
+ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
+ {
++	if (security_locked_down(LOCKDOWN_TRACEFS))
++		return NULL;
++
+ 	return __create_dir(name, parent, &simple_dir_inode_operations);
+ }
+ 
+diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
+index ed013fdcc1ffb..f0182ad3eacf7 100644
+--- a/include/drm/drm_kunit_helpers.h
++++ b/include/drm/drm_kunit_helpers.h
+@@ -3,6 +3,8 @@
+ #ifndef DRM_KUNIT_HELPERS_H_
+ #define DRM_KUNIT_HELPERS_H_
+ 
++#include <linux/device.h>
++
+ #include <kunit/test.h>
+ 
+ struct drm_device;
+@@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
+ {
+ 	struct drm_driver *driver;
+ 
+-	driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
++	driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
+ 	KUNIT_ASSERT_NOT_NULL(test, driver);
+ 
+ 	driver->driver_features = features;
+diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
+index ee7cb6aaff718..1cb65592c95dd 100644
+--- a/include/linux/acpi_iort.h
++++ b/include/linux/acpi_iort.h
+@@ -21,6 +21,7 @@
+  */
+ #define IORT_SMMU_V3_PMCG_GENERIC        0x00000000 /* Generic SMMUv3 PMCG */
+ #define IORT_SMMU_V3_PMCG_HISI_HIP08     0x00000001 /* HiSilicon HIP08 PMCG */
++#define IORT_SMMU_V3_PMCG_HISI_HIP09     0x00000002 /* HiSilicon HIP09 PMCG */
+ 
+ int iort_register_domain_token(int trans_id, phys_addr_t base,
+ 			       struct fwnode_handle *fw_node);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index f316affcd2e13..28e2e0ce2ed07 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -640,7 +640,8 @@ enum bpf_type_flag {
+ 	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),
+ 
+ 	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+-	 * Currently only valid for linked-list and rbtree nodes.
++	 * Currently only valid for linked-list and rbtree nodes. If the nodes
++	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
+ 	 */
+ 	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),
+ 
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index f70f9ac884d24..b6e58dab8e275 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -745,7 +745,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+ 	}
+ }
+ 
+-#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)
++#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
+ 
+ static inline bool bpf_type_has_unsafe_modifiers(u32 type)
+ {
+diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
+index cda1f706eaeb1..aa0b3ffea9353 100644
+--- a/include/linux/instruction_pointer.h
++++ b/include/linux/instruction_pointer.h
+@@ -2,7 +2,12 @@
+ #ifndef _LINUX_INSTRUCTION_POINTER_H
+ #define _LINUX_INSTRUCTION_POINTER_H
+ 
++#include <asm/linkage.h>
++
+ #define _RET_IP_		(unsigned long)__builtin_return_address(0)
++
++#ifndef _THIS_IP_
+ #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
++#endif
+ 
+ #endif /* _LINUX_INSTRUCTION_POINTER_H */
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 820f7a3a2749b..f5bb4415c5e2d 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -222,6 +222,10 @@ enum {
+ 	ATA_HOST_PARALLEL_SCAN	= (1 << 2),	/* Ports on this host can be scanned in parallel */
+ 	ATA_HOST_IGNORE_ATA	= (1 << 3),	/* Ignore ATA devices on this host. */
+ 
++	ATA_HOST_NO_PART	= (1 << 4), /* Host does not support partial */
++	ATA_HOST_NO_SSC		= (1 << 5), /* Host does not support slumber */
++	ATA_HOST_NO_DEVSLP	= (1 << 6), /* Host does not support devslp */
++
+ 	/* bits 24:31 of host->flags are reserved for LLD specific flags */
+ 
+ 	/* various lengths of time */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 2dc75df1437fb..8f9a459e16718 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -576,6 +576,8 @@
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
+ #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
++#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
++#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+ #define PCI_DEVICE_ID_AMD_MI200_DF_F3	0x14d3
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 2166a69e3bf2e..e657916c9509c 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1316,15 +1316,31 @@ extern int perf_event_output(struct perf_event *event,
+ 			     struct pt_regs *regs);
+ 
+ static inline bool
+-is_default_overflow_handler(struct perf_event *event)
++__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
+ {
+-	if (likely(event->overflow_handler == perf_event_output_forward))
++	if (likely(overflow_handler == perf_event_output_forward))
+ 		return true;
+-	if (unlikely(event->overflow_handler == perf_event_output_backward))
++	if (unlikely(overflow_handler == perf_event_output_backward))
+ 		return true;
+ 	return false;
+ }
+ 
++#define is_default_overflow_handler(event) \
++	__is_default_overflow_handler((event)->overflow_handler)
++
++#ifdef CONFIG_BPF_SYSCALL
++static inline bool uses_default_overflow_handler(struct perf_event *event)
++{
++	if (likely(is_default_overflow_handler(event)))
++		return true;
++
++	return __is_default_overflow_handler(event->orig_overflow_handler);
++}
++#else
++#define uses_default_overflow_handler(event) \
++	is_default_overflow_handler(event)
++#endif
++
+ extern void
+ perf_event_header__init_id(struct perf_event_header *header,
+ 			   struct perf_sample_data *data,
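
The perf hunk above distinguishes "is the default handler installed" from "does this event ultimately use a default handler": when BPF wraps an event it saves the original handler, and the second question must look through the wrapper. A small function-pointer sketch of that distinction (hypothetical names, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

typedef void (*handler_t)(void);

static void default_forward(void) { }
static void bpf_wrapper(void)     { }

struct event {
	handler_t overflow_handler;        /* currently installed handler */
	handler_t orig_overflow_handler;   /* saved when BPF wraps the event */
};

static bool is_default(handler_t h)
{
	return h == default_forward;
}

static bool uses_default(const struct event *e)
{
	if (is_default(e->overflow_handler))
		return true;
	/* wrapped: the answer depends on what the wrapper forwards to */
	return is_default(e->orig_overflow_handler);
}

int main(void)
{
	struct event e = { bpf_wrapper, default_forward };

	printf("uses_default=%d\n", (int)uses_default(&e));
	return 0;
}
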
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index dd35ce28bb908..6b687c155fb6c 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -118,10 +118,36 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
+ }
+ 
+ extern void __put_task_struct(struct task_struct *t);
++extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
+ 
+ static inline void put_task_struct(struct task_struct *t)
+ {
+-	if (refcount_dec_and_test(&t->usage))
++	if (!refcount_dec_and_test(&t->usage))
++		return;
++
++	/*
++	 * under PREEMPT_RT, we can't call put_task_struct
++	 * in atomic context because it will indirectly
++	 * acquire sleeping locks.
++	 *
++	 * call_rcu() will schedule delayed_put_task_struct_rcu()
++	 * to be called in process context.
++	 *
++	 * __put_task_struct() is called when
++	 * refcount_dec_and_test(&t->usage) succeeds.
++	 *
++	 * This means that it can't "conflict" with
++	 * put_task_struct_rcu_user() which abuses ->rcu the same
++	 * way; rcu_users has a reference so task->usage can't be
++	 * zero after rcu_users 1 -> 0 transition.
++	 *
++	 * delayed_free_task() also uses ->rcu, but it is only called
++	 * when it fails to fork a process. Therefore, there is no
++	 * way it can conflict with put_task_struct().
++	 */
++	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
++		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
++	else
+ 		__put_task_struct(t);
+ }
+ 
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index f99d798093ab3..faa579209a724 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
+ /* Used to find the offset and length of dynamic fields in trace events */
+ struct trace_dynamic_info {
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+-	u16	offset;
+ 	u16	len;
++	u16	offset;
+ #else
+-	u16	len;
+ 	u16	offset;
++	u16	len;
+ #endif
+-};
++} __packed;
+ 
+ /*
+  * The trace entry - the most basic unit of tracing. This is what
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index ee38835ed77cc..0b4f2d5faa080 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -63,6 +63,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_IMX_IS_HSIC		BIT(14)
+ #define CI_HDRC_PMQOS			BIT(15)
+ #define CI_HDRC_PHY_VBUS_CONTROL	BIT(16)
++#define CI_HDRC_HAS_PORTSC_PEC_MISSED	BIT(17)
+ 	enum usb_dr_mode	dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT		0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT	1
+diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
+index a494cf43a7552..b0caad82b6937 100644
+--- a/include/uapi/linux/netfilter_bridge/ebtables.h
++++ b/include/uapi/linux/netfilter_bridge/ebtables.h
+@@ -182,12 +182,14 @@ struct ebt_entry {
+ 	unsigned char sourcemsk[ETH_ALEN];
+ 	unsigned char destmac[ETH_ALEN];
+ 	unsigned char destmsk[ETH_ALEN];
+-	/* sizeof ebt_entry + matches */
+-	unsigned int watchers_offset;
+-	/* sizeof ebt_entry + matches + watchers */
+-	unsigned int target_offset;
+-	/* sizeof ebt_entry + matches + watchers + target */
+-	unsigned int next_offset;
++	__struct_group(/* no tag */, offsets, /* no attrs */,
++		/* sizeof ebt_entry + matches */
++		unsigned int watchers_offset;
++		/* sizeof ebt_entry + matches + watchers */
++		unsigned int target_offset;
++		/* sizeof ebt_entry + matches + watchers + target */
++		unsigned int next_offset;
++	);
+ 	unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ };
+ 
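The ebtables hunk uses __struct_group(), which wraps the three offset fields in a tagless struct that is also reachable under the name offsets, so a memcpy() over exactly those fields has a compiler-visible, bounded size. A minimal C11 illustration of the same layout trick (hypothetical struct, not the real ebt_entry):

#include <stdio.h>
#include <string.h>

struct entry {
	unsigned int bitmask;
	union {                         /* roughly what __struct_group() expands to */
		struct {                /* anonymous: members stay directly addressable */
			unsigned int watchers_offset;
			unsigned int target_offset;
			unsigned int next_offset;
		};
		struct {                /* named mirror used for sized copies */
			unsigned int watchers_offset;
			unsigned int target_offset;
			unsigned int next_offset;
		} offsets;
	};
};

int main(void)
{
	struct entry e = { .watchers_offset = 1, .target_offset = 2, .next_offset = 3 };
	unsigned int out[3];

	memcpy(out, &e.offsets, sizeof(e.offsets));   /* bounded by the group */
	printf("%u %u %u\n", out[0], out[1], out[2]);
	return 0;
}
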
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 4e9217c1eb2e0..a1562f2cf3f3c 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -4628,8 +4628,20 @@ static int __init io_uring_init(void)
+ 
+ 	io_uring_optable_init();
+ 
+-	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+-				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
++	/*
++	 * Allow user copy in the per-command field, which starts after the
++	 * file in io_kiocb and until the opcode field. The openat2 handling
++	 * requires copying in user memory into the io_kiocb object in that
++	 * range, and HARDENED_USERCOPY will complain if we haven't
++	 * correctly annotated this range.
++	 */
++	req_cachep = kmem_cache_create_usercopy("io_kiocb",
++				sizeof(struct io_kiocb), 0,
++				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
++				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
++				offsetof(struct io_kiocb, cmd.data),
++				sizeof_field(struct io_kiocb, cmd.data), NULL);
++
+ 	return 0;
+ };
+ __initcall(io_uring_init);
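
The io_uring change above is an instance of the HARDENED_USERCOPY whitelist pattern: a slab cache declares the only byte range inside its objects that may be copied to or from user space, as an (offset, size) pair derived from the struct layout. A standalone sketch of how such a window is computed, with a hypothetical request struct:

#include <stddef.h>
#include <stdio.h>

struct request {
	void *file;
	unsigned char cmd_data[56];   /* per-command area copied from user space */
	unsigned char opcode;
};

#define sizeof_field(t, f) sizeof(((t *)0)->f)

int main(void)
{
	/* The pair a kmem_cache_create_usercopy()-style API would receive. */
	size_t useroffset = offsetof(struct request, cmd_data);
	size_t usersize   = sizeof_field(struct request, cmd_data);

	printf("usercopy window: offset=%zu size=%zu\n", useroffset, usersize);
	return 0;
}
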
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 8c419c01a5dba..47ecb070194ba 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -183,6 +183,10 @@ static int io_setup_async_msg(struct io_kiocb *req,
+ 	memcpy(async_msg, kmsg, sizeof(*kmsg));
+ 	if (async_msg->msg.msg_name)
+ 		async_msg->msg.msg_name = &async_msg->addr;
++
++	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
++		return -EAGAIN;
++
+ 	/* if were using fast_iov, set it to the new one */
+ 	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
+ 		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
+@@ -542,6 +546,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+ 			       struct io_async_msghdr *iomsg)
+ {
+ 	iomsg->msg.msg_name = &iomsg->addr;
++	iomsg->msg.msg_iter.nr_segs = 0;
+ 
+ #ifdef CONFIG_COMPAT
+ 	if (req->ctx->compat)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 76845dd22cd26..9cdba4ce23d2b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -7841,6 +7841,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 	case PTR_TO_BTF_ID | PTR_TRUSTED:
+ 	case PTR_TO_BTF_ID | MEM_RCU:
+ 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
++	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU:
+ 		/* When referenced PTR_TO_BTF_ID is passed to release function,
+ 		 * its fixed offset must be 0. In the other cases, fixed offset
+ 		 * can be non-zero. This was already checked above. So pass
+@@ -10302,6 +10303,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
+ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+ {
+ 	struct bpf_verifier_state *state = env->cur_state;
++	struct btf_record *rec = reg_btf_record(reg);
+ 
+ 	if (!state->active_lock.ptr) {
+ 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
+@@ -10314,6 +10316,9 @@ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state
+ 	}
+ 
+ 	reg->type |= NON_OWN_REF;
++	if (rec->refcount_off >= 0)
++		reg->type |= MEM_RCU;
++
+ 	return 0;
+ }
+ 
+@@ -11154,6 +11159,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 		struct bpf_func_state *state;
+ 		struct bpf_reg_state *reg;
+ 
++		if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) {
++			verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n");
++			return -EACCES;
++		}
++
+ 		if (rcu_lock) {
+ 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
+ 			return -EINVAL;
+@@ -16453,7 +16463,8 @@ static int do_check(struct bpf_verifier_env *env)
+ 					return -EINVAL;
+ 				}
+ 
+-				if (env->cur_state->active_rcu_lock) {
++				if (env->cur_state->active_rcu_lock &&
++				    !in_rbtree_lock_required_cb(env)) {
+ 					verbose(env, "bpf_rcu_read_unlock is missing\n");
+ 					return -EINVAL;
+ 				}
+diff --git a/kernel/fork.c b/kernel/fork.c
+index d2e12b6d2b180..f81149739eb9f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -985,6 +985,14 @@ void __put_task_struct(struct task_struct *tsk)
+ }
+ EXPORT_SYMBOL_GPL(__put_task_struct);
+ 
++void __put_task_struct_rcu_cb(struct rcu_head *rhp)
++{
++	struct task_struct *task = container_of(rhp, struct task_struct, rcu);
++
++	__put_task_struct(task);
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
++
+ void __init __weak arch_task_cache_init(void) { }
+ 
+ /*
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 10effe40a3fa6..ea1c5fcb2d191 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -697,6 +697,7 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 	if (!fmt) {
+ 		__warn(file, line, __builtin_return_address(0), taint,
+ 		       NULL, NULL);
++		warn_rcu_exit(rcu);
+ 		return;
+ 	}
+ 
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 2b4a946a6ff5c..8d35b9f9aaa3f 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -786,9 +786,9 @@ int hibernate(void)
+ 	unlock_device_hotplug();
+ 	if (snapshot_test) {
+ 		pm_pr_dbg("Checking hibernation image\n");
+-		error = swsusp_check(snapshot_test);
++		error = swsusp_check(false);
+ 		if (!error)
+-			error = load_image_and_restore(snapshot_test);
++			error = load_image_and_restore(false);
+ 	}
+ 	thaw_processes();
+ 
+@@ -945,14 +945,14 @@ static int software_resume(void)
+ 	pm_pr_dbg("Looking for hibernation image.\n");
+ 
+ 	mutex_lock(&system_transition_mutex);
+-	error = swsusp_check(false);
++	error = swsusp_check(true);
+ 	if (error)
+ 		goto Unlock;
+ 
+ 	/* The snapshot device should not be opened while we're running */
+ 	if (!hibernate_acquire()) {
+ 		error = -EBUSY;
+-		swsusp_close(false);
++		swsusp_close(true);
+ 		goto Unlock;
+ 	}
+ 
+@@ -973,7 +973,7 @@ static int software_resume(void)
+ 		goto Close_Finish;
+ 	}
+ 
+-	error = load_image_and_restore(false);
++	error = load_image_and_restore(true);
+ 	thaw_processes();
+  Finish:
+ 	pm_notifier_call_chain(PM_POST_RESTORE);
+@@ -987,7 +987,7 @@ static int software_resume(void)
+ 	pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
+ 	return error;
+  Close_Finish:
+-	swsusp_close(false);
++	swsusp_close(true);
+ 	goto Finish;
+ }
+ 
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index 2a17704136f1d..7d4979d5c3ce6 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -103,3 +103,5 @@ struct printk_message {
+ 	u64			seq;
+ 	unsigned long		dropped;
+ };
++
++bool other_cpu_in_panic(void);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 357a4d18f6387..08a9419046b65 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2308,7 +2308,11 @@ asmlinkage int vprintk_emit(int facility, int level,
+ 		preempt_enable();
+ 	}
+ 
+-	wake_up_klogd();
++	if (in_sched)
++		defer_console_output();
++	else
++		wake_up_klogd();
++
+ 	return printed_len;
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+@@ -2585,6 +2589,26 @@ static int console_cpu_notify(unsigned int cpu)
+ 	return 0;
+ }
+ 
++/*
++ * Return true if a panic is in progress on a remote CPU.
++ *
++ * On true, the local CPU should immediately release any printing resources
++ * that may be needed by the panic CPU.
++ */
++bool other_cpu_in_panic(void)
++{
++	if (!panic_in_progress())
++		return false;
++
++	/*
++	 * We can use raw_smp_processor_id() here because it is impossible for
++	 * the task to be migrated to the panic_cpu, or away from it. If
++	 * panic_cpu has already been set, and we're not currently executing on
++	 * that CPU, then we never will be.
++	 */
++	return atomic_read(&panic_cpu) != raw_smp_processor_id();
++}
++
+ /**
+  * console_lock - block the console subsystem from printing
+  *
+@@ -2597,6 +2621,10 @@ void console_lock(void)
+ {
+ 	might_sleep();
+ 
++	/* On panic, the console_lock must be left to the panic cpu. */
++	while (other_cpu_in_panic())
++		msleep(1000);
++
+ 	down_console_sem();
+ 	if (console_suspended)
+ 		return;
+@@ -2615,6 +2643,9 @@ EXPORT_SYMBOL(console_lock);
+  */
+ int console_trylock(void)
+ {
++	/* On panic, the console_lock must be left to the panic cpu. */
++	if (other_cpu_in_panic())
++		return 0;
+ 	if (down_trylock_console_sem())
+ 		return 0;
+ 	if (console_suspended) {
+@@ -2633,25 +2664,6 @@ int is_console_locked(void)
+ }
+ EXPORT_SYMBOL(is_console_locked);
+ 
+-/*
+- * Return true when this CPU should unlock console_sem without pushing all
+- * messages to the console. This reduces the chance that the console is
+- * locked when the panic CPU tries to use it.
+- */
+-static bool abandon_console_lock_in_panic(void)
+-{
+-	if (!panic_in_progress())
+-		return false;
+-
+-	/*
+-	 * We can use raw_smp_processor_id() here because it is impossible for
+-	 * the task to be migrated to the panic_cpu, or away from it. If
+-	 * panic_cpu has already been set, and we're not currently executing on
+-	 * that CPU, then we never will be.
+-	 */
+-	return atomic_read(&panic_cpu) != raw_smp_processor_id();
+-}
+-
+ /*
+  * Check if the given console is currently capable and allowed to print
+  * records.
+@@ -2948,7 +2960,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
+ 			any_progress = true;
+ 
+ 			/* Allow panic_cpu to take over the consoles safely. */
+-			if (abandon_console_lock_in_panic())
++			if (other_cpu_in_panic())
+ 				goto abandon;
+ 
+ 			if (do_cond_resched)
+@@ -3045,9 +3057,27 @@ EXPORT_SYMBOL(console_conditional_schedule);
+ 
+ void console_unblank(void)
+ {
++	bool found_unblank = false;
+ 	struct console *c;
+ 	int cookie;
+ 
++	/*
++	 * First check if there are any consoles implementing the unblank()
++	 * callback. If not, there is no reason to continue and take the
++	 * console lock, which in particular can be dangerous if
++	 * @oops_in_progress is set.
++	 */
++	cookie = console_srcu_read_lock();
++	for_each_console_srcu(c) {
++		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) {
++			found_unblank = true;
++			break;
++		}
++	}
++	console_srcu_read_unlock(cookie);
++	if (!found_unblank)
++		return;
++
+ 	/*
+ 	 * Stop console printing because the unblank() callback may
+ 	 * assume the console is not within its write() callback.
+@@ -3056,6 +3086,16 @@ void console_unblank(void)
+ 	 * In that case, attempt a trylock as best-effort.
+ 	 */
+ 	if (oops_in_progress) {
++		/* Semaphores are not NMI-safe. */
++		if (in_nmi())
++			return;
++
++		/*
++		 * Attempting to trylock the console lock can deadlock
++		 * if another CPU was stopped while modifying the
++		 * semaphore. "Hope and pray" that this is not the
++		 * current situation.
++		 */
+ 		if (down_trylock_console_sem() != 0)
+ 			return;
+ 	} else
+@@ -3085,14 +3125,24 @@ void console_unblank(void)
+  */
+ void console_flush_on_panic(enum con_flush_mode mode)
+ {
++	bool handover;
++	u64 next_seq;
++
++	/*
++	 * Ignore the console lock and flush out the messages. Attempting a
++	 * trylock would not be useful because:
++	 *
++	 *   - if it is contended, it must be ignored anyway
++	 *   - console_lock() and console_trylock() block and fail
++	 *     respectively in panic for non-panic CPUs
++	 *   - semaphores are not NMI-safe
++	 */
++
+ 	/*
+-	 * If someone else is holding the console lock, trylock will fail
+-	 * and may_schedule may be set.  Ignore and proceed to unlock so
+-	 * that messages are flushed out.  As this can be called from any
+-	 * context and we don't want to get preempted while flushing,
+-	 * ensure may_schedule is cleared.
++	 * If another context is holding the console lock,
++	 * @console_may_schedule might be set. Clear it so that
++	 * this context does not call cond_resched() while flushing.
+ 	 */
+-	console_trylock();
+ 	console_may_schedule = 0;
+ 
+ 	if (mode == CONSOLE_REPLAY_ALL) {
+@@ -3105,15 +3155,15 @@ void console_flush_on_panic(enum con_flush_mode mode)
+ 		cookie = console_srcu_read_lock();
+ 		for_each_console_srcu(c) {
+ 			/*
+-			 * If the above console_trylock() failed, this is an
+-			 * unsynchronized assignment. But in that case, the
++			 * This is an unsynchronized assignment, but the
+ 			 * kernel is in "hope and pray" mode anyway.
+ 			 */
+ 			c->seq = seq;
+ 		}
+ 		console_srcu_read_unlock(cookie);
+ 	}
+-	console_unlock();
++
++	console_flush_all(false, &next_seq, &handover);
+ }
+ 
+ /*
+@@ -3798,11 +3848,33 @@ static void __wake_up_klogd(int val)
+ 	preempt_enable();
+ }
+ 
++/**
++ * wake_up_klogd - Wake kernel logging daemon
++ *
++ * Use this function when new records have been added to the ringbuffer
++ * and the console printing of those records has already occurred or is
++ * known to be handled by some other context. This function will only
++ * wake the logging daemon.
++ *
++ * Context: Any context.
++ */
+ void wake_up_klogd(void)
+ {
+ 	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
+ }
+ 
++/**
++ * defer_console_output - Wake kernel logging daemon and trigger
++ *	console printing in a deferred context
++ *
++ * Use this function when new records have been added to the ringbuffer,
++ * this context is responsible for console printing those records, but
++ * the current context is not allowed to perform the console printing.
++ * Trigger an irq_work context to perform the console printing. This
++ * function also wakes the logging daemon.
++ *
++ * Context: Any context.
++ */
+ void defer_console_output(void)
+ {
+ 	/*
+@@ -3819,12 +3891,7 @@ void printk_trigger_flush(void)
+ 
+ int vprintk_deferred(const char *fmt, va_list args)
+ {
+-	int r;
+-
+-	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+-	defer_console_output();
+-
+-	return r;
++	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+ }
+ 
+ int _printk_deferred(const char *fmt, ...)
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index ef0f9a2044da1..6d10927a07d83 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -38,13 +38,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+ 	 * Use the main logbuf even in NMI. But avoid calling console
+ 	 * drivers that might have their own locks.
+ 	 */
+-	if (this_cpu_read(printk_context) || in_nmi()) {
+-		int len;
+-
+-		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+-		defer_console_output();
+-		return len;
+-	}
++	if (this_cpu_read(printk_context) || in_nmi())
++		return vprintk_deferred(fmt, args);
+ 
+ 	/* No obstacles. */
+ 	return vprintk_default(fmt, args);
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index d1221731c7cfd..35aab6cbba583 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -424,7 +424,7 @@ rcu_scale_writer(void *arg)
+ 	sched_set_fifo_low(current);
+ 
+ 	if (holdoff)
+-		schedule_timeout_uninterruptible(holdoff * HZ);
++		schedule_timeout_idle(holdoff * HZ);
+ 
+ 	/*
+ 	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
+diff --git a/kernel/scftorture.c b/kernel/scftorture.c
+index 5d113aa59e773..83c33ba0ca7e0 100644
+--- a/kernel/scftorture.c
++++ b/kernel/scftorture.c
+@@ -171,7 +171,8 @@ static void scf_torture_stats_print(void)
+ 		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
+ 	}
+ 	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
+-	    atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
++	    atomic_read(&n_mb_out_errs) ||
++	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
+ 		bangstr = "!!! ";
+ 	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
+ 		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
+@@ -323,7 +324,8 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
+ 		preempt_disable();
+ 	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
+ 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
+-		if (WARN_ON_ONCE(!scfcp)) {
++		if (!scfcp) {
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
+ 			atomic_inc(&n_alloc_errs);
+ 		} else {
+ 			scfcp->scfc_cpu = -1;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 745332d10b3e1..81c4dade3763e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1772,7 +1772,7 @@ static void trace_create_maxlat_file(struct trace_array *tr,
+ 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
+ 	tr->d_max_latency = trace_create_file("tracing_max_latency",
+ 					      TRACE_MODE_WRITE,
+-					      d_tracer, &tr->max_latency,
++					      d_tracer, tr,
+ 					      &tracing_max_lat_fops);
+ }
+ 
+@@ -1805,7 +1805,7 @@ void latency_fsnotify(struct trace_array *tr)
+ 
+ #define trace_create_maxlat_file(tr, d_tracer)				\
+ 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
+-			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)
++			  d_tracer, tr, &tracing_max_lat_fops)
+ 
+ #endif
+ 
+@@ -4987,6 +4987,33 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+ 	return 0;
+ }
+ 
++/*
++ * The private pointer of the inode is the trace_event_file.
++ * Update the tr ref count associated to it.
++ */
++int tracing_open_file_tr(struct inode *inode, struct file *filp)
++{
++	struct trace_event_file *file = inode->i_private;
++	int ret;
++
++	ret = tracing_check_open_get_tr(file->tr);
++	if (ret)
++		return ret;
++
++	filp->private_data = inode->i_private;
++
++	return 0;
++}
++
++int tracing_release_file_tr(struct inode *inode, struct file *filp)
++{
++	struct trace_event_file *file = inode->i_private;
++
++	trace_array_put(file->tr);
++
++	return 0;
++}
++
+ static int tracing_mark_open(struct inode *inode, struct file *filp)
+ {
+ 	stream_open(inode, filp);
+@@ -6706,14 +6733,18 @@ static ssize_t
+ tracing_max_lat_read(struct file *filp, char __user *ubuf,
+ 		     size_t cnt, loff_t *ppos)
+ {
+-	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
++	struct trace_array *tr = filp->private_data;
++
++	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
+ }
+ 
+ static ssize_t
+ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+ 		      size_t cnt, loff_t *ppos)
+ {
+-	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
++	struct trace_array *tr = filp->private_data;
++
++	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
+ }
+ 
+ #endif
+@@ -7770,18 +7801,20 @@ static const struct file_operations tracing_thresh_fops = {
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ static const struct file_operations tracing_max_lat_fops = {
+-	.open		= tracing_open_generic,
++	.open		= tracing_open_generic_tr,
+ 	.read		= tracing_max_lat_read,
+ 	.write		= tracing_max_lat_write,
+ 	.llseek		= generic_file_llseek,
++	.release	= tracing_release_generic_tr,
+ };
+ #endif
+ 
+ static const struct file_operations set_tracer_fops = {
+-	.open		= tracing_open_generic,
++	.open		= tracing_open_generic_tr,
+ 	.read		= tracing_set_trace_read,
+ 	.write		= tracing_set_trace_write,
+ 	.llseek		= generic_file_llseek,
++	.release	= tracing_release_generic_tr,
+ };
+ 
+ static const struct file_operations tracing_pipe_fops = {
+@@ -8974,12 +9007,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	return cnt;
+ }
+ 
++static int tracing_open_options(struct inode *inode, struct file *filp)
++{
++	struct trace_option_dentry *topt = inode->i_private;
++	int ret;
++
++	ret = tracing_check_open_get_tr(topt->tr);
++	if (ret)
++		return ret;
++
++	filp->private_data = inode->i_private;
++	return 0;
++}
++
++static int tracing_release_options(struct inode *inode, struct file *file)
++{
++	struct trace_option_dentry *topt = file->private_data;
++
++	trace_array_put(topt->tr);
++	return 0;
++}
+ 
+ static const struct file_operations trace_options_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_options,
+ 	.read = trace_options_read,
+ 	.write = trace_options_write,
+ 	.llseek	= generic_file_llseek,
++	.release = tracing_release_options,
+ };
+ 
+ /*
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 73eaec158473e..c98c3f42c3862 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -601,6 +601,8 @@ void tracing_reset_all_online_cpus(void);
+ void tracing_reset_all_online_cpus_unlocked(void);
+ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
++int tracing_open_file_tr(struct inode *inode, struct file *filp);
++int tracing_release_file_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+ bool tracer_tracing_is_on(struct trace_array *tr);
+ void tracer_tracing_on(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 578f1f7d49a61..0cf84a7449f5b 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2103,9 +2103,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = {
+ };
+ 
+ static const struct file_operations ftrace_enable_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_enable_read,
+ 	.write = event_enable_write,
++	.release = tracing_release_file_tr,
+ 	.llseek = default_llseek,
+ };
+ 
+@@ -2122,9 +2123,10 @@ static const struct file_operations ftrace_event_id_fops = {
+ };
+ 
+ static const struct file_operations ftrace_event_filter_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_filter_read,
+ 	.write = event_filter_write,
++	.release = tracing_release_file_tr,
+ 	.llseek = default_llseek,
+ };
+ 
+diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
+index abe805d471eb8..8650562bdaa98 100644
+--- a/kernel/trace/trace_events_inject.c
++++ b/kernel/trace/trace_events_inject.c
+@@ -328,7 +328,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size,
+ }
+ 
+ const struct file_operations event_inject_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_inject_read,
+ 	.write = event_inject_write,
++	.release = tracing_release_file_tr,
+ };
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 9897d0bfcab71..14cb275a0bab0 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -337,7 +337,7 @@ static void print_synth_event_num_val(struct trace_seq *s,
+ 		break;
+ 
+ 	default:
+-		trace_seq_printf(s, print_fmt, name, val, space);
++		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
+ 		break;
+ 	}
+ }
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 16d530f9c174b..4c7a990d6f120 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -854,6 +854,11 @@ int kset_register(struct kset *k)
+ 	if (!k)
+ 		return -EINVAL;
+ 
++	if (!k->kobj.ktype) {
++		pr_err("must have a ktype to be initialized properly!\n");
++		return -EINVAL;
++	}
++
+ 	kset_init(k);
+ 	err = kobject_add_internal(&k->kobj);
+ 	if (err) {
+diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
+index c4cfa3ff05818..0835b6213235e 100644
+--- a/lib/mpi/mpi-cmp.c
++++ b/lib/mpi/mpi-cmp.c
+@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
+ 	mpi_limb_t limb = v;
+ 
+ 	mpi_normalize(u);
+-	if (!u->nlimbs && !limb)
+-		return 0;
++	if (u->nlimbs == 0) {
++		if (v == 0)
++			return 0;
++		else
++			return -1;
++	}
+ 	if (u->sign)
+ 		return -1;
+ 	if (u->nlimbs > 1)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e79267c1eee01..4fe5a562d0bbc 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3871,6 +3871,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 		case _MEMSWAP:
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
++		case _KMEM:
++			/* kmem.limit_in_bytes is deprecated. */
++			ret = -EOPNOTSUPP;
++			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+ 			break;
+@@ -5081,6 +5085,12 @@ static struct cftype mem_cgroup_legacy_files[] = {
+ 		.seq_show = memcg_numa_stat_show,
+ 	},
+ #endif
++	{
++		.name = "kmem.limit_in_bytes",
++		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
++		.write = mem_cgroup_write,
++		.read_u64 = mem_cgroup_read_u64,
++	},
+ 	{
+ 		.name = "kmem.usage_in_bytes",
+ 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 2c845c9a26be0..29ae9b254a34e 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2436,6 +2436,9 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+ 		return NOTIFY_DONE;
+ 
++	/* To avoid a potential race with hci_unregister_dev. */
++	hci_dev_hold(hdev);
++
+ 	if (action == PM_SUSPEND_PREPARE)
+ 		ret = hci_suspend_dev(hdev);
+ 	else if (action == PM_POST_SUSPEND)
+@@ -2445,6 +2448,7 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+ 			   action, ret);
+ 
++	hci_dev_put(hdev);
+ 	return NOTIFY_DONE;
+ }
+ 
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 757ec46fc45a0..aa23479b20b2a 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2115,8 +2115,7 @@ static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *ba
+ 		return ret;
+ 
+ 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
+-	memcpy(&offsets[1], &entry->watchers_offset,
+-			sizeof(offsets) - sizeof(offsets[0]));
++	memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets));
+ 
+ 	if (state->buf_kern_start) {
+ 		buf_start = state->buf_kern_start + state->buf_kern_offset;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 29c6cb030818b..eef27812013a4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1824,14 +1824,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case SO_PEERNAME:
+ 	{
+-		char address[128];
++		struct sockaddr_storage address;
+ 
+-		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
++		lv = sock->ops->getname(sock, (struct sockaddr *)&address, 2);
+ 		if (lv < 0)
+ 			return -ENOTCONN;
+ 		if (lv < len)
+ 			return -EINVAL;
+-		if (copy_to_sockptr(optval, address, len))
++		if (copy_to_sockptr(optval, &address, len))
+ 			return -EFAULT;
+ 		goto lenout;
+ 	}
+diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
+index bfed7929a904f..5277eb3c7d0a1 100644
+--- a/net/devlink/leftover.c
++++ b/net/devlink/leftover.c
+@@ -3946,7 +3946,7 @@ static int devlink_param_get(struct devlink *devlink,
+ 			     const struct devlink_param *param,
+ 			     struct devlink_param_gset_ctx *ctx)
+ {
+-	if (!param->get || devlink->reload_failed)
++	if (!param->get)
+ 		return -EOPNOTSUPP;
+ 	return param->get(devlink, param->id, ctx);
+ }
+@@ -3955,7 +3955,7 @@ static int devlink_param_set(struct devlink *devlink,
+ 			     const struct devlink_param *param,
+ 			     struct devlink_param_gset_ctx *ctx)
+ {
+-	if (!param->set || devlink->reload_failed)
++	if (!param->set)
+ 		return -EOPNOTSUPP;
+ 	return param->set(devlink, param->id, ctx);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 6935d07a60c35..a8d2e8b1ff415 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -236,7 +236,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+ 			    __func__);
+ 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
+-	return -EINVAL;
++	return PTR_ERR(neigh);
+ }
+ 
+ static int ip_finish_output_gso(struct net *net, struct sock *sk,
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index d354b32a20f8f..45e7a5d9c7d94 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4133,19 +4133,20 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+ 	mutex_lock(&local->mtx);
+ 
+ 	rcu_read_lock();
++	sta = sta_info_get_bss(sdata, peer);
++	if (!sta) {
++		ret = -ENOLINK;
++		goto unlock;
++	}
++
++	qos = sta->sta.wme;
++
+ 	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ 	if (WARN_ON(!chanctx_conf)) {
+ 		ret = -EINVAL;
+ 		goto unlock;
+ 	}
+ 	band = chanctx_conf->def.chan->band;
+-	sta = sta_info_get_bss(sdata, peer);
+-	if (sta) {
+-		qos = sta->sta.wme;
+-	} else {
+-		ret = -ENOLINK;
+-		goto unlock;
+-	}
+ 
+ 	if (qos) {
+ 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 0af2599c17e8d..e751cda5eef69 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3734,6 +3734,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ 			break;
+ 		goto queue;
+ 	case WLAN_CATEGORY_S1G:
++		if (len < offsetofend(typeof(*mgmt),
++				      u.action.u.s1g.action_code))
++			break;
++
+ 		switch (mgmt->u.action.u.s1g.action_code) {
+ 		case WLAN_S1G_TWT_SETUP:
+ 		case WLAN_S1G_TWT_TEARDOWN:
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 3836318737483..20082171f24a3 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -84,7 +84,7 @@ struct listeners {
+ 
+ static inline int netlink_is_kernel(struct sock *sk)
+ {
+-	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
++	return nlk_test_bit(KERNEL_SOCKET, sk);
+ }
+ 
+ struct netlink_table *nl_table __read_mostly;
+@@ -349,9 +349,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
+ 
+ static void netlink_overrun(struct sock *sk)
+ {
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-
+-	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
++	if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		if (!test_and_set_bit(NETLINK_S_CONGESTED,
+ 				      &nlk_sk(sk)->state)) {
+ 			sk->sk_err = ENOBUFS;
+@@ -1402,9 +1400,7 @@ EXPORT_SYMBOL_GPL(netlink_has_listeners);
+ 
+ bool netlink_strict_get_check(struct sk_buff *skb)
+ {
+-	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
+-
+-	return nlk->flags & NETLINK_F_STRICT_CHK;
++	return nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
+ }
+ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
+ 
+@@ -1448,7 +1444,7 @@ static void do_one_broadcast(struct sock *sk,
+ 		return;
+ 
+ 	if (!net_eq(sock_net(sk), p->net)) {
+-		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
++		if (!nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 			return;
+ 
+ 		if (!peernet_has_id(sock_net(sk), p->net))
+@@ -1481,7 +1477,7 @@ static void do_one_broadcast(struct sock *sk,
+ 		netlink_overrun(sk);
+ 		/* Clone failed. Notify ALL listeners. */
+ 		p->failure = 1;
+-		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 			p->delivery_failure = 1;
+ 		goto out;
+ 	}
+@@ -1496,7 +1492,7 @@ static void do_one_broadcast(struct sock *sk,
+ 	val = netlink_broadcast_deliver(sk, p->skb2);
+ 	if (val < 0) {
+ 		netlink_overrun(sk);
+-		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 			p->delivery_failure = 1;
+ 	} else {
+ 		p->congested |= val;
+@@ -1576,7 +1572,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+ 	    !test_bit(p->group - 1, nlk->groups))
+ 		goto out;
+ 
+-	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
++	if (p->code == ENOBUFS && nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		ret = 1;
+ 		goto out;
+ 	}
+@@ -1643,7 +1639,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	unsigned int val = 0;
+-	int err;
++	int nr = -1;
+ 
+ 	if (level != SOL_NETLINK)
+ 		return -ENOPROTOOPT;
+@@ -1654,14 +1650,12 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case NETLINK_PKTINFO:
+-		if (val)
+-			nlk->flags |= NETLINK_F_RECV_PKTINFO;
+-		else
+-			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
+-		err = 0;
++		nr = NETLINK_F_RECV_PKTINFO;
+ 		break;
+ 	case NETLINK_ADD_MEMBERSHIP:
+ 	case NETLINK_DROP_MEMBERSHIP: {
++		int err;
++
+ 		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+@@ -1681,61 +1675,38 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
+ 			nlk->netlink_unbind(sock_net(sk), val);
+ 
+-		err = 0;
+ 		break;
+ 	}
+ 	case NETLINK_BROADCAST_ERROR:
+-		if (val)
+-			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
+-		else
+-			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
+-		err = 0;
++		nr = NETLINK_F_BROADCAST_SEND_ERROR;
+ 		break;
+ 	case NETLINK_NO_ENOBUFS:
++		assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
+ 		if (val) {
+-			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
+ 			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
+ 			wake_up_interruptible(&nlk->wait);
+-		} else {
+-			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
+ 		}
+-		err = 0;
+ 		break;
+ 	case NETLINK_LISTEN_ALL_NSID:
+ 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
+ 			return -EPERM;
+-
+-		if (val)
+-			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
+-		else
+-			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
+-		err = 0;
++		nr = NETLINK_F_LISTEN_ALL_NSID;
+ 		break;
+ 	case NETLINK_CAP_ACK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_CAP_ACK;
+-		else
+-			nlk->flags &= ~NETLINK_F_CAP_ACK;
+-		err = 0;
++		nr = NETLINK_F_CAP_ACK;
+ 		break;
+ 	case NETLINK_EXT_ACK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_EXT_ACK;
+-		else
+-			nlk->flags &= ~NETLINK_F_EXT_ACK;
+-		err = 0;
++		nr = NETLINK_F_EXT_ACK;
+ 		break;
+ 	case NETLINK_GET_STRICT_CHK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_STRICT_CHK;
+-		else
+-			nlk->flags &= ~NETLINK_F_STRICT_CHK;
+-		err = 0;
++		nr = NETLINK_F_STRICT_CHK;
+ 		break;
+ 	default:
+-		err = -ENOPROTOOPT;
++		return -ENOPROTOOPT;
+ 	}
+-	return err;
++	if (nr >= 0)
++		assign_bit(nr, &nlk->flags, val);
++	return 0;
+ }
+ 
+ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+@@ -1802,7 +1773,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 		return -EINVAL;
+ 
+ 	len = sizeof(int);
+-	val = nlk->flags & flag ? 1 : 0;
++	val = test_bit(flag, &nlk->flags);
+ 
+ 	if (put_user(len, optlen) ||
+ 	    copy_to_user(optval, &val, len))
+@@ -1979,9 +1950,9 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		msg->msg_namelen = sizeof(*addr);
+ 	}
+ 
+-	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
++	if (nlk_test_bit(RECV_PKTINFO, sk))
+ 		netlink_cmsg_recv_pktinfo(msg, skb);
+-	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
++	if (nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 		netlink_cmsg_listen_all_nsid(sk, msg, skb);
+ 
+ 	memset(&scm, 0, sizeof(scm));
+@@ -2058,7 +2029,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
+ 		goto out_sock_release;
+ 
+ 	nlk = nlk_sk(sk);
+-	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
++	set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
+ 
+ 	netlink_table_grab();
+ 	if (!nl_table[unit].registered) {
+@@ -2192,7 +2163,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ 	nl_dump_check_consistent(cb, nlh);
+ 	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
+ 
+-	if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
++	if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
+ 		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+ 		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
+ 			nlmsg_end(skb, nlh);
+@@ -2321,8 +2292,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 			 const struct nlmsghdr *nlh,
+ 			 struct netlink_dump_control *control)
+ {
+-	struct netlink_sock *nlk, *nlk2;
+ 	struct netlink_callback *cb;
++	struct netlink_sock *nlk;
+ 	struct sock *sk;
+ 	int ret;
+ 
+@@ -2357,8 +2328,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->min_dump_alloc = control->min_dump_alloc;
+ 	cb->skb = skb;
+ 
+-	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
+-	cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
++	cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
+ 
+ 	if (control->start) {
+ 		cb->extack = control->extack;
+@@ -2402,7 +2372,7 @@ netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
+ {
+ 	size_t tlvlen;
+ 
+-	if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK))
++	if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
+ 		return 0;
+ 
+ 	tlvlen = 0;
+@@ -2474,7 +2444,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	 * requests to cap the error message, and get extra error data if
+ 	 * requested.
+ 	 */
+-	if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
++	if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
+ 		payload += nlmsg_len(nlh);
+ 	else
+ 		flags |= NLM_F_CAPPED;
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 90a3198a9b7f7..3dbd38aef50a4 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -8,14 +8,16 @@
+ #include <net/sock.h>
+ 
+ /* flags */
+-#define NETLINK_F_KERNEL_SOCKET		0x1
+-#define NETLINK_F_RECV_PKTINFO		0x2
+-#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
+-#define NETLINK_F_RECV_NO_ENOBUFS	0x8
+-#define NETLINK_F_LISTEN_ALL_NSID	0x10
+-#define NETLINK_F_CAP_ACK		0x20
+-#define NETLINK_F_EXT_ACK		0x40
+-#define NETLINK_F_STRICT_CHK		0x80
++enum {
++	NETLINK_F_KERNEL_SOCKET,
++	NETLINK_F_RECV_PKTINFO,
++	NETLINK_F_BROADCAST_SEND_ERROR,
++	NETLINK_F_RECV_NO_ENOBUFS,
++	NETLINK_F_LISTEN_ALL_NSID,
++	NETLINK_F_CAP_ACK,
++	NETLINK_F_EXT_ACK,
++	NETLINK_F_STRICT_CHK,
++};
+ 
+ #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
+ #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
+@@ -23,10 +25,10 @@
+ struct netlink_sock {
+ 	/* struct sock has to be the first member of netlink_sock */
+ 	struct sock		sk;
++	unsigned long		flags;
+ 	u32			portid;
+ 	u32			dst_portid;
+ 	u32			dst_group;
+-	u32			flags;
+ 	u32			subscriptions;
+ 	u32			ngroups;
+ 	unsigned long		*groups;
+@@ -54,6 +56,8 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
+ 	return container_of(sk, struct netlink_sock, sk);
+ }
+ 
++#define nlk_test_bit(nr, sk) test_bit(NETLINK_F_##nr, &nlk_sk(sk)->flags)
++
+ struct netlink_table {
+ 	struct rhashtable	hash;
+ 	struct hlist_head	mc_list;
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index e4f21b1067bcc..9c4f231be2757 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -27,15 +27,15 @@ static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
+ 
+ 	if (nlk->cb_running)
+ 		flags |= NDIAG_FLAG_CB_RUNNING;
+-	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
++	if (nlk_test_bit(RECV_PKTINFO, sk))
+ 		flags |= NDIAG_FLAG_PKTINFO;
+-	if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++	if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 		flags |= NDIAG_FLAG_BROADCAST_ERROR;
+-	if (nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)
++	if (nlk_test_bit(RECV_NO_ENOBUFS, sk))
+ 		flags |= NDIAG_FLAG_NO_ENOBUFS;
+-	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
++	if (nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 		flags |= NDIAG_FLAG_LISTEN_ALL_NSID;
+-	if (nlk->flags & NETLINK_F_CAP_ACK)
++	if (nlk_test_bit(CAP_ACK, sk))
+ 		flags |= NDIAG_FLAG_CAP_ACK;
+ 
+ 	return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
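
The netlink conversion above replaces open-coded flags |= / flags &= ~ read-modify-write sequences with numbered bits and test_bit()/assign_bit(), which update one bit atomically and are therefore safe against concurrent setsockopt() callers. The same shape in portable C11 atomics (illustrative only; the kernel helpers differ in detail):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { F_RECV_PKTINFO, F_CAP_ACK, F_EXT_ACK };   /* bit numbers, not masks */

static atomic_ulong flags;

static void assign_bit(int nr, bool val)
{
	unsigned long mask = 1UL << nr;

	if (val)
		atomic_fetch_or(&flags, mask);    /* set without clobbering other bits */
	else
		atomic_fetch_and(&flags, ~mask);
}

static bool test_bit(int nr)
{
	return atomic_load(&flags) & (1UL << nr);
}

int main(void)
{
	assign_bit(F_EXT_ACK, true);
	printf("EXT_ACK=%d CAP_ACK=%d\n", (int)test_bit(F_EXT_ACK),
	       (int)test_bit(F_CAP_ACK));
	return 0;
}
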
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index d7c697af3762f..315bd59dea056 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2722,7 +2722,7 @@ out_unparsable:
+ 
+ out_verifier:
+ 	trace_rpc_bad_verifier(task);
+-	goto out_err;
++	goto out_garbage;
+ 
+ out_msg_denied:
+ 	error = -EACCES;
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index ac059cefbeb39..775cac4d61006 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -281,6 +281,11 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ 	    ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr))
+ 		return -EALREADY;
+ 
++	if (ether_addr_equal(req->bss->bssid, dev->dev_addr) ||
++	    (req->link_id >= 0 &&
++	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
++		return -EINVAL;
++
+ 	return rdev_auth(rdev, dev, req);
+ }
+ 
+@@ -335,6 +340,9 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ 			if (req->links[i].bss == req->links[j].bss)
+ 				return -EINVAL;
+ 		}
++
++		if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
++			return -EINVAL;
+ 	}
+ 
+ 	if (wdev->connected &&
+@@ -342,6 +350,11 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ 	     !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
+ 		return -EALREADY;
+ 
++	if ((req->bss && ether_addr_equal(req->bss->bssid, dev->dev_addr)) ||
++	    (req->link_id >= 0 &&
++	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
++		return -EINVAL;
++
+ 	cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
+ 				  rdev->wiphy.ht_capa_mod_mask);
+ 	cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
+diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c
+index 27a1732264f95..29afaf3da54f3 100644
+--- a/net/wireless/ocb.c
++++ b/net/wireless/ocb.c
+@@ -68,6 +68,9 @@ int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ 	if (!rdev->ops->leave_ocb)
+ 		return -EOPNOTSUPP;
+ 
++	if (!wdev->u.ocb.chandef.chan)
++		return -ENOTCONN;
++
+ 	err = rdev_leave_ocb(rdev, dev);
+ 	if (!err)
+ 		memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef));
+diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
+index 418c46fe5ffc3..b99322f188e59 100644
+--- a/samples/hw_breakpoint/data_breakpoint.c
++++ b/samples/hw_breakpoint/data_breakpoint.c
+@@ -70,7 +70,9 @@ fail:
+ static void __exit hw_break_module_exit(void)
+ {
+ 	unregister_wide_hw_breakpoint(sample_hbp);
+-	symbol_put(ksym_name);
++#ifdef CONFIG_MODULE_UNLOAD
++	__symbol_put(ksym_name);
++#endif
+ 	printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
+ }
+ 
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index afd6637440418..5c16fe737a27a 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2748,14 +2748,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ static int selinux_fs_context_submount(struct fs_context *fc,
+ 				   struct super_block *reference)
+ {
+-	const struct superblock_security_struct *sbsec;
++	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
+ 	struct selinux_mnt_opts *opts;
+ 
++	/*
++	 * Ensure that fc->security remains NULL when no options are set
++	 * as expected by selinux_set_mnt_opts().
++	 */
++	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
++		return 0;
++
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+ 		return -ENOMEM;
+ 
+-	sbsec = selinux_superblock(reference);
+ 	if (sbsec->flags & FSCONTEXT_MNT)
+ 		opts->fscontext_sid = sbsec->sid;
+ 	if (sbsec->flags & CONTEXT_MNT)
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 317bdf6dcbef4..2873420c9aca8 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -481,6 +481,14 @@ static const struct config_entry config_table[] = {
+ 	},
+ #endif
+ 
++/* Lunar Lake */
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_LUNARLAKE)
++	/* Lunarlake-P */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = PCI_DEVICE_ID_INTEL_HDA_LNL_P,
++	},
++#endif
+ };
+ 
+ static const struct config_entry *snd_intel_dsp_find_config
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 0201029899cad..c86f8f9a61003 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -467,7 +467,9 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Rex"),
+ 		},
+-		.driver_data = (void *)(SOF_SDW_PCH_DMIC),
++		.driver_data = (void *)(SOF_SDW_PCH_DMIC |
++					SOF_BT_OFFLOAD_SSP(1) |
++					SOF_SSP_BT_OFFLOAD_PRESENT),
+ 	},
+ 	/* LunarLake devices */
+ 	{
+diff --git a/sound/soc/sof/amd/acp-ipc.c b/sound/soc/sof/amd/acp-ipc.c
+index 8a0fc635a997c..d07dc78074cc3 100644
+--- a/sound/soc/sof/amd/acp-ipc.c
++++ b/sound/soc/sof/amd/acp-ipc.c
+@@ -168,6 +168,8 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
+ 		if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ 			snd_sof_dsp_panic(sdev, sdev->dsp_box.offset + sizeof(status),
+ 					  true);
++			status = 0;
++			acp_mailbox_write(sdev, sdev->dsp_box.offset, &status, sizeof(status));
+ 			return IRQ_HANDLED;
+ 		}
+ 		snd_sof_ipc_msgs_rx(sdev);
+@@ -197,6 +199,8 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
+ 	acp_mailbox_read(sdev, sdev->debug_box.offset, &status, sizeof(u32));
+ 	if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ 		snd_sof_dsp_panic(sdev, sdev->dsp_oops_offset, true);
++		status = 0;
++		acp_mailbox_write(sdev, sdev->debug_box.offset, &status, sizeof(status));
+ 		return IRQ_HANDLED;
+ 	}
+ 
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index 698129dccc7df..3866dd3cba695 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1117,10 +1117,11 @@ static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ {
+ 	struct snd_soc_card *card = scomp->card;
+ 	struct snd_soc_pcm_runtime *rtd;
++	const char *sname = w->sname;
+ 	struct snd_soc_dai *cpu_dai;
+ 	int i, stream;
+ 
+-	if (!w->sname)
++	if (!sname)
+ 		return;
+ 
+ 	if (w->id == snd_soc_dapm_dai_out)
+@@ -1133,7 +1134,7 @@ static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ 	list_for_each_entry(rtd, &card->rtd_list, list) {
+ 		/* does stream match DAI link ? */
+ 		if (!rtd->dai_link->stream_name ||
+-		    strcmp(w->sname, rtd->dai_link->stream_name))
++		    strcmp(sname, rtd->dai_link->stream_name))
+ 			continue;
+ 
+ 		for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index f8deae4e26a15..44bbf80f0cfdd 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -51,9 +51,9 @@ enum autochan {
+  * Has the side effect of filling the channels[i].location values used
+  * in processing the buffer output.
+  **/
+-static int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
++static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+-	int bytes = 0;
++	unsigned int bytes = 0;
+ 	int i = 0;
+ 
+ 	while (i < num_channels) {
+@@ -348,7 +348,7 @@ int main(int argc, char **argv)
+ 	ssize_t read_size;
+ 	int dev_num = -1, trig_num = -1;
+ 	char *buffer_access = NULL;
+-	int scan_size;
++	unsigned int scan_size;
+ 	int noevents = 0;
+ 	int notrigger = 0;
+ 	char *dummy;
+@@ -674,7 +674,16 @@ int main(int argc, char **argv)
+ 	}
+ 
+ 	scan_size = size_from_channelarray(channels, num_channels);
+-	data = malloc(scan_size * buf_len);
++
++	size_t total_buf_len = scan_size * buf_len;
++
++	if (scan_size > 0 && total_buf_len / scan_size != buf_len) {
++		ret = -EFAULT;
++		perror("Integer overflow happened when calculating scan_size * buf_len");
++		goto error;
++	}
++
++	data = malloc(total_buf_len);
+ 	if (!data) {
+ 		ret = -ENOMEM;
+ 		goto error;
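
The patch guards the multiplication with a division-based check; with compiler help the same guarantee comes from __builtin_mul_overflow(). A small userspace sketch of that alternative (function name is illustrative):

    #include <errno.h>
    #include <stdlib.h>

    /* Overflow-safe sizing before malloc(), same intent as the hunk above. */
    static void *alloc_scan_buffer(size_t scan_size, size_t buf_len)
    {
        size_t total;

        /* GCC/Clang builtin: returns true when the product overflows */
        if (__builtin_mul_overflow(scan_size, buf_len, &total)) {
            errno = EOVERFLOW;
            return NULL;
        }
        return malloc(total);
    }
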
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index e07dff7eba600..13b5623b720f1 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -8356,6 +8356,7 @@ void bpf_object__close(struct bpf_object *obj)
+ 	bpf_object__elf_finish(obj);
+ 	bpf_object_unload(obj);
+ 	btf__free(obj->btf);
++	btf__free(obj->btf_vmlinux);
+ 	btf_ext__free(obj->btf_ext);
+ 
+ 	for (i = 0; i < obj->nr_maps; i++)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
+index e1eb4d5c027ab..2e384d731618b 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
+@@ -65,6 +65,7 @@ int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+ 	if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET,
+ 		   ZT_SIG_REGS_SIZE(zt->nregs)) != 0) {
+ 		fprintf(stderr, "ZT data invalid\n");
++		free(zeros);
+ 		return 1;
+ 	}
+ 
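
The selftest fix above plugs a leak on the early-error return. An equivalent shape that avoids the problem by construction is the single-exit form, sketched here with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int check_zeroed(const char *regs, size_t size)
    {
        int ret = 0;
        char *zeros = calloc(1, size);

        if (!zeros)
            return 1;

        if (memcmp(zeros, regs, size) != 0) {
            fprintf(stderr, "data invalid\n");
            ret = 1;
        }

        free(zeros);    /* runs on both the success and failure paths */
        return ret;
    }
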
+diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
+index cb5f18c06593d..d68264a5f3f03 100755
+--- a/tools/testing/selftests/ftrace/ftracetest
++++ b/tools/testing/selftests/ftrace/ftracetest
+@@ -31,6 +31,9 @@ err_ret=1
+ # kselftest skip code is 4
+ err_skip=4
+ 
++# umount required
++UMOUNT_DIR=""
++
+ # cgroup RT scheduling prevents chrt commands from succeeding, which
+ # induces failures in test wakeup tests.  Disable for the duration of
+ # the tests.
+@@ -45,6 +48,9 @@ setup() {
+ 
+ cleanup() {
+   echo $sched_rt_runtime_orig > $sched_rt_runtime
++  if [ -n "${UMOUNT_DIR}" ]; then
++    umount ${UMOUNT_DIR} ||:
++  fi
+ }
+ 
+ errexit() { # message
+@@ -160,11 +166,13 @@ if [ -z "$TRACING_DIR" ]; then
+ 	    mount -t tracefs nodev /sys/kernel/tracing ||
+ 	      errexit "Failed to mount /sys/kernel/tracing"
+ 	    TRACING_DIR="/sys/kernel/tracing"
++	    UMOUNT_DIR=${TRACING_DIR}
+ 	# If debugfs exists, then so does /sys/kernel/debug
+ 	elif [ -d "/sys/kernel/debug" ]; then
+ 	    mount -t debugfs nodev /sys/kernel/debug ||
+ 	      errexit "Failed to mount /sys/kernel/debug"
+ 	    TRACING_DIR="/sys/kernel/debug/tracing"
++	    UMOUNT_DIR=${TRACING_DIR}
+ 	else
+ 	    err_ret=$err_skip
+ 	    errexit "debugfs and tracefs are not configured in this kernel"
+diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
+index 486334981e601..8e7750e2eb97c 100644
+--- a/tools/testing/selftests/nolibc/nolibc-test.c
++++ b/tools/testing/selftests/nolibc/nolibc-test.c
+@@ -769,7 +769,6 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
+ 	lseek(fd, 0, SEEK_SET);
+ 
+ 	r = read(fd, buf, sizeof(buf) - 1);
+-	buf[r] = '\0';
+ 
+ 	fclose(memfile);
+ 
+@@ -779,6 +778,7 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
+ 		return 1;
+ 	}
+ 
++	buf[r] = '\0';
+ 	llen += printf(" \"%s\" = \"%s\"", expected, buf);
+ 	ret = strncmp(expected, buf, c);
+ 
+@@ -939,6 +939,35 @@ static const struct test test_names[] = {
+ 	{ 0 }
+ };
+ 
++int is_setting_valid(char *test)
++{
++	int idx, len, test_len, valid = 0;
++	char delimiter;
++
++	if (!test)
++		return valid;
++
++	test_len = strlen(test);
++
++	for (idx = 0; test_names[idx].name; idx++) {
++		len = strlen(test_names[idx].name);
++		if (test_len < len)
++			continue;
++
++		if (strncmp(test, test_names[idx].name, len) != 0)
++			continue;
++
++		delimiter = test[len];
++		if (delimiter != ':' && delimiter != ',' && delimiter != '\0')
++			continue;
++
++		valid = 1;
++		break;
++	}
++
++	return valid;
++}
++
+ int main(int argc, char **argv, char **envp)
+ {
+ 	int min = 0;
+@@ -964,10 +993,10 @@ int main(int argc, char **argv, char **envp)
+ 	 *    syscall:5-15[:.*],stdlib:8-10
+ 	 */
+ 	test = argv[1];
+-	if (!test)
++	if (!is_setting_valid(test))
+ 		test = getenv("NOLIBC_TEST");
+ 
+-	if (test) {
++	if (is_setting_valid(test)) {
+ 		char *comma, *colon, *dash, *value;
+ 
+ 		do {
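
Usage sketch for the validator added above, assuming "syscall" is one of the registered test_names entries as in this file (userspace, illustrative):

    #include <stdio.h>

    int is_setting_valid(char *test);   /* as defined in the hunk above */

    int main(void)
    {
        printf("%d\n", is_setting_valid("syscall:5-15")); /* 1: name + ':' range */
        printf("%d\n", is_setting_valid("syscall"));      /* 1: '\0' delimiter   */
        printf("%d\n", is_setting_valid("syscallz"));     /* 0: bad delimiter    */
        return 0;
    }
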


^ permalink raw reply related	[flat|nested] 24+ messages in thread
* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-19 13:18 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-19 13:18 UTC (permalink / raw
  To: gentoo-commits

commit:     8c73c649f83b6c2a9e42832967ad8da1ad50ab93
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 19 13:17:45 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 19 13:17:45 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8c73c649

Linux patch 6.5.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1003_linux-6.5.4.patch | 14405 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14409 insertions(+)

diff --git a/0000_README b/0000_README
index 270cfbdf..37ce8d41 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.5.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.3
 
+Patch:  1003_linux-6.5.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-6.5.4.patch b/1003_linux-6.5.4.patch
new file mode 100644
index 00000000..65cc5c95
--- /dev/null
+++ b/1003_linux-6.5.4.patch
@@ -0,0 +1,14405 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index fabaad3fd9c21..8d3afeede10e4 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -92,8 +92,6 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+- memory.kmem.limit_in_bytes          This knob is deprecated and writing to
+-                                     it will return -ENOTSUPP.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
+diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
+index 5cbb34d0b61b3..d3dd9c2c3019a 100644
+--- a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
++++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
+@@ -14,8 +14,6 @@ description: |
+   reads required input clock frequencies from the devicetree and acts as clock
+   provider for all clock consumers of PS clocks.
+ 
+-select: false
+-
+ properties:
+   compatible:
+     const: xlnx,versal-clk
+diff --git a/Makefile b/Makefile
+index 901cdfa5e7d3b..beddccac32831 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -1962,7 +1962,9 @@ quiet_cmd_depmod = DEPMOD  $(MODLIB)
+ 
+ modules_install:
+ 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
++ifndef modules_sign_only
+ 	$(call cmd,depmod)
++endif
+ 
+ else # CONFIG_MODULES
+ 
+diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
+index 1b0ffaeee16d0..5258cb81a16b4 100644
+--- a/arch/arc/include/asm/atomic-llsc.h
++++ b/arch/arc/include/asm/atomic-llsc.h
+@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)			\
+ 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+ 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ }									\
+ 
+ #define ATOMIC_OP_RETURN(op, asm_op)				\
+@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+ 	: [val]	"=&r"	(val)						\
+ 	: [ctr]	"r"	(&v->counter),					\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ 									\
+ 	return val;							\
+ }
+@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+ 	  [orig] "=&r" (orig)						\
+ 	: [ctr]	"r"	(&v->counter),					\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ 									\
+ 	return orig;							\
+ }
+diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
+index 6b6db981967ae..9b5791b854713 100644
+--- a/arch/arc/include/asm/atomic64-arcv2.h
++++ b/arch/arc/include/asm/atomic64-arcv2.h
+@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v)		\
+ 	"	bnz     1b		\n"				\
+ 	: "=&r"(val)							\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");							\
++	: "cc", "memory");						\
+ }									\
+ 
+ #define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
+@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)	\
+ 	"	bnz     1b		\n"				\
+ 	: [val] "=&r"(val)						\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");	/* memory clobber comes from smp_mb() */	\
++	: "cc", "memory");						\
+ 									\
+ 	return val;							\
+ }
+@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)	\
+ 	"	bnz     1b		\n"				\
+ 	: "=&r"(orig), "=&r"(val)					\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");	/* memory clobber comes from smp_mb() */	\
++	: "cc", "memory");						\
+ 									\
+ 	return orig;							\
+ }
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-linksys-ea6500-v2.dts b/arch/arm/boot/dts/broadcom/bcm4708-linksys-ea6500-v2.dts
+index f1412ba83defb..0454423fe166c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-linksys-ea6500-v2.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-linksys-ea6500-v2.dts
+@@ -19,7 +19,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+index 154639d56f357..11468d1409f72 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+@@ -125,15 +125,15 @@
+ 
+ 		syna,startup-delay-ms = <100>;
+ 
+-		rmi-f01@1 {
++		rmi4-f01@1 {
+ 			reg = <0x1>;
+-			syna,nosleep = <1>;
++			syna,nosleep-mode = <1>;
+ 		};
+ 
+-		rmi-f11@11 {
++		rmi4-f11@11 {
+ 			reg = <0x11>;
+-			syna,f11-flip-x = <1>;
+ 			syna,sensor-type = <1>;
++			touchscreen-inverted-x;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+index 37cd4dde53e47..a9ec1f6c1dea1 100644
+--- a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+@@ -207,8 +207,8 @@
+ 			power-on-delay = <10>;
+ 			reset-delay = <10>;
+ 
+-			panel-width-mm = <90>;
+-			panel-height-mm = <154>;
++			panel-width-mm = <56>;
++			panel-height-mm = <93>;
+ 
+ 			display-timings {
+ 				timing {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+index 7e4c496fd91ce..2b3bb5d0af17b 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+@@ -135,7 +135,8 @@
+ 			clocks = <&bpmp TEGRA186_CLK_AHUB>;
+ 			clock-names = "ahub";
+ 			assigned-clocks = <&bpmp TEGRA186_CLK_AHUB>;
+-			assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>;
++			assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLP_OUT0>;
++			assigned-clock-rates = <81600000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges = <0x02900800 0x02900800 0x11800>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 154fc8c0eb6d4..33f92b77cd9d9 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -231,7 +231,8 @@
+ 				clocks = <&bpmp TEGRA194_CLK_AHUB>;
+ 				clock-names = "ahub";
+ 				assigned-clocks = <&bpmp TEGRA194_CLK_AHUB>;
+-				assigned-clock-parents = <&bpmp TEGRA194_CLK_PLLA_OUT0>;
++				assigned-clock-parents = <&bpmp TEGRA194_CLK_PLLP_OUT0>;
++				assigned-clock-rates = <81600000>;
+ 				status = "disabled";
+ 
+ 				#address-cells = <2>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+index 617583ff27366..e7b4e30139645 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -1386,7 +1386,8 @@
+ 			clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+ 			clock-names = "ahub";
+ 			assigned-clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+-			assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
++			assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_P>;
++			assigned-clock-rates = <81600000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges = <0x702d0000 0x702d0000 0x0000e400>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index f4974e81dd4b0..0f12a8debd8ae 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -180,7 +180,8 @@
+ 				clocks = <&bpmp TEGRA234_CLK_AHUB>;
+ 				clock-names = "ahub";
+ 				assigned-clocks = <&bpmp TEGRA234_CLK_AHUB>;
+-				assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLA_OUT0>;
++				assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
++				assigned-clock-rates = <81600000>;
+ 				status = "disabled";
+ 
+ 				#address-cells = <2>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+index 0956c866d6cb1..1a1d3f92a5116 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+@@ -132,7 +132,6 @@
+ 	touchscreen@20 {
+ 		reg = <0x20>;
+ 		compatible = "syna,rmi4-i2c";
+-		interrupts-parent = <&tlmm>;
+ 		interrupts-extended = <&tlmm 65 IRQ_TYPE_EDGE_FALLING>;
+ 
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+index fbbb4f03440b3..d0515769e66d5 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+@@ -100,7 +100,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+@@ -128,7 +128,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+index 8a0d56872de77..79279ffb4099d 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+@@ -77,7 +77,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+index 49ecd33aeeb82..97cdad2a12e2e 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+@@ -83,7 +83,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+@@ -112,7 +112,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index a47593d72f6f5..d624b87c150d6 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -299,8 +299,8 @@ ifdef CONFIG_64BIT
+     endif
+   endif
+ 
+-  ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
+-    cflags-y += -msym32 -DKBUILD_64BIT_SYM32
++  ifeq ($(KBUILD_SYM32), y)
++    cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32
+   else
+     ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
+       $(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
+@@ -341,7 +341,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ 
+ KBUILD_LDFLAGS		+= -m $(ld-emul)
+ 
+-ifdef CONFIG_MIPS
++ifdef need-compiler
+ CHECKFLAGS += $(shell $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ 	grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ 	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
+index 6de13d08a3886..b70b9094fb7cd 100644
+--- a/arch/parisc/include/asm/led.h
++++ b/arch/parisc/include/asm/led.h
+@@ -11,8 +11,8 @@
+ #define	LED1		0x02
+ #define	LED0		0x01		/* bottom (or furthest left) LED */
+ 
+-#define	LED_LAN_TX	LED0		/* for LAN transmit activity */
+-#define	LED_LAN_RCV	LED1		/* for LAN receive activity */
++#define	LED_LAN_RCV	LED0		/* for LAN receive activity */
++#define	LED_LAN_TX	LED1		/* for LAN transmit activity */
+ #define	LED_DISK_IO	LED2		/* for disk activity */
+ #define	LED_HEARTBEAT	LED3		/* heartbeat */
+ 
+diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h
+deleted file mode 100644
+index 1314390b9034b..0000000000000
+--- a/arch/parisc/include/asm/mckinley.h
++++ /dev/null
+@@ -1,8 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef ASM_PARISC_MCKINLEY_H
+-#define ASM_PARISC_MCKINLEY_H
+-
+-/* declared in arch/parisc/kernel/setup.c */
+-extern struct proc_dir_entry * proc_mckinley_root;
+-
+-#endif /*ASM_PARISC_MCKINLEY_H*/
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 5e9371fbf3d5f..de2fb12120d2e 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -2088,6 +2088,7 @@ struct bpf_tramp_jit {
+ 				 */
+ 	int r14_off;		/* Offset of saved %r14 */
+ 	int run_ctx_off;	/* Offset of struct bpf_tramp_run_ctx */
++	int tccnt_off;		/* Offset of saved tailcall counter */
+ 	int do_fexit;		/* do_fexit: label */
+ };
+ 
+@@ -2258,12 +2259,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	tjit->r14_off = alloc_stack(tjit, sizeof(u64));
+ 	tjit->run_ctx_off = alloc_stack(tjit,
+ 					sizeof(struct bpf_tramp_run_ctx));
++	tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
+ 	/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
+ 	tjit->stack_size -= STACK_FRAME_OVERHEAD;
+ 	tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
+ 
+ 	/* aghi %r15,-stack_size */
+ 	EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
++	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
++	_EMIT6(0xd203f000 | tjit->tccnt_off,
++	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
+ 	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
+ 	if (nr_reg_args)
+ 		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
+@@ -2400,6 +2405,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 				       (nr_stack_args * sizeof(u64) - 1) << 16 |
+ 				       tjit->stack_args_off,
+ 			       0xf000 | tjit->orig_stack_args_off);
++		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
++		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
+ 		/* lgr %r1,%r8 */
+ 		EMIT4(0xb9040000, REG_1, REG_8);
+ 		/* %r1() */
+@@ -2456,6 +2463,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
+ 			      tjit->retval_off);
++	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
++	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
++	       0xf000 | tjit->tccnt_off);
+ 	/* aghi %r15,stack_size */
+ 	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
+ 	/* Emit an expoline for the following indirect jump. */
+diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
+index 151792162152c..645cccf3da88e 100644
+--- a/arch/sh/boards/mach-ap325rxa/setup.c
++++ b/arch/sh/boards/mach-ap325rxa/setup.c
+@@ -531,7 +531,7 @@ static int __init ap325rxa_devices_setup(void)
+ 	device_initialize(&ap325rxa_ceu_device.dev);
+ 	dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&ap325rxa_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
+index 674da7ebd8b7f..7ec03d4a4edf0 100644
+--- a/arch/sh/boards/mach-ecovec24/setup.c
++++ b/arch/sh/boards/mach-ecovec24/setup.c
+@@ -1454,15 +1454,13 @@ static int __init arch_setup(void)
+ 	device_initialize(&ecovec_ceu_devices[0]->dev);
+ 	dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
+ 				    ceu0_dma_membase, ceu0_dma_membase,
+-				    ceu0_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ecovec_ceu_devices[0]);
+ 
+ 	device_initialize(&ecovec_ceu_devices[1]->dev);
+ 	dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
+ 				    ceu1_dma_membase, ceu1_dma_membase,
+-				    ceu1_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ecovec_ceu_devices[1]);
+ 
+ 	gpiod_add_lookup_table(&cn12_power_gpiod_table);
+diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
+index 20f4db778ed6a..c6d556dfbbbe6 100644
+--- a/arch/sh/boards/mach-kfr2r09/setup.c
++++ b/arch/sh/boards/mach-kfr2r09/setup.c
+@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
+ 	device_initialize(&kfr2r09_ceu_device.dev);
+ 	dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&kfr2r09_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
+index f60061283c482..773ee767d0c4e 100644
+--- a/arch/sh/boards/mach-migor/setup.c
++++ b/arch/sh/boards/mach-migor/setup.c
+@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
+ 	device_initialize(&migor_ceu_device.dev);
+ 	dma_declare_coherent_memory(&migor_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&migor_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
+index b60a2626e18b2..6495f93540654 100644
+--- a/arch/sh/boards/mach-se/7724/setup.c
++++ b/arch/sh/boards/mach-se/7724/setup.c
+@@ -940,15 +940,13 @@ static int __init devices_setup(void)
+ 	device_initialize(&ms7724se_ceu_devices[0]->dev);
+ 	dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
+ 				    ceu0_dma_membase, ceu0_dma_membase,
+-				    ceu0_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ms7724se_ceu_devices[0]);
+ 
+ 	device_initialize(&ms7724se_ceu_devices[1]->dev);
+ 	dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
+ 				    ceu1_dma_membase, ceu1_dma_membase,
+-				    ceu1_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ms7724se_ceu_devices[1]);
+ 
+ 	return platform_add_devices(ms7724se_devices,
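
All five sh board fixes above correct the same call: the fourth argument of dma_declare_coherent_memory() is a size in bytes, not an end address, so passing "membase + SIZE - 1" declared a vastly oversized region. A sketch of the corrected shape (helper name and parameters are illustrative):

    #include <linux/device.h>
    #include <linux/dma-map-ops.h>

    static int my_declare_ceu_mem(struct device *dev, phys_addr_t membase,
                                  size_t size)
    {
        /* phys addr, device-visible addr, then a SIZE, not an end address */
        return dma_declare_coherent_memory(dev, membase, membase, size);
    }
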
+diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
+index c95f48ff3f6fb..6ecba5f521eb6 100644
+--- a/arch/sh/drivers/push-switch.c
++++ b/arch/sh/drivers/push-switch.c
+@@ -101,8 +101,8 @@ static int switch_drv_remove(struct platform_device *pdev)
+ 		device_remove_file(&pdev->dev, &dev_attr_switch);
+ 
+ 	platform_set_drvdata(pdev, NULL);
+-	flush_work(&psw->work);
+ 	timer_shutdown_sync(&psw->debounce);
++	flush_work(&psw->work);
+ 	free_irq(irq, pdev);
+ 
+ 	kfree(psw);
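
The reordering above matters because the debounce timer is what queues the work item: if the work is flushed first, a timer firing afterwards can re-queue it against a structure about to be freed. A minimal sketch of the safe teardown order (struct and names hypothetical):

    #include <linux/slab.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_switch {
        struct timer_list debounce;   /* queues ->work when it fires */
        struct work_struct work;
    };

    static void my_switch_teardown(struct my_switch *psw)
    {
        timer_shutdown_sync(&psw->debounce); /* no new work after this */
        flush_work(&psw->work);              /* drain anything queued  */
        kfree(psw);
    }
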
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 3b12e6b994123..6c2e3ff3cb28f 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -101,12 +101,6 @@ static inline int cpu_has_svm(const char **msg)
+ 		return 0;
+ 	}
+ 
+-	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
+-		if (msg)
+-			*msg = "can't execute cpuid_8000000a";
+-		return 0;
+-	}
+-
+ 	if (!boot_cpu_has(X86_FEATURE_SVM)) {
+ 		if (msg)
+ 			*msg = "svm not available";
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index cfc8ab7730250..2092db892d7d0 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -791,6 +791,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ 	int ret = 0;
+ 	unsigned long flags;
+ 	struct amd_svm_iommu_ir *ir;
++	u64 entry;
+ 
+ 	/**
+ 	 * In some cases, the existing irte is updated and re-set,
+@@ -824,6 +825,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ 	ir->data = pi->ir_data;
+ 
+ 	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
++	/*
++	 * Update the target pCPU for IOMMU doorbells if the vCPU is running.
++	 * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
++	 * will update the pCPU info when the vCPU is awakened and/or scheduled in.
++	 * See also avic_vcpu_load().
++	 */
++	entry = READ_ONCE(*(svm->avic_physical_id_cache));
++	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
++		amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
++				    true, pi->ir_data);
++
+ 	list_add(&ir->node, &svm->ir_list);
+ 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ out:
+@@ -986,10 +999,11 @@ static inline int
+ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+ {
+ 	int ret = 0;
+-	unsigned long flags;
+ 	struct amd_svm_iommu_ir *ir;
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
++	lockdep_assert_held(&svm->ir_list_lock);
++
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
+ 		return 0;
+ 
+@@ -997,19 +1011,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+ 	 * Here, we go through the per-vcpu ir_list to update all existing
+ 	 * interrupt remapping table entry targeting this vcpu.
+ 	 */
+-	spin_lock_irqsave(&svm->ir_list_lock, flags);
+-
+ 	if (list_empty(&svm->ir_list))
+-		goto out;
++		return 0;
+ 
+ 	list_for_each_entry(ir, &svm->ir_list, node) {
+ 		ret = amd_iommu_update_ga(cpu, r, ir->data);
+ 		if (ret)
+-			break;
++			return ret;
+ 	}
+-out:
+-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+-	return ret;
++	return 0;
+ }
+ 
+ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+@@ -1017,6 +1027,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	u64 entry;
+ 	int h_physical_id = kvm_cpu_get_apicid(cpu);
+ 	struct vcpu_svm *svm = to_svm(vcpu);
++	unsigned long flags;
+ 
+ 	lockdep_assert_preemption_disabled();
+ 
+@@ -1033,6 +1044,15 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	if (kvm_vcpu_is_blocking(vcpu))
+ 		return;
+ 
++	/*
++	 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
++	 * _currently_ have assigned devices, as that can change.  Holding
++	 * ir_list_lock ensures that either svm_ir_list_add() will consume
++	 * up-to-date entry information, or that this task will wait until
++	 * svm_ir_list_add() completes to set the new target pCPU.
++	 */
++	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
+ 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ 	WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+ 
+@@ -1042,25 +1062,48 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
++
++	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ }
+ 
+ void avic_vcpu_put(struct kvm_vcpu *vcpu)
+ {
+ 	u64 entry;
+ 	struct vcpu_svm *svm = to_svm(vcpu);
++	unsigned long flags;
+ 
+ 	lockdep_assert_preemption_disabled();
+ 
++	/*
++	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
++	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
++	 * to modify the entry, and preemption is disabled.  I.e. the vCPU
++	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
++	 * recursively.
++	 */
+ 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ 
+ 	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+ 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+ 		return;
+ 
++	/*
++	 * Take and hold the per-vCPU interrupt remapping lock while updating
++	 * the Physical ID entry even though the lock doesn't protect against
++	 * multiple writers (see above).  Holding ir_list_lock ensures that
++	 * either svm_ir_list_add() will consume up-to-date entry information,
++	 * or that this task will wait until svm_ir_list_add() completes to
++	 * mark the vCPU as not running.
++	 */
++	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
+ 	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+ 
+ 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
++
++	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
++
+ }
+ 
+ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
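
The AVIC changes above all serve one invariant: the running/target state and the IR list are only read or written under ir_list_lock, so svm_ir_list_add() either observes the final pCPU or waits for it. Reduced to its skeleton (types and names hypothetical):

    #include <linux/compiler.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_vcpu {
        spinlock_t ir_list_lock;
        struct list_head ir_list;
        u64 physical_id_entry;
    };

    static void my_vcpu_set_running(struct my_vcpu *v, u64 entry)
    {
        unsigned long flags;

        spin_lock_irqsave(&v->ir_list_lock, flags);
        WRITE_ONCE(v->physical_id_entry, entry);
        /* ... retarget every ir_list entry while holding the lock ... */
        spin_unlock_irqrestore(&v->ir_list_lock, flags);
    }
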
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 96936ddf1b3c5..c66c823ae222a 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -695,10 +695,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 
+ 	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
+ 
+-	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
+-		WARN_ON(!svm->tsc_scaling_enabled);
++	if (svm->tsc_scaling_enabled &&
++	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
+ 		nested_svm_update_tsc_ratio_msr(vcpu);
+-	}
+ 
+ 	vmcb02->control.int_ctl             =
+ 		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+@@ -1101,8 +1100,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
+ 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
+ 	}
+ 
+-	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
+-		WARN_ON(!svm->tsc_scaling_enabled);
++	if (kvm_caps.has_tsc_control &&
++	    vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
+ 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ 	}
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index d3aec1f2cad20..cefb67a8c668c 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1725,7 +1725,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
+ 		 * Note, the source is not required to have the same number of
+ 		 * vCPUs as the destination when migrating a vanilla SEV VM.
+ 		 */
+-		src_vcpu = kvm_get_vcpu(dst_kvm, i);
++		src_vcpu = kvm_get_vcpu(src_kvm, i);
+ 		src_svm = to_svm(src_vcpu);
+ 
+ 		/*
+@@ -2881,7 +2881,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+ 					    svm->sev_es.ghcb_sa);
+ 		break;
+ 	case SVM_VMGEXIT_NMI_COMPLETE:
+-		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
++		++vcpu->stat.nmi_window_exits;
++		svm->nmi_masked = false;
++		kvm_make_request(KVM_REQ_EVENT, vcpu);
++		ret = 1;
+ 		break;
+ 	case SVM_VMGEXIT_AP_HLT_LOOP:
+ 		ret = kvm_emulate_ap_reset_hold(vcpu);
+@@ -2952,9 +2955,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	/*
+ 	 * An SEV-ES guest requires a VMSA area that is a separate from the
+ 	 * VMCB page. Do not include the encryption mask on the VMSA physical
+-	 * address since hardware will access it using the guest key.
++	 * address since hardware will access it using the guest key.  Note,
++	 * the VMSA will be NULL if this vCPU is the destination for intrahost
++	 * migration, and will be copied later.
+ 	 */
+-	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
++	if (svm->sev_es.vmsa)
++		svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+ 
+ 	/* Can't intercept CR register access, HV can't modify CR registers */
+ 	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index d4bfdc607fe7f..e3acccc126166 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -365,6 +365,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+ 		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
+ 
+ }
++static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
++					void *insn, int insn_len);
+ 
+ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
+ 					   bool commit_side_effects)
+@@ -385,6 +387,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	if (!svm->next_rip) {
++		/*
++		 * FIXME: Drop this when kvm_emulate_instruction() does the
++		 * right thing and treats "can't emulate" as outright failure
++		 * for EMULTYPE_SKIP.
++		 */
++		if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
++			return 0;
++
+ 		if (unlikely(!commit_side_effects))
+ 			old_rflags = svm->vmcb->save.rflags;
+ 
+@@ -2510,12 +2520,13 @@ static int iret_interception(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
++	WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
++
+ 	++vcpu->stat.nmi_window_exits;
+ 	svm->awaiting_iret_completion = true;
+ 
+ 	svm_clr_iret_intercept(svm);
+-	if (!sev_es_guest(vcpu->kvm))
+-		svm->nmi_iret_rip = kvm_rip_read(vcpu);
++	svm->nmi_iret_rip = kvm_rip_read(vcpu);
+ 
+ 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 	return 1;
+@@ -3918,12 +3929,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
+ 	svm->soft_int_injected = false;
+ 
+ 	/*
+-	 * If we've made progress since setting HF_IRET_MASK, we've
++	 * If we've made progress since setting awaiting_iret_completion, we've
+ 	 * executed an IRET and can allow NMI injection.
+ 	 */
+ 	if (svm->awaiting_iret_completion &&
+-	    (sev_es_guest(vcpu->kvm) ||
+-	     kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
++	    kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
+ 		svm->awaiting_iret_completion = false;
+ 		svm->nmi_masked = false;
+ 		kvm_make_request(KVM_REQ_EVENT, vcpu);
+@@ -4651,16 +4661,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ 	 * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
+ 	 * decode garbage.
+ 	 *
+-	 * Inject #UD if KVM reached this point without an instruction buffer.
+-	 * In practice, this path should never be hit by a well-behaved guest,
+-	 * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
+-	 * is still theoretically reachable, e.g. via unaccelerated fault-like
+-	 * AVIC access, and needs to be handled by KVM to avoid putting the
+-	 * guest into an infinite loop.   Injecting #UD is somewhat arbitrary,
+-	 * but its the least awful option given lack of insight into the guest.
++	 * If KVM is NOT trying to simply skip an instruction, inject #UD if
++	 * KVM reached this point without an instruction buffer.  In practice,
++	 * this path should never be hit by a well-behaved guest, e.g. KVM
++	 * doesn't intercept #UD or #GP for SEV guests, but this path is still
++	 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
++	 * access, and needs to be handled by KVM to avoid putting the guest
++	 * into an infinite loop.   Injecting #UD is somewhat arbitrary, but
++	 * it's the least awful option given lack of insight into the guest.
++	 *
++	 * If KVM is trying to skip an instruction, simply resume the guest.
++	 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
++	 * will attempt to re-inject the INT3/INTO and skip the instruction.
++	 * In that scenario, retrying the INT3/INTO and hoping the guest will
++	 * make forward progress is the only option that has a chance of
++	 * success (and in practice it will work the vast majority of the time).
+ 	 */
+ 	if (unlikely(!insn)) {
+-		kvm_queue_exception(vcpu, UD_VECTOR);
++		if (!(emul_type & EMULTYPE_SKIP))
++			kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return false;
+ 	}
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index df461f387e20d..f2fb67a9dc050 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7243,13 +7243,20 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 				   flags);
+ 
+ 	vcpu->arch.cr2 = native_read_cr2();
++	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
++
++	vmx->idt_vectoring_info = 0;
+ 
+ 	vmx_enable_fb_clear(vmx);
+ 
+-	if (unlikely(vmx->fail))
++	if (unlikely(vmx->fail)) {
+ 		vmx->exit_reason.full = 0xdead;
+-	else
+-		vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
++		goto out;
++	}
++
++	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
++	if (likely(!vmx->exit_reason.failed_vmentry))
++		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ 
+ 	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
+ 	    is_nmi(vmx_get_intr_info(vcpu))) {
+@@ -7258,6 +7265,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 		kvm_after_interrupt(vcpu);
+ 	}
+ 
++out:
+ 	guest_state_exit_irqoff();
+ }
+ 
+@@ -7379,8 +7387,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	loadsegment(es, __USER_DS);
+ #endif
+ 
+-	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
+-
+ 	pt_guest_exit(vmx);
+ 
+ 	kvm_load_host_xsave_state(vcpu);
+@@ -7397,17 +7403,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 		vmx->nested.nested_run_pending = 0;
+ 	}
+ 
+-	vmx->idt_vectoring_info = 0;
+-
+ 	if (unlikely(vmx->fail))
+ 		return EXIT_FASTPATH_NONE;
+ 
+ 	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
+ 		kvm_machine_check();
+ 
+-	if (likely(!vmx->exit_reason.failed_vmentry))
+-		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-
+ 	trace_kvm_exit(vcpu, KVM_ISA_VMX);
+ 
+ 	if (unlikely(vmx->exit_reason.failed_vmentry))
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 7397ff199d669..e78bc3b65ec80 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -697,11 +697,41 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
+ 	return true;
+ }
+ 
++static unsigned int calculate_io_allowed(u32 iops_limit,
++					 unsigned long jiffy_elapsed)
++{
++	unsigned int io_allowed;
++	u64 tmp;
++
++	/*
++	 * jiffy_elapsed should not be a big value as minimum iops can be
++	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
++	 * will allow dispatch after 1 second and after that slice should
++	 * have been trimmed.
++	 */
++
++	tmp = (u64)iops_limit * jiffy_elapsed;
++	do_div(tmp, HZ);
++
++	if (tmp > UINT_MAX)
++		io_allowed = UINT_MAX;
++	else
++		io_allowed = tmp;
++
++	return io_allowed;
++}
++
++static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
++{
++	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
++}
++
+ /* Trim the used slices and adjust slice start accordingly */
+ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
+ {
+-	unsigned long nr_slices, time_elapsed, io_trim;
+-	u64 bytes_trim, tmp;
++	unsigned long time_elapsed;
++	long long bytes_trim;
++	int io_trim;
+ 
+ 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
+ 
+@@ -723,67 +753,38 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
+ 
+ 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
+ 
+-	time_elapsed = jiffies - tg->slice_start[rw];
+-
+-	nr_slices = time_elapsed / tg->td->throtl_slice;
+-
+-	if (!nr_slices)
++	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
++				 tg->td->throtl_slice);
++	if (!time_elapsed)
+ 		return;
+-	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
+-	do_div(tmp, HZ);
+-	bytes_trim = tmp;
+ 
+-	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
+-		HZ;
+-
+-	if (!bytes_trim && !io_trim)
++	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
++					     time_elapsed) +
++		     tg->carryover_bytes[rw];
++	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
++		  tg->carryover_ios[rw];
++	if (bytes_trim <= 0 && io_trim <= 0)
+ 		return;
+ 
+-	if (tg->bytes_disp[rw] >= bytes_trim)
++	tg->carryover_bytes[rw] = 0;
++	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
+ 		tg->bytes_disp[rw] -= bytes_trim;
+ 	else
+ 		tg->bytes_disp[rw] = 0;
+ 
+-	if (tg->io_disp[rw] >= io_trim)
++	tg->carryover_ios[rw] = 0;
++	if ((int)tg->io_disp[rw] >= io_trim)
+ 		tg->io_disp[rw] -= io_trim;
+ 	else
+ 		tg->io_disp[rw] = 0;
+ 
+-	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
++	tg->slice_start[rw] += time_elapsed;
+ 
+ 	throtl_log(&tg->service_queue,
+-		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
+-		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+-		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
+-}
+-
+-static unsigned int calculate_io_allowed(u32 iops_limit,
+-					 unsigned long jiffy_elapsed)
+-{
+-	unsigned int io_allowed;
+-	u64 tmp;
+-
+-	/*
+-	 * jiffy_elapsed should not be a big value as minimum iops can be
+-	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
+-	 * will allow dispatch after 1 second and after that slice should
+-	 * have been trimmed.
+-	 */
+-
+-	tmp = (u64)iops_limit * jiffy_elapsed;
+-	do_div(tmp, HZ);
+-
+-	if (tmp > UINT_MAX)
+-		io_allowed = UINT_MAX;
+-	else
+-		io_allowed = tmp;
+-
+-	return io_allowed;
+-}
+-
+-static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+-{
+-	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
++		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
++		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
++		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
++		   jiffies);
+ }
+ 
+ static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
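
The helpers hoisted above compute a simple pro-rated allowance, bytes = bps_limit * elapsed_jiffies / HZ (the kernel version uses mul_u64_u64_div_u64() so the 64x64 product cannot overflow). A userspace check of the arithmetic, assuming HZ = 1000:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000 /* assumed tick rate for the example */

    static uint64_t calc_bytes_allowed(uint64_t bps_limit,
                                       unsigned long jiffy_elapsed)
    {
        return bps_limit * jiffy_elapsed / HZ;
    }

    int main(void)
    {
        /* 1 MiB/s over a quarter second permits exactly 256 KiB */
        printf("%llu\n",
               (unsigned long long)calc_bytes_allowed(1 << 20, HZ / 4));
        return 0;
    }
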
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index 831bfd2b2d39d..bdddef2c59eec 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -118,8 +118,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
+ 	struct vpu_jsm_msg resp;
+ 	int ret;
+ 
+-	if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
+-		return -ENOMEM;
++	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
+ 
+ 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
+ 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
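
The strncpy() check removed above could never fail: strncpy() returns its destination pointer, not NULL, and it also does not guarantee NUL termination. strscpy() always terminates the destination and reports truncation, as in this sketch (helper name illustrative):

    #include <linux/errno.h>
    #include <linux/string.h>

    static int my_copy_cmd(char *dst, size_t dst_size, const char *cmd)
    {
        ssize_t n = strscpy(dst, cmd, dst_size);

        return n < 0 ? n : 0;   /* -E2BIG when cmd was truncated */
    }
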
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index addba109406be..c1e85e0ed43e5 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -421,6 +421,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
++	/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
++	{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
+index 996516e64f135..616064b02de6e 100644
+--- a/drivers/ata/pata_falcon.c
++++ b/drivers/ata/pata_falcon.c
+@@ -123,8 +123,8 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
+ 	struct resource *base_res, *ctl_res, *irq_res;
+ 	struct ata_host *host;
+ 	struct ata_port *ap;
+-	void __iomem *base;
+-	int irq = 0;
++	void __iomem *base, *ctl_base;
++	int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
+ 
+ 	dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
+ 
+@@ -165,26 +165,34 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
+ 	ap->pio_mask = ATA_PIO4;
+ 	ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+ 
+-	base = (void __iomem *)base_mem_res->start;
+ 	/* N.B. this assumes data_addr will be used for word-sized I/O only */
+-	ap->ioaddr.data_addr		= base + 0 + 0 * 4;
+-	ap->ioaddr.error_addr		= base + 1 + 1 * 4;
+-	ap->ioaddr.feature_addr		= base + 1 + 1 * 4;
+-	ap->ioaddr.nsect_addr		= base + 1 + 2 * 4;
+-	ap->ioaddr.lbal_addr		= base + 1 + 3 * 4;
+-	ap->ioaddr.lbam_addr		= base + 1 + 4 * 4;
+-	ap->ioaddr.lbah_addr		= base + 1 + 5 * 4;
+-	ap->ioaddr.device_addr		= base + 1 + 6 * 4;
+-	ap->ioaddr.status_addr		= base + 1 + 7 * 4;
+-	ap->ioaddr.command_addr		= base + 1 + 7 * 4;
+-
+-	base = (void __iomem *)ctl_mem_res->start;
+-	ap->ioaddr.altstatus_addr	= base + 1;
+-	ap->ioaddr.ctl_addr		= base + 1;
+-
+-	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+-		      (unsigned long)base_mem_res->start,
+-		      (unsigned long)ctl_mem_res->start);
++	ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
++
++	if (base_res) {		/* only Q40 has IO resources */
++		io_offset = 0x10000;
++		reg_shift = 0;
++		base = (void __iomem *)base_res->start;
++		ctl_base = (void __iomem *)ctl_res->start;
++	} else {
++		base = (void __iomem *)base_mem_res->start;
++		ctl_base = (void __iomem *)ctl_mem_res->start;
++	}
++
++	ap->ioaddr.error_addr	= base + io_offset + (1 << reg_shift);
++	ap->ioaddr.feature_addr	= base + io_offset + (1 << reg_shift);
++	ap->ioaddr.nsect_addr	= base + io_offset + (2 << reg_shift);
++	ap->ioaddr.lbal_addr	= base + io_offset + (3 << reg_shift);
++	ap->ioaddr.lbam_addr	= base + io_offset + (4 << reg_shift);
++	ap->ioaddr.lbah_addr	= base + io_offset + (5 << reg_shift);
++	ap->ioaddr.device_addr	= base + io_offset + (6 << reg_shift);
++	ap->ioaddr.status_addr	= base + io_offset + (7 << reg_shift);
++	ap->ioaddr.command_addr	= base + io_offset + (7 << reg_shift);
++
++	ap->ioaddr.altstatus_addr	= ctl_base + io_offset;
++	ap->ioaddr.ctl_addr		= ctl_base + io_offset;
++
++	ata_port_desc(ap, "cmd %px ctl %px data %px",
++		      base, ctl_base, ap->ioaddr.data_addr);
+ 
+ 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (irq_res && irq_res->start > 0) {
+diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
+index 6f6734c09b111..3ab8dca266cc4 100644
+--- a/drivers/ata/pata_ftide010.c
++++ b/drivers/ata/pata_ftide010.c
+@@ -567,6 +567,7 @@ static struct platform_driver pata_ftide010_driver = {
+ };
+ module_platform_driver(pata_ftide010_driver);
+ 
++MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
+ MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
+index c42cc9bbbc4ed..b52e4157443e7 100644
+--- a/drivers/ata/sata_gemini.c
++++ b/drivers/ata/sata_gemini.c
+@@ -428,6 +428,7 @@ static struct platform_driver gemini_sata_driver = {
+ };
+ module_platform_driver(gemini_sata_driver);
+ 
++MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
+ MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 864013019d6b6..968090935eb23 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ 	struct nullb_queue *nq = hctx->driver_data;
+ 	LIST_HEAD(list);
+ 	int nr = 0;
++	struct request *rq;
+ 
+ 	spin_lock(&nq->poll_lock);
+ 	list_splice_init(&nq->poll_list, &list);
++	list_for_each_entry(rq, &list, queuelist)
++		blk_mq_set_request_complete(rq);
+ 	spin_unlock(&nq->poll_lock);
+ 
+ 	while (!list_empty(&list)) {
+@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
+ 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ 
+-	pr_info("rq %p timed out\n", rq);
+-
+ 	if (hctx->type == HCTX_TYPE_POLL) {
+ 		struct nullb_queue *nq = hctx->driver_data;
+ 
+ 		spin_lock(&nq->poll_lock);
++		/* The request may have completed meanwhile. */
++		if (blk_mq_request_completed(rq)) {
++			spin_unlock(&nq->poll_lock);
++			return BLK_EH_DONE;
++		}
+ 		list_del_init(&rq->queuelist);
+ 		spin_unlock(&nq->poll_lock);
+ 	}
+ 
++	pr_info("rq %p timed out\n", rq);
++
+ 	/*
+ 	 * If the device is marked as blocking (i.e. memory backed or zoned
+ 	 * device), the submission path may be blocked waiting for resources
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 083459028a4b8..8a4362d75fc43 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -470,6 +470,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+ 
+ 	/* Trigger MHI RESET so that the device will not access host memory */
+ 	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
++		/* Skip MHI RESET if in RDDM state */
++		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
++			goto skip_mhi_reset;
++
+ 		dev_dbg(dev, "Triggering MHI Reset in device\n");
+ 		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ 
+@@ -495,6 +499,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+ 		}
+ 	}
+ 
++skip_mhi_reset:
+ 	dev_dbg(dev,
+ 		 "Waiting for all pending event ring processing to complete\n");
+ 	mhi_event = mhi_cntrl->mhi_event;
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index a5dbebb1acfcf..ea085b14ab7c9 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -775,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
+ 				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ 				buf->header.length,
+ 				ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto out;
+ 		}
+ 		crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ 		rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ 		if (rc)
+-			return rc;
++			goto out;
+ 	}
+ 
+ 	priv->sm = sm;
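
The tpm_crb fix converts direct returns into jumps to a shared label, the usual goto-cleanup idiom: once any setup has side effects to undo, every error path funnels through one exit. A generic sketch with hypothetical resource helpers:

    #include <linux/device.h>

    int acquire_a(struct device *dev);    /* hypothetical */
    int acquire_b(struct device *dev);
    void release_a(struct device *dev);

    static int my_setup(struct device *dev)
    {
        int rc;

        rc = acquire_a(dev);
        if (rc)
            return rc;      /* nothing to undo yet */

        rc = acquire_b(dev);
        if (rc)
            goto out;       /* undo A on the way out */

        return 0;
    out:
        release_a(dev);
        return rc;
    }
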
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index 7150c59bbfc95..0d58d85c375e9 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -64,8 +64,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ 	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ 	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ 	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
+-	PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
+-	PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+ };
+ 
+ struct imx_pll14xx_clk imx_1443x_pll = {
+@@ -139,11 +137,10 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
+ 	/*
+ 	 * Fractional PLL constrains:
+ 	 *
+-	 * a) 6MHz <= prate <= 25MHz
+-	 * b) 1 <= p <= 63 (1 <= p <= 4 prate = 24MHz)
+-	 * c) 64 <= m <= 1023
+-	 * d) 0 <= s <= 6
+-	 * e) -32768 <= k <= 32767
++	 * a) 1 <= p <= 63
++	 * b) 64 <= m <= 1023
++	 * c) 0 <= s <= 6
++	 * d) -32768 <= k <= 32767
+ 	 *
+ 	 * fvco = (m * 65536 + k) * prate / (p * 65536)
+ 	 */
+@@ -186,7 +183,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
+ 	}
+ 
+ 	/* Finally calculate best values */
+-	for (pdiv = 1; pdiv <= 7; pdiv++) {
++	for (pdiv = 1; pdiv <= 63; pdiv++) {
+ 		for (sdiv = 0; sdiv <= 6; sdiv++) {
+ 			/* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
+ 			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
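
The first remaining row of the rate table checks out against the formula in the comment: assuming the usual 24 MHz reference, m = 325, p = 3, s = 2, k = 0 give fvco = (325 * 65536 + 0) * 24 MHz / (3 * 65536) = 2600 MHz, and the output is fvco / 2^s = 650 MHz. A userspace verification:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t prate = 24000000, m = 325, p = 3, s = 2, k = 0;
        uint64_t fvco = (m * 65536 + k) * prate / (p * 65536);

        printf("%llu\n", (unsigned long long)(fvco >> s)); /* 650000000 */
        return 0;
    }
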
+diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
+index 8a4ba7a19ed12..6f56bdbf02047 100644
+--- a/drivers/clk/qcom/camcc-sc7180.c
++++ b/drivers/clk/qcom/camcc-sc7180.c
+@@ -1664,7 +1664,7 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = pm_runtime_get(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index adbfd30bfc964..84db4ff5485aa 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -1776,8 +1776,10 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
++		goto err_put_rpm;
++	}
+ 
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+@@ -1792,9 +1794,16 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
+ 	regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
+ 
+ 	ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
++	if (ret)
++		goto err_put_rpm;
+ 
+ 	pm_runtime_put(&pdev->dev);
+ 
++	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
+index 1e5a110818603..b2fae9001ff23 100644
+--- a/drivers/clk/qcom/dispcc-sm8550.c
++++ b/drivers/clk/qcom/dispcc-sm8550.c
+@@ -1761,8 +1761,10 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	regmap = qcom_cc_map(pdev, &disp_cc_sm8550_desc);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
++		goto err_put_rpm;
++	}
+ 
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+@@ -1777,9 +1779,16 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
+ 	regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0));
+ 
+ 	ret = qcom_cc_really_probe(pdev, &disp_cc_sm8550_desc, regmap);
++	if (ret)
++		goto err_put_rpm;
+ 
+ 	pm_runtime_put(&pdev->dev);
+ 
++	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
+index 8bed02a748aba..470a277603a92 100644
+--- a/drivers/clk/qcom/gcc-mdm9615.c
++++ b/drivers/clk/qcom/gcc-mdm9615.c
+@@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = {
+ 	.enable_mask = BIT(0),
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "pll0_vote",
+-		.parent_names = (const char *[]){ "pll8" },
++		.parent_names = (const char *[]){ "pll0" },
+ 		.num_parents = 1,
+ 		.ops = &clk_pll_vote_ops,
+ 	},
+diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
+index 0df2b29e95e31..e6b815aec46a1 100644
+--- a/drivers/clk/qcom/lpasscc-sc7280.c
++++ b/drivers/clk/qcom/lpasscc-sc7280.c
+@@ -118,9 +118,13 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 	ret = pm_clk_add(&pdev->dev, "iface");
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to acquire iface clock\n");
+-		goto destroy_pm_clk;
++		goto err_destroy_pm_clk;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		goto err_destroy_pm_clk;
++
+ 	if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ 		lpass_regmap_config.name = "qdsp6ss";
+ 		lpass_regmap_config.max_register = 0x3f;
+@@ -128,7 +132,7 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 
+ 		ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ 		if (ret)
+-			goto destroy_pm_clk;
++			goto err_put_rpm;
+ 	}
+ 
+ 	lpass_regmap_config.name = "top_cc";
+@@ -137,11 +141,15 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ 	if (ret)
+-		goto destroy_pm_clk;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
+ 
+-destroy_pm_clk:
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++err_destroy_pm_clk:
+ 	pm_clk_destroy(&pdev->dev);
+ 
+ 	return ret;
+diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
+index 5a14074406623..d106bc65470e1 100644
+--- a/drivers/clk/qcom/mss-sc7180.c
++++ b/drivers/clk/qcom/mss-sc7180.c
+@@ -87,11 +87,22 @@ static int mss_sc7180_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
+ 	if (ret < 0)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops mss_sc7180_pm_ops = {
+diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
+index 780074e05841b..26e2d63614ac3 100644
+--- a/drivers/clk/qcom/q6sstop-qcs404.c
++++ b/drivers/clk/qcom/q6sstop-qcs404.c
+@@ -174,21 +174,32 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	q6sstop_regmap_config.name = "q6sstop_tcsr";
+ 	desc = &tcsr_qcs404_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ 	if (ret)
+-		return ret;
++		goto err_put_rpm;
+ 
+ 	q6sstop_regmap_config.name = "q6sstop_cc";
+ 	desc = &q6sstop_qcs404_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ 	if (ret)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops q6sstopcc_pm_ops = {
+diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
+index 43184459228fd..2cd288d6c3e4d 100644
+--- a/drivers/clk/qcom/turingcc-qcs404.c
++++ b/drivers/clk/qcom/turingcc-qcs404.c
+@@ -125,11 +125,22 @@ static int turingcc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	ret = qcom_cc_probe(pdev, &turingcc_desc);
+ 	if (ret < 0)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops turingcc_pm_ops = {
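
The dispcc-sm8450/8550, lpasscc-sc7280, mss-sc7180, q6sstop-qcs404 and
turingcc-qcs404 hunks above all converge on the same runtime-PM shape:
take a reference before touching registers, drop it asynchronously on
success, and drop it synchronously on every error path. A minimal
sketch of that pattern, assuming a hypothetical example_cc_really_probe()
helper standing in for the per-driver body:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	/* hypothetical per-driver body, not from any hunk above */
	static int example_cc_really_probe(struct platform_device *pdev);

	static int example_cc_probe(struct platform_device *pdev)
	{
		int ret;

		/* unlike pm_runtime_get(), this drops its ref on failure */
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret)
			return ret;

		ret = example_cc_really_probe(pdev);
		if (ret)
			goto err_put_rpm;

		pm_runtime_put(&pdev->dev);
		return 0;

	err_put_rpm:
		pm_runtime_put_sync(&pdev->dev);
		return ret;
	}
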
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index e733a2a1927a4..7dd2c615bce23 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -792,6 +792,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
+ 	u64 cnt;
+ 
+ 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
++
++	/* Timer must be disabled before programming CVAL */
++	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
++		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
++		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
++	}
++
+ 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+ 
+diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
+index 9479f29692d3e..a5af6a6666048 100644
+--- a/drivers/dma/sh/rz-dmac.c
++++ b/drivers/dma/sh/rz-dmac.c
+@@ -9,6 +9,7 @@
+  * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
+ #include <linux/interrupt.h>
+@@ -145,8 +146,8 @@ struct rz_dmac {
+ #define CHCFG_REQD			BIT(3)
+ #define CHCFG_SEL(bits)			((bits) & 0x07)
+ #define CHCFG_MEM_COPY			(0x80400008)
+-#define CHCFG_FILL_DDS(a)		(((a) << 16) & GENMASK(19, 16))
+-#define CHCFG_FILL_SDS(a)		(((a) << 12) & GENMASK(15, 12))
++#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
++#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
+ #define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
+ #define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
+ #define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
+@@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan,
+ 	if (val == CHCFG_DS_INVALID)
+ 		return -EINVAL;
+ 
+-	channel->chcfg |= CHCFG_FILL_DDS(val);
++	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
++	channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ 
+ 	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ 	if (val == CHCFG_DS_INVALID)
+ 		return -EINVAL;
+ 
+-	channel->chcfg |= CHCFG_FILL_SDS(val);
++	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
++	channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ 
+ 	return 0;
+ }
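
The rz-dmac hunk trades open-coded shifts for GENMASK()/FIELD_PREP()
and, more importantly, clears each field before re-setting it, so a
repeated ->device_config() call cannot OR new bits over stale ones.
The read-modify-write in isolation, with an illustrative 19:16 field:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define CFG_DDS_MASK	GENMASK(19, 16)	/* illustrative field */

	static u32 cfg_set_dds(u32 cfg, u32 dds)
	{
		cfg &= ~CFG_DDS_MASK;			/* drop the stale value */
		cfg |= FIELD_PREP(CFG_DDS_MASK, dds);	/* place the new one */
		return cfg;
	}
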
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index b702f499f5fb3..923cbebcf520b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -38,6 +38,8 @@
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+ #include <drm/drm_crtc_helper.h>
++#include <drm/drm_damage_helper.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+@@ -529,11 +531,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ 	return true;
+ }
+ 
++static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
++			  unsigned int flags, unsigned int color,
++			  struct drm_clip_rect *clips, unsigned int num_clips)
++{
++
++	if (file)
++		return -ENOSYS;
++
++	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
++					 num_clips);
++}
++
+ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ 	.destroy = drm_gem_fb_destroy,
+ 	.create_handle = drm_gem_fb_create_handle,
+ };
+ 
++static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
++	.destroy = drm_gem_fb_destroy,
++	.create_handle = drm_gem_fb_create_handle,
++	.dirty = amdgpu_dirtyfb
++};
++
+ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ 					  uint64_t bo_flags)
+ {
+@@ -1136,7 +1156,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
+ 	if (ret)
+ 		goto err;
+ 
+-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
++	if (drm_drv_uses_atomic_modeset(dev))
++		ret = drm_framebuffer_init(dev, &rfb->base,
++					   &amdgpu_fb_funcs_atomic);
++	else
++		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ 
+ 	if (ret)
+ 		goto err;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 31fec5e70d135..23b30783dce31 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -437,6 +437,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
+ 		mqd->is_occupied = kfd_is_occupied_cp;
+ 		mqd->mqd_size = sizeof(struct v11_compute_mqd);
+ 		mqd->get_wave_state = get_wave_state;
++		mqd->mqd_stride = kfd_mqd_stride;
+ #if defined(CONFIG_DEBUG_FS)
+ 		mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -452,6 +453,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
+ 		mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ 		mqd->is_occupied = kfd_is_occupied_cp;
+ 		mqd->mqd_size = sizeof(struct v11_compute_mqd);
++		mqd->mqd_stride = kfd_mqd_stride;
+ #if defined(CONFIG_DEBUG_FS)
+ 		mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -481,6 +483,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
+ 		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+ 		mqd->is_occupied = kfd_is_occupied_sdma;
+ 		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
++		mqd->mqd_stride = kfd_mqd_stride;
+ #if defined(CONFIG_DEBUG_FS)
+ 		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3a7e7d2ce847b..3123ea2f4f30a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5990,7 +5990,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 
+ 	if (recalculate_timing)
+ 		drm_mode_set_crtcinfo(&saved_mode, 0);
+-	else
++	else if (!old_stream)
+ 		drm_mode_set_crtcinfo(&mode, 0);
+ 
+ 	/*
+@@ -7295,7 +7295,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ 		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ 			drm_connector_attach_colorspace_property(&aconnector->base);
+-	} else if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
++	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ 		   connector_type == DRM_MODE_CONNECTOR_eDP) {
+ 		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ 			drm_connector_attach_colorspace_property(&aconnector->base);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 3226689737479..6c84ca2ae373a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -1260,6 +1260,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
+ 	attributes.rotation_angle    = 0;
+ 	attributes.attribute_flags.value = 0;
+ 
++	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
++	 * legacy gamma setup.
++	 */
++	if (crtc_state->cm_is_degamma_srgb &&
++	    adev->dm.dc->caps.color.dpp.gamma_corr)
++		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
++
+ 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ 
+ 	if (crtc_state->stream) {
+diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
+index 69ffd4424dc7b..1b8c2aef4633c 100644
+--- a/drivers/gpu/drm/amd/display/dc/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/Makefile
+@@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o
+ AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
+ AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
+ AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
++
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index d133e4186a520..1729fb727333c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2061,12 +2061,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
+ 		}
+ 	}
+ 
+-	/* Check for case where we are going from odm 2:1 to max
+-	 *  pipe scenario.  For these cases, we will call
+-	 *  commit_minimal_transition_state() to exit out of odm 2:1
+-	 *  first before processing new streams
++	/* The ODM Combine 2:1 power optimization is only applied in single-stream
++	 * scenarios; it uses more pipes than strictly needed in order to reduce
++	 * power consumption. We need to switch it off to make room for new streams.
+ 	 */
+-	if (stream_count == dc->res_pool->pipe_count) {
++	if (stream_count > dc->current_state->stream_count &&
++			dc->current_state->stream_count == 1) {
+ 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ 			if (pipe->next_odm_pipe)
+@@ -3589,6 +3589,45 @@ static void commit_planes_for_stream_fast(struct dc *dc,
+ 		top_pipe_to_program->stream->update_flags.raw = 0;
+ }
+ 
++static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
++{
++/*
++ * This function calls HWSS to wait for any potentially double buffered
++ * operations to complete. It should be invoked as a pre-amble prior
++ * to full update programming before asserting any HW locks.
++ */
++	int pipe_idx;
++	int opp_inst;
++	int opp_count = dc->res_pool->pipe_count;
++	struct hubp *hubp;
++	int mpcc_inst;
++	const struct pipe_ctx *pipe_ctx;
++
++	for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
++		pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
++
++		if (!pipe_ctx->stream)
++			continue;
++
++		if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
++			pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
++
++		hubp = pipe_ctx->plane_res.hubp;
++		if (!hubp)
++			continue;
++
++		mpcc_inst = hubp->inst;
++		// MPCC inst is equal to pipe index in practice
++		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
++			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
++				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
++				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
++				break;
++			}
++		}
++	}
++}
++
+ static void commit_planes_for_stream(struct dc *dc,
+ 		struct dc_surface_update *srf_updates,
+ 		int surface_count,
+@@ -3607,24 +3646,9 @@ static void commit_planes_for_stream(struct dc *dc,
+ 	// dc->current_state anymore, so we have to cache it before we apply
+ 	// the new SubVP context
+ 	subvp_prev_use = false;
+-
+-
+ 	dc_z10_restore(dc);
+-
+-	if (update_type == UPDATE_TYPE_FULL) {
+-		/* wait for all double-buffer activity to clear on all pipes */
+-		int pipe_idx;
+-
+-		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+-
+-			if (!pipe_ctx->stream)
+-				continue;
+-
+-			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+-				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+-		}
+-	}
++	if (update_type == UPDATE_TYPE_FULL)
++		wait_for_outstanding_hw_updates(dc, context);
+ 
+ 	if (update_type == UPDATE_TYPE_FULL) {
+ 		dc_allow_idle_optimizations(dc, false);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+index 8e9384094f6d6..f2f55565e98a4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
+ 		/* check insert_above_mpcc exist in tree->opp_list */
+ 		struct mpcc *temp_mpcc = tree->opp_list;
+ 
+-		while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+-			temp_mpcc = temp_mpcc->mpcc_bot;
++		if (temp_mpcc != insert_above_mpcc)
++			while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
++				temp_mpcc = temp_mpcc->mpcc_bot;
+ 		if (temp_mpcc == NULL)
+ 			return NULL;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 5cfa37804d7c6..7c344132a0072 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1580,17 +1580,6 @@ static void dcn20_update_dchubp_dpp(
+ 			|| plane_state->update_flags.bits.global_alpha_change
+ 			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
+ 		// MPCC inst is equal to pipe index in practice
+-		int mpcc_inst = hubp->inst;
+-		int opp_inst;
+-		int opp_count = dc->res_pool->pipe_count;
+-
+-		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+-			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+-				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+-				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+-				break;
+-			}
+-		}
+ 		hws->funcs.update_mpcc(dc, pipe_ctx);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 8ae5ddbd1b271..e6b9c2469a560 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -1099,6 +1099,11 @@ void dcn20_calculate_dlg_params(struct dc *dc,
+ 		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
+ 						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ 		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
++		if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
++		    context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
++			dcn20_adjust_freesync_v_startup(
++				&context->res_ctx.pipe_ctx[i].stream->timing,
++				&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+ 
+ 		pipe_idx++;
+ 	}
+@@ -1927,7 +1932,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ 	int vlevel = 0;
+ 	int pipe_split_from[MAX_PIPES];
+ 	int pipe_cnt = 0;
+-	int i = 0;
+ 	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ 	DC_LOGGER_INIT(dc->ctx->logger);
+ 
+@@ -1951,15 +1955,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ 	dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ 
+-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+-		if (!context->res_ctx.pipe_ctx[i].stream)
+-			continue;
+-		if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+-			dcn20_adjust_freesync_v_startup(
+-				&context->res_ctx.pipe_ctx[i].stream->timing,
+-				&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+-	}
+-
+ 	BW_VAL_TRACE_END_WATERMARKS();
+ 
+ 	goto validate_out;
+@@ -2232,7 +2227,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ 	int vlevel = 0;
+ 	int pipe_split_from[MAX_PIPES];
+ 	int pipe_cnt = 0;
+-	int i = 0;
+ 	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ 	DC_LOGGER_INIT(dc->ctx->logger);
+ 
+@@ -2261,15 +2255,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ 	dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ 
+-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+-		if (!context->res_ctx.pipe_ctx[i].stream)
+-			continue;
+-		if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+-			dcn20_adjust_freesync_v_startup(
+-				&context->res_ctx.pipe_ctx[i].stream->timing,
+-				&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+-	}
+-
+ 	BW_VAL_TRACE_END_WATERMARKS();
+ 
+ 	goto validate_out;
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index dbd60811f95da..ef3a674090211 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ 		 *  - Delta for CEIL: delta_from_mid_point_in_us_1
+ 		 *  - Delta for FLOOR: delta_from_mid_point_in_us_2
+ 		 */
+-		if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
++		if (mid_point_frames_ceil &&
++		    (last_render_time_in_us / mid_point_frames_ceil) <
++		    in_out_vrr->min_duration_in_us) {
+ 			/* Check for out of range.
+ 			 * If using CEIL produces a value that is out of range,
+ 			 * then we are forced to use FLOOR.
+@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ 		/* Either we've calculated the number of frames to insert,
+ 		 * or we need to insert min duration frames
+ 		 */
+-		if (last_render_time_in_us / frames_to_insert <
+-				in_out_vrr->min_duration_in_us){
++		if (frames_to_insert &&
++		    (last_render_time_in_us / frames_to_insert) <
++		    in_out_vrr->min_duration_in_us){
+ 			frames_to_insert -= (frames_to_insert > 1) ?
+ 					1 : 0;
+ 		}
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index a005aec18a020..0262aaafdb1c5 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
+ 				;
+ 			} while (ast_read32(ast, 0x10100) != 0xa8);
+ 		} else {/* AST2100/1100 */
+-			if (ast->chip == AST2100 || ast->chip == 2200)
++			if (ast->chip == AST2100 || ast->chip == AST2200)
+ 				dram_reg_info = ast2100_dram_table_data;
+ 			else
+ 				dram_reg_info = ast1100_dram_table_data;
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+index e99a6fa03d453..a7e6775980043 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -58,6 +58,7 @@ struct i915_perf_group;
+ 
+ typedef u32 intel_engine_mask_t;
+ #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
++#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
+ 
+ struct intel_hw_status_page {
+ 	struct list_head timelines;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index a0e3ef1c65d24..b5b7f2fe8c78e 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -5470,6 +5470,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ 
+ 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ 
++	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
++	ve->base.mask = VIRTUAL_ENGINES;
++
+ 	intel_context_init(&ve->context, &ve->base);
+ 
+ 	for (n = 0; n < count; n++) {
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 4ec85308379a4..ef5517ecd9a0c 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1174,6 +1174,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ {
+ 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ 	kvm_pfn_t pfn;
++	int ret;
+ 
+ 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
+ 		return 0;
+@@ -1183,7 +1184,13 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
+ 	if (is_error_noslot_pfn(pfn))
+ 		return -EINVAL;
+-	return PageTransHuge(pfn_to_page(pfn));
++
++	if (!pfn_valid(pfn))
++		return -EINVAL;
++
++	ret = PageTransHuge(pfn_to_page(pfn));
++	kvm_release_pfn_clean(pfn);
++	return ret;
+ }
+ 
+ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+@@ -2875,24 +2882,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
+ 	ggtt_invalidate(gvt->gt);
+ }
+ 
+-/**
+- * intel_vgpu_reset_gtt - reset the all GTT related status
+- * @vgpu: a vGPU
+- *
+- * This function is called from vfio core to reset reset all
+- * GTT related status, including GGTT, PPGTT, scratch page.
+- *
+- */
+-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
+-{
+-	/* Shadow pages are only created when there is no page
+-	 * table tracking data, so remove page tracking data after
+-	 * removing the shadow pages.
+-	 */
+-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+-	intel_vgpu_reset_ggtt(vgpu, true);
+-}
+-
+ /**
+  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
+  * @gvt: intel gvt device
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
+index a3b0f59ec8bd9..4cb183e06e95a 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.h
++++ b/drivers/gpu/drm/i915/gvt/gtt.h
+@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
+ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
+ 
+ int intel_gvt_init_gtt(struct intel_gvt *gvt);
+-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
+ void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+ 
+ struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index 833b73edefdbb..eb2a3000ad66b 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
+ 	i915_sw_fence_fini(&rq->semaphore);
+ 
+ 	/*
+-	 * Keep one request on each engine for reserved use under mempressure
+-	 * do not use with virtual engines as this really is only needed for
+-	 * kernel contexts.
++	 * Keep one request on each engine for reserved use under mempressure.
+ 	 *
+ 	 * We do not hold a reference to the engine here and so have to be
+ 	 * very careful in what rq->engine we poke. The virtual engine is
+@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
+ 	 * know that if the rq->execution_mask is a single bit, rq->engine
+ 	 * can be a physical engine with the exact corresponding mask.
+ 	 */
+-	if (!intel_engine_is_virtual(rq->engine) &&
+-	    is_power_of_2(rq->execution_mask) &&
++	if (is_power_of_2(rq->execution_mask) &&
+ 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+index 3bcc9c0f20193..7ed2516b6de05 100644
+--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
++++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
+ 	writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
+ }
+ 
++static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
++					       struct drm_atomic_state *state)
++{
++	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
++
++	writel(0, mxsfb->base + LCDC_AS_CTRL);
++}
++
+ static bool mxsfb_format_mod_supported(struct drm_plane *plane,
+ 				       uint32_t format,
+ 				       uint64_t modifier)
+@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+ static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+ 	.atomic_check = mxsfb_plane_atomic_check,
+ 	.atomic_update = mxsfb_plane_overlay_atomic_update,
++	.atomic_disable = mxsfb_plane_overlay_atomic_disable,
+ };
+ 
+ static const struct drm_plane_funcs mxsfb_plane_funcs = {
+diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
+index cf3c04b16a7a8..1d010c66910d8 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
++++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
+@@ -64,13 +64,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ 					 struct virtio_gpu_fence *fence,
+ 					 u32 ring_idx)
+ {
+-	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ 	struct virtio_gpu_fence_event *e = NULL;
+ 	int ret;
+ 
+-	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+-		return 0;
+-
+ 	e = kzalloc(sizeof(*e), GFP_KERNEL);
+ 	if (!e)
+ 		return -ENOMEM;
+@@ -164,18 +160,30 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
+ 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_fence *out_fence;
++	bool drm_fence_event;
+ 	int err;
+ 
+ 	memset(submit, 0, sizeof(*submit));
+ 
+-	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+-	if (!out_fence)
+-		return -ENOMEM;
+-
+-	err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+-	if (err) {
+-		dma_fence_put(&out_fence->f);
+-		return err;
++	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
++	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
++		drm_fence_event = true;
++	else
++		drm_fence_event = false;
++
++	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
++	    exbuf->num_bo_handles ||
++	    drm_fence_event)
++		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
++	else
++		out_fence = NULL;
++
++	if (drm_fence_event) {
++		err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
++		if (err) {
++			dma_fence_put(&out_fence->f);
++			return err;
++		}
+ 	}
+ 
+ 	submit->out_fence = out_fence;
+diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
+index 9cf186362ae2f..3f08cd4a5c280 100644
+--- a/drivers/hwspinlock/qcom_hwspinlock.c
++++ b/drivers/hwspinlock/qcom_hwspinlock.c
+@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
+ 	.unlock		= qcom_hwspinlock_unlock,
+ };
+ 
++static const struct regmap_config sfpb_mutex_config = {
++	.reg_bits		= 32,
++	.reg_stride		= 4,
++	.val_bits		= 32,
++	.max_register		= 0x100,
++	.fast_io		= true,
++};
++
+ static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ 	.offset = 0x4,
+ 	.stride = 0x4,
++	.regmap_config = &sfpb_mutex_config,
+ };
+ 
+ static const struct regmap_config tcsr_msm8226_mutex_config = {
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 2fefbe55c1675..6c43992c8cf6b 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -156,6 +156,7 @@ struct svc_i3c_regs_save {
+  * @base: I3C master controller
+  * @dev: Corresponding device
+  * @regs: Memory mapping
++ * @saved_regs: Volatile values for PM operations
+  * @free_slots: Bit array of available slots
+  * @addrs: Array containing the dynamic addresses of each attached device
+  * @descs: Array of descriptors, one per attached device
+diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
+index 2f745cabf4f24..d20cbddfae68c 100644
+--- a/drivers/input/keyboard/tca6416-keypad.c
++++ b/drivers/input/keyboard/tca6416-keypad.c
+@@ -148,7 +148,7 @@ static int tca6416_keys_open(struct input_dev *dev)
+ 	if (chip->use_polling)
+ 		schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+ 	else
+-		enable_irq(chip->irqnum);
++		enable_irq(chip->client->irq);
+ 
+ 	return 0;
+ }
+@@ -160,7 +160,7 @@ static void tca6416_keys_close(struct input_dev *dev)
+ 	if (chip->use_polling)
+ 		cancel_delayed_work_sync(&chip->dwork);
+ 	else
+-		disable_irq(chip->irqnum);
++		disable_irq(chip->client->irq);
+ }
+ 
+ static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+@@ -266,12 +266,7 @@ static int tca6416_keypad_probe(struct i2c_client *client)
+ 		goto fail1;
+ 
+ 	if (!chip->use_polling) {
+-		if (pdata->irq_is_gpio)
+-			chip->irqnum = gpio_to_irq(client->irq);
+-		else
+-			chip->irqnum = client->irq;
+-
+-		error = request_threaded_irq(chip->irqnum, NULL,
++		error = request_threaded_irq(client->irq, NULL,
+ 					     tca6416_keys_isr,
+ 					     IRQF_TRIGGER_FALLING |
+ 					     IRQF_ONESHOT | IRQF_NO_AUTOEN,
+@@ -279,7 +274,7 @@ static int tca6416_keypad_probe(struct i2c_client *client)
+ 		if (error) {
+ 			dev_dbg(&client->dev,
+ 				"Unable to claim irq %d; error %d\n",
+-				chip->irqnum, error);
++				client->irq, error);
+ 			goto fail1;
+ 		}
+ 	}
+@@ -297,10 +292,8 @@ static int tca6416_keypad_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ fail2:
+-	if (!chip->use_polling) {
+-		free_irq(chip->irqnum, chip);
+-		enable_irq(chip->irqnum);
+-	}
++	if (!chip->use_polling)
++		free_irq(client->irq, chip);
+ fail1:
+ 	input_free_device(input);
+ 	kfree(chip);
+@@ -311,10 +304,8 @@ static void tca6416_keypad_remove(struct i2c_client *client)
+ {
+ 	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+-	if (!chip->use_polling) {
+-		free_irq(chip->irqnum, chip);
+-		enable_irq(chip->irqnum);
+-	}
++	if (!chip->use_polling)
++		free_irq(client->irq, chip);
+ 
+ 	input_unregister_device(chip->input);
+ 	kfree(chip);
+@@ -323,10 +314,9 @@ static void tca6416_keypad_remove(struct i2c_client *client)
+ static int tca6416_keypad_suspend(struct device *dev)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+ 	if (device_may_wakeup(dev))
+-		enable_irq_wake(chip->irqnum);
++		enable_irq_wake(client->irq);
+ 
+ 	return 0;
+ }
+@@ -334,10 +324,9 @@ static int tca6416_keypad_suspend(struct device *dev)
+ static int tca6416_keypad_resume(struct device *dev)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+ 	if (device_may_wakeup(dev))
+-		disable_irq_wake(chip->irqnum);
++		disable_irq_wake(client->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index 096b0925f41ba..acb95048e8230 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -1381,9 +1381,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
+ 	if (error)
+ 		return error;
+ 
+-	sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+-	sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+-
+ 	for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
+ 		/*
+ 		 * Trigger ATI from streaming and normal-power modes so that
+@@ -1561,8 +1558,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 			return error;
+ 	}
+ 
+-	if (dir == READ)
++	if (dir == READ) {
++		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
++		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+ 		return 0;
++	}
+ 
+ 	return iqs7222_ati_trigger(iqs7222);
+ }
+diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
+index 7e27acf6c0cca..f597a1bd56847 100644
+--- a/drivers/mailbox/qcom-ipcc.c
++++ b/drivers/mailbox/qcom-ipcc.c
+@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
+ 			ret = of_parse_phandle_with_args(client_dn, "mboxes",
+ 						"#mbox-cells", j, &curr_ph);
+ 			of_node_put(curr_ph.np);
+-			if (!ret && curr_ph.np == controller_dn) {
++			if (!ret && curr_ph.np == controller_dn)
+ 				ipcc->num_chans++;
+-				break;
+-			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 5a134fa8a174c..42c4f603ec81f 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -756,6 +756,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ {
+ 	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
++	struct sg_table *table;
+ 	int err = 0;
+ 
+ 	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
+@@ -783,11 +784,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 		goto attach_err;
+ 	}
+ 
+-	map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
+-	if (IS_ERR(map->table)) {
+-		err = PTR_ERR(map->table);
++	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
++	if (IS_ERR(table)) {
++		err = PTR_ERR(table);
+ 		goto map_err;
+ 	}
++	map->table = table;
+ 
+ 	if (attr & FASTRPC_ATTR_SECUREMAP) {
+ 		map->phys = sg_phys(map->table->sgl);
+@@ -1871,7 +1873,11 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+ 		return -EINVAL;
+ 	}
+ 
+-	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
++	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
++		err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
++	else
++		err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
++
+ 	if (err) {
+ 		dev_err(dev, "failed to allocate buffer\n");
+ 		return err;
+@@ -1910,12 +1916,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+ 
+ 	/* Add memory to static PD pool, protection thru hypervisor */
+ 	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+-		struct qcom_scm_vmperm perm;
+-
+-		perm.vmid = QCOM_SCM_VMID_HLOS;
+-		perm.perm = QCOM_SCM_PERM_RWX;
+-		err = qcom_scm_assign_mem(buf->phys, buf->size,
+-			&fl->cctx->perms, &perm, 1);
++		err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
++			&fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ 		if (err) {
+ 			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ 					buf->phys, buf->size, err);
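
The fastrpc_map_create() hunk illustrates a general rule: never park an
ERR_PTR() in a long-lived struct field, or concurrent readers may
dereference it. Keeping the result in a local until it is known-good
means the field only ever holds NULL or a valid pointer. A sketch with
a hypothetical container type:

	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	struct example_map {			/* hypothetical container */
		struct dma_buf_attachment *attach;
		struct sg_table *table;		/* NULL or valid, never ERR_PTR */
	};

	static int example_map_attach(struct example_map *map)
	{
		struct sg_table *table;

		table = dma_buf_map_attachment_unlocked(map->attach,
							DMA_BIDIRECTIONAL);
		if (IS_ERR(table))
			return PTR_ERR(table);	/* map->table stays untouched */

		map->table = table;
		return 0;
	}
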
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index d8418d7fcc372..39661e23d7d4f 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -272,6 +272,7 @@ struct brcmnand_controller {
+ 	const unsigned int	*page_sizes;
+ 	unsigned int		page_size_shift;
+ 	unsigned int		max_oob;
++	u32			ecc_level_shift;
+ 	u32			features;
+ 
+ 	/* for low-power standby/resume only */
+@@ -596,6 +597,34 @@ enum {
+ 	INTFC_CTLR_READY		= BIT(31),
+ };
+ 
++/***********************************************************************
++ * NAND ACC CONTROL bitfield
++ *
++ * Some bits have remained constant throughout hardware revision, while
++ * others have shifted around.
++ ***********************************************************************/
++
++/* Constant for all versions (where supported) */
++enum {
++	/* See BRCMNAND_HAS_CACHE_MODE */
++	ACC_CONTROL_CACHE_MODE				= BIT(22),
++
++	/* See BRCMNAND_HAS_PREFETCH */
++	ACC_CONTROL_PREFETCH				= BIT(23),
++
++	ACC_CONTROL_PAGE_HIT				= BIT(24),
++	ACC_CONTROL_WR_PREEMPT				= BIT(25),
++	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
++	ACC_CONTROL_RD_ERASED				= BIT(27),
++	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
++	ACC_CONTROL_WR_ECC				= BIT(30),
++	ACC_CONTROL_RD_ECC				= BIT(31),
++};
++
++#define	ACC_CONTROL_ECC_SHIFT			16
++/* Only for v7.2 */
++#define	ACC_CONTROL_ECC_EXT_SHIFT		13
++
+ static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
+ {
+ #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
+@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+ 	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+ 		ctrl->features |= BRCMNAND_HAS_WP;
+ 
++	/* v7.2 has different ecc level shift in the acc register */
++	if (ctrl->nand_version == 0x0702)
++		ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
++	else
++		ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
++
+ 	return 0;
+ }
+ 
+@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+ 	return 0;
+ }
+ 
+-/***********************************************************************
+- * NAND ACC CONTROL bitfield
+- *
+- * Some bits have remained constant throughout hardware revision, while
+- * others have shifted around.
+- ***********************************************************************/
+-
+-/* Constant for all versions (where supported) */
+-enum {
+-	/* See BRCMNAND_HAS_CACHE_MODE */
+-	ACC_CONTROL_CACHE_MODE				= BIT(22),
+-
+-	/* See BRCMNAND_HAS_PREFETCH */
+-	ACC_CONTROL_PREFETCH				= BIT(23),
+-
+-	ACC_CONTROL_PAGE_HIT				= BIT(24),
+-	ACC_CONTROL_WR_PREEMPT				= BIT(25),
+-	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
+-	ACC_CONTROL_RD_ERASED				= BIT(27),
+-	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
+-	ACC_CONTROL_WR_ECC				= BIT(30),
+-	ACC_CONTROL_RD_ECC				= BIT(31),
+-};
+-
+ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+ {
+ 	if (ctrl->nand_version == 0x0702)
+@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+ 		return GENMASK(4, 0);
+ }
+ 
+-#define NAND_ACC_CONTROL_ECC_SHIFT	16
+-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
+-
+ static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+ {
+ 	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+ 
+-	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
++	mask <<= ACC_CONTROL_ECC_SHIFT;
+ 
+ 	/* v7.2 includes additional ECC levels */
+-	if (ctrl->nand_version >= 0x0702)
+-		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
++	if (ctrl->nand_version == 0x0702)
++		mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
+ 
+ 	return mask;
+ }
+@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+ 
+ 	if (en) {
+ 		acc_control |= ecc_flags; /* enable RD/WR ECC */
+-		acc_control |= host->hwcfg.ecc_level
+-			       << NAND_ACC_CONTROL_ECC_SHIFT;
++		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
++		acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
+ 	} else {
+ 		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ 		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ 		cpu_relax();
+ 	} while (time_after(limit, jiffies));
+ 
++	/*
++	 * do a final check after the timeout in case the CPU was busy and the
++	 * polling loop did not get enough run time, to avoid false timeouts
++	 */
++	val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
++	if ((val & mask) == expected_val)
++		return 0;
++
+ 	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ 		 expected_val, val & mask);
+ 
+@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ 			     const u8 *oob, int sas, int sector_1k)
+ {
+ 	int tbytes = sas << sector_1k;
+-	int j;
++	int j, k = 0;
++	u32 last = 0xffffffff;
++	u8 *plast = (u8 *)&last;
+ 
+ 	/* Adjust OOB values for 1K sector size */
+ 	if (sector_1k && (i & 0x01))
+ 		tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ 	tbytes = min_t(int, tbytes, ctrl->max_oob);
+ 
+-	for (j = 0; j < tbytes; j += 4)
++	/*
++	 * tbytes may not be a multiple of the word size. Make sure the word
++	 * loop stops at the last full word so we don't read past the buffer.
++	 */
++	for (j = 0; (j + 3) < tbytes; j += 4)
+ 		oob_reg_write(ctrl, j,
+ 				(oob[j + 0] << 24) |
+ 				(oob[j + 1] << 16) |
+ 				(oob[j + 2] <<  8) |
+ 				(oob[j + 3] <<  0));
++
++	/* handle the remaining bytes */
++	while (j < tbytes)
++		plast[k++] = oob[j++];
++
++	if (tbytes & 0x3)
++		oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
++
+ 	return tbytes;
+ }
+ 
+@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+ 
+ 	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
+ 
+-	BUG_ON(ctrl->cmd_pending != 0);
++	/*
++	 * If we came here through _panic_write and there is a pending
++	 * command, try to wait for it. If it times out, rather than
++	 * hitting BUG_ON, just return so we don't crash while crashing.
++	 */
++	if (oops_in_progress) {
++		if (ctrl->cmd_pending &&
++			bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
++			return;
++	} else
++		BUG_ON(ctrl->cmd_pending != 0);
+ 	ctrl->cmd_pending = cmd;
+ 
+ 	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+@@ -2561,7 +2601,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
+ 	tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ 	tmp &= ~brcmnand_spare_area_mask(ctrl);
+ 	if (ctrl->nand_version >= 0x0302) {
+-		tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
++		tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
+ 		tmp |= cfg->spare_area_size;
+ 	}
+ 	nand_writereg(ctrl, acc_control_offs, tmp);
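
Among the brcmnand hunks, the bcmnand_ctrl_poll_status() change is the
most broadly reusable: re-read the status once after the deadline,
because on a loaded system the polling task can be preempted past the
timeout while the hardware finished long ago. The recheck pattern on
its own, with an illustrative register and a hypothetical poll_ready()
name:

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	static int poll_ready(void __iomem *reg, u32 mask, u32 want,
			      unsigned long timeout_ms)
	{
		unsigned long limit = jiffies + msecs_to_jiffies(timeout_ms);
		u32 val;

		do {
			val = readl(reg);
			if ((val & mask) == want)
				return 0;
			cpu_relax();
		} while (time_after(limit, jiffies));

		/* we may have been preempted past the deadline; look once more */
		val = readl(reg);
		return (val & mask) == want ? 0 : -ETIMEDOUT;
	}
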
+diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
+index 834d6ba5ce703..63ba8e3a96f51 100644
+--- a/drivers/mtd/spi-nor/winbond.c
++++ b/drivers/mtd/spi-nor/winbond.c
+@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
+ 		NO_SFDP_FLAGS(SECT_4K) },
+ 	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16)
+ 		NO_SFDP_FLAGS(SECT_4K) },
+-	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
+-		NO_SFDP_FLAGS(SECT_4K) },
++	{ "w25q128", INFO(0xef4018, 0, 0, 0)
++		PARSE_SFDP
++		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ 	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
+ 		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ 		.fixups = &w25q256_fixups },
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 6c0623f88654e..d9d843efd111f 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2337,13 +2337,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+ {
+ 	struct ksz_device *dev = ds->priv;
+ 
+-	if (dev->chip_id == KSZ8830_CHIP_ID) {
++	switch (dev->chip_id) {
++	case KSZ8830_CHIP_ID:
+ 		/* Silicon Errata Sheet (DS80000830A):
+ 		 * Port 1 does not work with LinkMD Cable-Testing.
+ 		 * Port 1 does not respond to received PAUSE control frames.
+ 		 */
+ 		if (!port)
+ 			return MICREL_KSZ8_P1_ERRATA;
++		break;
++	case KSZ9477_CHIP_ID:
++		/* KSZ9477 Errata DS80000754C
++		 *
++		 * Module 4: Energy Efficient Ethernet (EEE) feature select must
++		 * be manually disabled
++		 *   The EEE feature is enabled by default, but it is not fully
++		 *   operational. It must be manually disabled through register
++		 *   controls. If not disabled, the PHY ports can auto-negotiate
++		 *   to enable EEE, and this feature can cause link drops when
++		 *   linked to another device supporting EEE.
++		 */
++		return MICREL_NO_EEE;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
+index dee35ba924ad2..8c66d3bf61f02 100644
+--- a/drivers/net/dsa/sja1105/sja1105.h
++++ b/drivers/net/dsa/sja1105/sja1105.h
+@@ -132,6 +132,8 @@ struct sja1105_info {
+ 	int max_frame_mem;
+ 	int num_ports;
+ 	bool multiple_cascade_ports;
++	/* Every {port, TXQ} has its own CBS shaper */
++	bool fixed_cbs_mapping;
+ 	enum dsa_tag_protocol tag_proto;
+ 	const struct sja1105_dynamic_table_ops *dyn_ops;
+ 	const struct sja1105_table_ops *static_ops;
+@@ -264,6 +266,8 @@ struct sja1105_private {
+ 	 * the switch doesn't confuse them with one another.
+ 	 */
+ 	struct mutex mgmt_lock;
++	/* Serializes accesses to the FDB */
++	struct mutex fdb_lock;
+ 	/* PTP two-step TX timestamp ID, and its serialization lock */
+ 	spinlock_t ts_id_lock;
+ 	u8 ts_id;
+diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+index 7729d3f8b7f50..984c0e604e8de 100644
+--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
++++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ 
+ static int
+ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+-				  struct sja1105_dyn_cmd *cmd,
+-				  const struct sja1105_dynamic_table_ops *ops)
++				  const struct sja1105_dynamic_table_ops *ops,
++				  void *entry, bool check_valident,
++				  bool check_errors)
+ {
+ 	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
++	struct sja1105_dyn_cmd cmd = {};
+ 	int rc;
+ 
+-	/* We don't _need_ to read the full entry, just the command area which
+-	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
+-	 * buffer that contains the full entry too. Additionally, our API
+-	 * doesn't really know how many bytes into the buffer does the command
+-	 * area really begin. So just read back the whole entry.
+-	 */
++	/* Read back the whole entry + command structure. */
+ 	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ 			      ops->packed_size);
+ 	if (rc)
+@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ 	/* Unpack the command structure, and return it to the caller in case it
+ 	 * needs to perform further checks on it (VALIDENT).
+ 	 */
+-	memset(cmd, 0, sizeof(*cmd));
+-	ops->cmd_packing(packed_buf, cmd, UNPACK);
++	ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ 
+ 	/* Hardware hasn't cleared VALID => still working on it */
+-	return cmd->valid ? -EAGAIN : 0;
++	if (cmd.valid)
++		return -EAGAIN;
++
++	if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
++		return -ENOENT;
++
++	if (check_errors && cmd.errors)
++		return -EINVAL;
++
++	/* Don't dereference possibly NULL pointer - maybe caller
++	 * only wanted to see whether the entry existed or not.
++	 */
++	if (entry)
++		ops->entry_packing(packed_buf, entry, UNPACK);
++
++	return 0;
+ }
+ 
+ /* Poll the dynamic config entry's control area until the hardware has
+@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+  */
+ static int
+ sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+-				     struct sja1105_dyn_cmd *cmd,
+-				     const struct sja1105_dynamic_table_ops *ops)
++				     const struct sja1105_dynamic_table_ops *ops,
++				     void *entry, bool check_valident,
++				     bool check_errors)
+ {
+-	int rc;
+-
+-	return read_poll_timeout(sja1105_dynamic_config_poll_valid,
+-				 rc, rc != -EAGAIN,
+-				 SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+-				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+-				 false, priv, cmd, ops);
++	int err, rc;
++
++	err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
++				rc, rc != -EAGAIN,
++				SJA1105_DYNAMIC_CONFIG_SLEEP_US,
++				SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
++				false, priv, ops, entry, check_valident,
++				check_errors);
++	return err < 0 ? err : rc;
+ }
+ 
+ /* Provides read access to the settings through the dynamic interface
+@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ 	mutex_lock(&priv->dynamic_config_lock);
+ 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ 			      ops->packed_size);
+-	if (rc < 0) {
+-		mutex_unlock(&priv->dynamic_config_lock);
+-		return rc;
+-	}
+-
+-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+-	mutex_unlock(&priv->dynamic_config_lock);
+ 	if (rc < 0)
+-		return rc;
++		goto out;
+ 
+-	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+-		return -ENOENT;
++	rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
++out:
++	mutex_unlock(&priv->dynamic_config_lock);
+ 
+-	/* Don't dereference possibly NULL pointer - maybe caller
+-	 * only wanted to see whether the entry existed or not.
+-	 */
+-	if (entry)
+-		ops->entry_packing(packed_buf, entry, UNPACK);
+-	return 0;
++	return rc;
+ }
+ 
+ int sja1105_dynamic_config_write(struct sja1105_private *priv,
+@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ 	mutex_lock(&priv->dynamic_config_lock);
+ 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ 			      ops->packed_size);
+-	if (rc < 0) {
+-		mutex_unlock(&priv->dynamic_config_lock);
+-		return rc;
+-	}
+-
+-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+-	mutex_unlock(&priv->dynamic_config_lock);
+ 	if (rc < 0)
+-		return rc;
++		goto out;
+ 
+-	cmd = (struct sja1105_dyn_cmd) {0};
+-	ops->cmd_packing(packed_buf, &cmd, UNPACK);
+-	if (cmd.errors)
+-		return -EINVAL;
++	rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
++out:
++	mutex_unlock(&priv->dynamic_config_lock);
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
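
The dynamic-config rework above moves every terminal check (VALID still
set, VALIDENT, ERRORS) into the polled function itself, so
read_poll_timeout() only has to spin while that function returns
-EAGAIN, and the caller forwards whatever verdict it settled on. The
control flow reduced to a skeleton, where op() and struct ctx are
hypothetical stand-ins:

	#include <linux/iopoll.h>

	struct ctx;			/* hypothetical device context */

	/* returns -EAGAIN while busy, else 0 / -ENOENT / -EINVAL */
	static int op(struct ctx *ctx);

	static int wait_complete(struct ctx *ctx)
	{
		int err, rc;

		err = read_poll_timeout(op, rc, rc != -EAGAIN,
					100, 100000, false, ctx);

		/* -ETIMEDOUT from the poller, otherwise op()'s verdict */
		return err < 0 ? err : rc;
	}
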
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 3529a565b4aaf..013976b0af9f1 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -1805,6 +1805,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ 			   struct dsa_db db)
+ {
+ 	struct sja1105_private *priv = ds->priv;
++	int rc;
+ 
+ 	if (!vid) {
+ 		switch (db.type) {
+@@ -1819,12 +1820,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ 		}
+ 	}
+ 
+-	return priv->info->fdb_add_cmd(ds, port, addr, vid);
++	mutex_lock(&priv->fdb_lock);
++	rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
+ }
+ 
+-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+-			   const unsigned char *addr, u16 vid,
+-			   struct dsa_db db)
++static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
++			     const unsigned char *addr, u16 vid,
++			     struct dsa_db db)
+ {
+ 	struct sja1105_private *priv = ds->priv;
+ 
+@@ -1844,6 +1849,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ 	return priv->info->fdb_del_cmd(ds, port, addr, vid);
+ }
+ 
++static int sja1105_fdb_del(struct dsa_switch *ds, int port,
++			   const unsigned char *addr, u16 vid,
++			   struct dsa_db db)
++{
++	struct sja1105_private *priv = ds->priv;
++	int rc;
++
++	mutex_lock(&priv->fdb_lock);
++	rc = __sja1105_fdb_del(ds, port, addr, vid, db);
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
++}
++
+ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ 			    dsa_fdb_dump_cb_t *cb, void *data)
+ {
+@@ -1875,13 +1894,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ 		if (!(l2_lookup.destports & BIT(port)))
+ 			continue;
+ 
+-		/* We need to hide the FDB entry for unknown multicast */
+-		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
+-		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+-			continue;
+-
+ 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ 
++		/* The hardware FDB is shared between fdb and mdb; "bridge fdb show"
++		 * only wants to see unicast entries
++		 */
++		if (is_multicast_ether_addr(macaddr))
++			continue;
++
+ 		/* We need to hide the dsa_8021q VLANs from the user. */
+ 		if (vid_is_dsa_8021q(l2_lookup.vlanid))
+ 			l2_lookup.vlanid = 0;
+@@ -1905,6 +1925,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 	};
+ 	int i;
+ 
++	mutex_lock(&priv->fdb_lock);
++
+ 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ 		struct sja1105_l2_lookup_entry l2_lookup = {0};
+ 		u8 macaddr[ETH_ALEN];
+@@ -1918,7 +1940,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 		if (rc) {
+ 			dev_err(ds->dev, "Failed to read FDB: %pe\n",
+ 				ERR_PTR(rc));
+-			return;
++			break;
+ 		}
+ 
+ 		if (!(l2_lookup.destports & BIT(port)))
+@@ -1930,14 +1952,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 
+ 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ 
+-		rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
++		rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ 		if (rc) {
+ 			dev_err(ds->dev,
+ 				"Failed to delete FDB entry %pM vid %lld: %pe\n",
+ 				macaddr, l2_lookup.vlanid, ERR_PTR(rc));
+-			return;
++			break;
+ 		}
+ 	}
++
++	mutex_unlock(&priv->fdb_lock);
+ }
+ 
+ static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+@@ -2122,11 +2146,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ }
+ 
+ #define BYTES_PER_KBIT (1000LL / 8)
++/* Port 0 (the uC port) does not have CBS shapers */
++#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
++
++static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
++				   int port, int prio)
++{
++	int i;
++
++	if (priv->info->fixed_cbs_mapping) {
++		i = SJA1110_FIXED_CBS(port, prio);
++		if (i >= 0 && i < priv->info->num_cbs_shapers)
++			return i;
++
++		return -1;
++	}
++
++	for (i = 0; i < priv->info->num_cbs_shapers; i++)
++		if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
++			return i;
++
++	return -1;
++}
+ 
+ static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+ {
+ 	int i;
+ 
++	if (priv->info->fixed_cbs_mapping)
++		return -1;
++
+ 	for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ 		if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ 			return i;
+@@ -2157,14 +2206,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ {
+ 	struct sja1105_private *priv = ds->priv;
+ 	struct sja1105_cbs_entry *cbs;
++	s64 port_transmit_rate_kbps;
+ 	int index;
+ 
+ 	if (!offload->enable)
+ 		return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+ 
+-	index = sja1105_find_unused_cbs_shaper(priv);
+-	if (index < 0)
+-		return -ENOSPC;
++	/* The user may be replacing an existing shaper */
++	index = sja1105_find_cbs_shaper(priv, port, offload->queue);
++	if (index < 0) {
++		/* That isn't the case - see if we can allocate a new one */
++		index = sja1105_find_unused_cbs_shaper(priv);
++		if (index < 0)
++			return -ENOSPC;
++	}
+ 
+ 	cbs = &priv->cbs[index];
+ 	cbs->port = port;
+@@ -2174,9 +2229,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ 	 */
+ 	cbs->credit_hi = offload->hicredit;
+ 	cbs->credit_lo = abs(offload->locredit);
+-	/* User space is in kbits/sec, hardware in bytes/sec */
+-	cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
+-	cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
++	/* User space is in kbits/sec, while the hardware is in bytes/sec times
++	 * link speed. Since the given offload->sendslope is good only for the
++	 * current link speed anyway, and user space is likely to reprogram it
++	 * when that changes, don't even bother to track the port's link speed,
++	 * but deduce the port transmit rate from idleslope - sendslope.
++	 */
++	port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
++	cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
++				  port_transmit_rate_kbps);
++	cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
++				  port_transmit_rate_kbps);
+ 	/* Convert the negative values from 64-bit 2's complement
+ 	 * to 32-bit 2's complement (for the case of 0x80000000 whose
+ 	 * negative is still negative).
+@@ -2241,6 +2304,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ 	int rc, i;
+ 	s64 now;
+ 
++	mutex_lock(&priv->fdb_lock);
+ 	mutex_lock(&priv->mgmt_lock);
+ 
+ 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+@@ -2353,6 +2417,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ 		goto out;
+ out:
+ 	mutex_unlock(&priv->mgmt_lock);
++	mutex_unlock(&priv->fdb_lock);
+ 
+ 	return rc;
+ }
+@@ -2922,7 +2987,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ {
+ 	struct sja1105_l2_lookup_entry *l2_lookup;
+ 	struct sja1105_table *table;
+-	int match;
++	int match, rc;
++
++	mutex_lock(&priv->fdb_lock);
+ 
+ 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ 	l2_lookup = table->entries;
+@@ -2935,7 +3002,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ 	if (match == table->entry_count) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Could not find FDB entry for unknown multicast");
+-		return -ENOSPC;
++		rc = -ENOSPC;
++		goto out;
+ 	}
+ 
+ 	if (flags.val & BR_MCAST_FLOOD)
+@@ -2943,10 +3011,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ 	else
+ 		l2_lookup[match].destports &= ~BIT(to);
+ 
+-	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+-					    l2_lookup[match].index,
+-					    &l2_lookup[match],
+-					    true);
++	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
++					  l2_lookup[match].index,
++					  &l2_lookup[match], true);
++out:
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
+ }
+ 
+ static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+@@ -3316,6 +3387,7 @@ static int sja1105_probe(struct spi_device *spi)
+ 	mutex_init(&priv->ptp_data.lock);
+ 	mutex_init(&priv->dynamic_config_lock);
+ 	mutex_init(&priv->mgmt_lock);
++	mutex_init(&priv->fdb_lock);
+ 	spin_lock_init(&priv->ts_id_lock);
+ 
+ 	rc = sja1105_parse_dt(priv);
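
The sja1105 hunks above normalize the CBS slopes: user space supplies
idleslope/sendslope in kbits/sec, and since idleslope - sendslope always
equals the port transmit rate, the driver can divide by that difference
instead of tracking link speed. A standalone sketch of that arithmetic,
with made-up tc-cbs numbers for a 1 Gbps port (not values from the patch):

	#include <stdio.h>
	#include <stdlib.h>

	#define BYTES_PER_KBIT (1000LL / 8)

	int main(void)
	{
		/* illustrative tc-cbs parameters, in kbits/sec */
		long long idleslope = 20000;	/* 20 Mbps reserved */
		long long sendslope = idleslope - 1000000;

		/* idleslope - sendslope equals the port transmit rate,
		 * so link speed never needs separate tracking
		 */
		long long port_rate_kbps = idleslope - sendslope;

		long long idle_slope =
			idleslope * BYTES_PER_KBIT / port_rate_kbps;
		long long send_slope =
			llabs(sendslope * BYTES_PER_KBIT) / port_rate_kbps;

		printf("idle_slope=%lld send_slope=%lld\n",
		       idle_slope, send_slope);
		return 0;
	}
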
+diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
+index 5ce29c8057a41..834b5c1b4db0c 100644
+--- a/drivers/net/dsa/sja1105/sja1105_spi.c
++++ b/drivers/net/dsa/sja1105/sja1105_spi.c
+@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index f5c2d7a9abc10..ca66b747b7c5d 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -739,7 +739,7 @@ static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ 	u32 port_rules = 0;
+ 	u8 mask[ETH_ALEN];
+ 
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 
+ 	if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ 		port_rules = adin1110_port_rules(port_priv, true, true);
+@@ -760,7 +760,7 @@ static int adin1110_set_mac_address(struct net_device *netdev,
+ 		return -EADDRNOTAVAIL;
+ 
+ 	eth_hw_addr_set(netdev, dev_addr);
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 
+ 	mac_slot = (!port_priv->nr) ?  ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ 	port_rules = adin1110_port_rules(port_priv, true, false);
+@@ -1271,7 +1271,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv
+ 		goto out;
+ 
+ 	/* Allow only BPDUs to be passed to the CPU */
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 	port_rules = adin1110_port_rules(port_priv, true, false);
+ 	ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ 					 mask, port_rules);
+@@ -1385,8 +1385,8 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ 		return -ENOMEM;
+ 
+ 	other_port = priv->ports[!port_priv->nr];
+-	port_rules = adin1110_port_rules(port_priv, false, true);
+-	memset(mask, 0xFF, ETH_ALEN);
++	port_rules = adin1110_port_rules(other_port, false, true);
++	eth_broadcast_addr(mask);
+ 
+ 	return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ 					  mask, port_rules);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 82929ee76739d..2d18acf3d5eae 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -757,8 +757,6 @@ static void macb_mac_link_up(struct phylink_config *config,
+ 		if (rx_pause)
+ 			ctrl |= MACB_BIT(PAE);
+ 
+-		macb_set_tx_clk(bp, speed);
+-
+ 		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+ 		 * cleared the pipeline and control registers.
+ 		 */
+@@ -778,6 +776,9 @@ static void macb_mac_link_up(struct phylink_config *config,
+ 
+ 	spin_unlock_irqrestore(&bp->lock, flags);
+ 
++	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
++		macb_set_tx_clk(bp, speed);
++
+ 	/* Enable Rx and Tx; Enable PTP unicast */
+ 	ctrl = macb_readl(bp, NCR);
+ 	if (gem_has_ptp(bp))
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index e0a4cb7e3f501..c153dc083aff0 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -1402,7 +1402,7 @@ static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
+ 		return;
+ 
+ 	si = enetc_psi_create(pdev);
+-	if (si)
++	if (!IS_ERR(si))
+ 		enetc_psi_destroy(pdev);
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
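
The one-line enetc fix above matters because enetc_psi_create() reports
failure through an ERR_PTR-encoded pointer, which is non-NULL, so the old
"if (si)" called enetc_psi_destroy() even when creation had failed. A
minimal userspace model of the ERR_PTR convention (the helpers mirror the
kernel's definitions; this is a sketch, not kernel code):

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long error) { return (void *)error; }

	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *si = ERR_PTR(-12);  /* e.g. -ENOMEM from a create */

		printf("non-NULL=%d IS_ERR=%d\n", si != NULL, IS_ERR(si));
		/* prints non-NULL=1 IS_ERR=1: a NULL check cannot
		 * detect this failure
		 */
		return 0;
	}
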
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index e57b73eb70f62..ac041cc5714c0 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -492,7 +492,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
+ 		if (!skb)
+ 			return -1;
+ 
+-		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
++		if (rx->ctx.skb_tail == rx->ctx.skb_head)
++			skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
++		else
++			rx->ctx.skb_tail->next = skb;
+ 		rx->ctx.skb_tail = skb;
+ 		num_frags = 0;
+ 	}
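
The gve fix above stops hanging every new fragment off the tail skb's
frag_list: only the head skb owns the frag_list, and later fragments are
linked through ->next of the previous tail. A toy model of the corrected
chaining (the types are stand-ins, not the kernel's struct sk_buff):

	#include <stdio.h>

	struct buf {
		const char *name;
		struct buf *frag_list;	/* head's list of fragments */
		struct buf *next;	/* sibling link in that list */
	};

	static void append(struct buf **head, struct buf **tail,
			   struct buf *nb)
	{
		if (!*head) {
			*head = *tail = nb;
		} else if (*tail == *head) {
			(*head)->frag_list = nb;	/* first fragment */
			*tail = nb;
		} else {
			(*tail)->next = nb;		/* later fragments */
			*tail = nb;
		}
	}

	int main(void)
	{
		struct buf a = { "head" }, b = { "frag1" }, c = { "frag2" };
		struct buf *head = NULL, *tail = NULL;

		append(&head, &tail, &a);
		append(&head, &tail, &b);
		append(&head, &tail, &c);

		for (struct buf *p = head->frag_list; p; p = p->next)
			printf("%s -> %s\n", head->name, p->name);
		return 0;
	}
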
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index a4b43bcd2f0c9..aaf1f42624a79 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -814,6 +814,7 @@ struct hnae3_tc_info {
+ 	u8 max_tc; /* Total number of TCs */
+ 	u8 num_tc; /* Total number of enabled TCs */
+ 	bool mqprio_active;
++	bool dcb_ets_active;
+ };
+ 
+ #define HNAE3_MAX_DSCP			64
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index f276b5ecb431f..26fb6fefcb9d9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -1411,9 +1411,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ 	return 0;
+ 
+ out:
+-	mutex_destroy(&handle->dbgfs_lock);
+ 	debugfs_remove_recursive(handle->hnae3_dbgfs);
+ 	handle->hnae3_dbgfs = NULL;
++	mutex_destroy(&handle->dbgfs_lock);
+ 	return ret;
+ }
+ 
+@@ -1421,6 +1421,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ {
+ 	u32 i;
+ 
++	debugfs_remove_recursive(handle->hnae3_dbgfs);
++	handle->hnae3_dbgfs = NULL;
++
+ 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+ 		if (handle->dbgfs_buf[i]) {
+ 			kvfree(handle->dbgfs_buf[i]);
+@@ -1428,8 +1431,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ 		}
+ 
+ 	mutex_destroy(&handle->dbgfs_lock);
+-	debugfs_remove_recursive(handle->hnae3_dbgfs);
+-	handle->hnae3_dbgfs = NULL;
+ }
+ 
+ void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index b7b51e56b0308..613d0a779cef2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2102,8 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 	 */
+ 	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ 	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
++		/* This smp_store_release() pairs with smp_load_acquire() in
++		 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
++		 * is updated.
++		 */
++		smp_store_release(&ring->last_to_use, ring->next_to_use);
+ 		hns3_tx_push_bd(ring, num);
+-		WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ 		return;
+ 	}
+ 
+@@ -2114,6 +2118,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 		return;
+ 	}
+ 
++	/* This smp_store_release() pairs with smp_load_acquire() in
++	 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
++	 */
++	smp_store_release(&ring->last_to_use, ring->next_to_use);
++
+ 	if (ring->tqp->mem_base)
+ 		hns3_tx_mem_doorbell(ring);
+ 	else
+@@ -2121,7 +2130,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ 
+ 	ring->pending_buf = 0;
+-	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ }
+ 
+ static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
+@@ -3307,8 +3315,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
+ 
+ 	netdev->priv_flags |= IFF_UNICAST_FLT;
+ 
+-	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+-
+ 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+@@ -3562,9 +3568,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ 				  int *bytes, int *pkts, int budget)
+ {
+-	/* pair with ring->last_to_use update in hns3_tx_doorbell(),
+-	 * smp_store_release() is not used in hns3_tx_doorbell() because
+-	 * the doorbell operation already have the needed barrier operation.
++	/* This smp_load_acquire() pairs with smp_store_release() in
++	 * hns3_tx_doorbell().
+ 	 */
+ 	int ltu = smp_load_acquire(&ring->last_to_use);
+ 	int ntc = ring->next_to_clean;
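
The hns3 hunks above replace a plain WRITE_ONCE of last_to_use with
smp_store_release(), pairing it with the smp_load_acquire() in
hns3_nic_reclaim_desc() so the reclaim path cannot observe the new index
before the descriptor writes that precede it. A userspace sketch of the
pairing, with C11 atomics standing in for the kernel primitives:

	#include <stdatomic.h>

	struct ring {
		int desc_valid;		/* stands in for the BD valid bit */
		atomic_int last_to_use;
	};

	static void producer(struct ring *r, int next_to_use)
	{
		r->desc_valid = 1;	/* descriptor written first... */
		atomic_store_explicit(&r->last_to_use, next_to_use,
				      memory_order_release); /* ...then published */
	}

	static int consumer(struct ring *r)
	{
		int ltu = atomic_load_explicit(&r->last_to_use,
					       memory_order_acquire);

		/* after the acquire load, writes made before the paired
		 * release store are guaranteed to be visible
		 */
		return ltu ? r->desc_valid : 0;
	}

	int main(void)
	{
		struct ring r = { 0 };

		producer(&r, 1);
		return !consumer(&r);	/* exits 0: valid bit observed */
	}
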
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 407d30ee55d2e..64858b3114ac9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -773,7 +773,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
+ 		hns3_get_ksettings(h, cmd);
+ 		break;
+ 	case HNAE3_MEDIA_TYPE_FIBER:
+-		if (module_type == HNAE3_MODULE_TYPE_CR)
++		if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
++			cmd->base.port = PORT_OTHER;
++		else if (module_type == HNAE3_MODULE_TYPE_CR)
+ 			cmd->base.port = PORT_DA;
+ 		else
+ 			cmd->base.port = PORT_FIBRE;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index fad5a5ff3cda5..b98301e205f7f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -259,7 +259,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+ 	int ret;
+ 
+ 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+-	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
++	    h->kinfo.tc_info.mqprio_active)
+ 		return -EINVAL;
+ 
+ 	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
+@@ -275,10 +275,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+ 	}
+ 
+ 	hclge_tm_schd_info_update(hdev, num_tc);
+-	if (num_tc > 1)
+-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+-	else
+-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
++	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
+ 
+ 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ 	if (ret)
+@@ -487,7 +484,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
+ 	struct hclge_vport *vport = hclge_get_vport(h);
+ 	struct hclge_dev *hdev = vport->back;
+ 
+-	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
++	if (h->kinfo.tc_info.mqprio_active)
+ 		return 0;
+ 
+ 	return hdev->dcbx_cap;
+@@ -611,7 +608,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ 		return -EBUSY;
+ 
+-	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
++	kinfo = &vport->nic.kinfo;
++	if (kinfo->tc_info.dcb_ets_active)
+ 		return -EINVAL;
+ 
+ 	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+@@ -625,7 +623,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (ret)
+ 		return ret;
+ 
+-	kinfo = &vport->nic.kinfo;
+ 	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+ 	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+ 	kinfo->tc_info.mqprio_active = tc > 0;
+@@ -634,13 +631,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (ret)
+ 		goto err_out;
+ 
+-	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+-
+-	if (tc > 1)
+-		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
+-	else
+-		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
+-
+ 	return hclge_notify_init_up(hdev);
+ 
+ err_out:
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index f01a7a9ee02ca..ff3f8f424ad90 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -1519,7 +1519,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ 	struct hclge_desc desc[3];
+ 	int pos = 0;
+ 	int ret, i;
+-	u32 *req;
++	__le32 *req;
+ 
+ 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+@@ -1544,22 +1544,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ 			 tcam_msg.loc);
+ 
+ 	/* tcam_data0 ~ tcam_data1 */
+-	req = (u32 *)req1->tcam_data;
++	req = (__le32 *)req1->tcam_data;
+ 	for (i = 0; i < 2; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	/* tcam_data2 ~ tcam_data7 */
+-	req = (u32 *)req2->tcam_data;
++	req = (__le32 *)req2->tcam_data;
+ 	for (i = 0; i < 6; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	/* tcam_data8 ~ tcam_data12 */
+-	req = (u32 *)req3->tcam_data;
++	req = (__le32 *)req3->tcam_data;
+ 	for (i = 0; i < 5; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	return ret;
+ }
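
The hclge debugfs fix above annotates the TCAM words as __le32 and runs
them through le32_to_cpu() before printing; dumping the raw u32 is only
correct on little-endian hosts. A quick illustration, with glibc's
le32toh() standing in for le32_to_cpu():

	#include <endian.h>	/* glibc-specific in this sketch */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* device memory laid out little-endian: value 0x12345678 */
		uint8_t tcam_data[4] = { 0x78, 0x56, 0x34, 0x12 };
		uint32_t raw;

		memcpy(&raw, tcam_data, sizeof(raw));
		printf("raw=%08x converted=%08x\n", raw, le32toh(raw));
		/* on a big-endian host raw prints 78563412, while the
		 * converted value is 12345678 everywhere
		 */
		return 0;
	}
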
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 2d5a2e1ef664d..ce6b658a930cc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11026,6 +11026,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
+ 
+ static void hclge_info_show(struct hclge_dev *hdev)
+ {
++	struct hnae3_handle *handle = &hdev->vport->nic;
+ 	struct device *dev = &hdev->pdev->dev;
+ 
+ 	dev_info(dev, "PF info begin:\n");
+@@ -11042,9 +11043,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
+ 	dev_info(dev, "This is %s PF\n",
+ 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ 	dev_info(dev, "DCB %s\n",
+-		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
++		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ 	dev_info(dev, "MQPRIO %s\n",
+-		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
++		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ 	dev_info(dev, "Default tx spare buffer size: %u\n",
+ 		 hdev->tx_spare_buf_size);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 8f76b568c1bf6..70319ce49a1d2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -919,8 +919,6 @@ struct hclge_dev {
+ 
+ #define HCLGE_FLAG_MAIN			BIT(0)
+ #define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
+-#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
+-#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
+ 	u32 flag;
+ 
+ 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index 015b781441149..a2b759531cb7b 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -34,11 +34,11 @@ struct igb_adapter;
+ /* TX/RX descriptor defines */
+ #define IGB_DEFAULT_TXD		256
+ #define IGB_DEFAULT_TX_WORK	128
+-#define IGB_MIN_TXD		80
++#define IGB_MIN_TXD		64
+ #define IGB_MAX_TXD		4096
+ 
+ #define IGB_DEFAULT_RXD		256
+-#define IGB_MIN_RXD		80
++#define IGB_MIN_RXD		64
+ #define IGB_MAX_RXD		4096
+ 
+ #define IGB_DEFAULT_ITR		3 /* dynamic */
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 08e3df37089fe..12f106b3a878b 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
+ 	}
+ 
+ 	/* only call pci_enable_sriov() if no VFs are allocated already */
+-	if (!old_vfs)
++	if (!old_vfs) {
+ 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
++		if (err)
++			goto err_out;
++	}
+ 
+ 	goto out;
+ 
+@@ -3933,8 +3936,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct e1000_hw *hw = &adapter->hw;
+ 
+-	/* Virtualization features not supported on i210 family. */
+-	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++	/* Virtualization features not supported on i210 and 82580 family. */
++	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
++	    (hw->mac.type == e1000_82580))
+ 		return;
+ 
+ 	/* Of the below we really only want the effect of getting
+diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
+index 57d39ee00b585..7b83678ba83a6 100644
+--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
++++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
+@@ -39,11 +39,11 @@ enum latency_range {
+ /* Tx/Rx descriptor defines */
+ #define IGBVF_DEFAULT_TXD	256
+ #define IGBVF_MAX_TXD		4096
+-#define IGBVF_MIN_TXD		80
++#define IGBVF_MIN_TXD		64
+ 
+ #define IGBVF_DEFAULT_RXD	256
+ #define IGBVF_MAX_RXD		4096
+-#define IGBVF_MIN_RXD		80
++#define IGBVF_MIN_RXD		64
+ 
+ #define IGBVF_MIN_ITR_USECS	10 /* 100000 irq/sec */
+ #define IGBVF_MAX_ITR_USECS	10000 /* 100    irq/sec */
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 38901d2a46807..b4077c3f62ed1 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -368,11 +368,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
+ /* TX/RX descriptor defines */
+ #define IGC_DEFAULT_TXD		256
+ #define IGC_DEFAULT_TX_WORK	128
+-#define IGC_MIN_TXD		80
++#define IGC_MIN_TXD		64
+ #define IGC_MAX_TXD		4096
+ 
+ #define IGC_DEFAULT_RXD		256
+-#define IGC_MIN_RXD		80
++#define IGC_MIN_RXD		64
+ #define IGC_MAX_RXD		4096
+ 
+ /* Supported Rx Buffer Sizes */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index 0310af851086b..9339edbd90821 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
+ 	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
+ 	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
++	u32 aflags = adapter->flags;
+ 	bool is_l2 = false;
+ 	u32 regval;
+ 
+@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 	case HWTSTAMP_FILTER_NONE:
+ 		tsync_rx_ctl = 0;
+ 		tsync_rx_mtrl = 0;
+-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
+ 		is_l2 = true;
+ 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ 	case HWTSTAMP_FILTER_NTP_ALL:
+@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		if (hw->mac.type >= ixgbe_mac_X550) {
+ 			tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
+ 			config->rx_filter = HWTSTAMP_FILTER_ALL;
+-			adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
++			aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ 			break;
+ 		}
+ 		fallthrough;
+@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		 * Delay_Req messages and hardware does not support
+ 		 * timestamping all packets => return error
+ 		 */
+-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		config->rx_filter = HWTSTAMP_FILTER_NONE;
+ 		return -ERANGE;
+ 	}
+@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 			       IXGBE_TSYNCRXCTL_TYPE_ALL |
+ 			       IXGBE_TSYNCRXCTL_TSIP_UT_EN;
+ 		config->rx_filter = HWTSTAMP_FILTER_ALL;
+-		adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+-		adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
++		aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
++		aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ 		is_l2 = true;
+ 		break;
+ 	default:
+@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 
+ 	IXGBE_WRITE_FLUSH(hw);
+ 
++	/* configure adapter flags only when HW is actually configured */
++	adapter->flags = aflags;
++
+ 	/* clear TX/RX time stamp registers, just to be sure */
+ 	ixgbe_ptp_clear_tx_timestamp(adapter);
+ 	IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
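
The ixgbe change above stages flag updates in a local aflags copy and
commits it to adapter->flags only after the hardware has actually been
configured, so an -ERANGE bail-out mid-switch no longer leaves stale
flags behind. The pattern in miniature, with illustrative names:

	#include <errno.h>

	struct dev_state {
		unsigned int flags;
	};

	#define FLAG_RX_TS	(1u << 0)

	static int hw_apply(unsigned int flags)
	{
		return 0;	/* stand-in for register writes; may fail */
	}

	static int set_mode(struct dev_state *st, int want_rx_ts)
	{
		unsigned int aflags = st->flags;	/* staged copy */
		int err;

		if (want_rx_ts < 0)
			return -ERANGE;		/* live flags untouched */

		if (want_rx_ts)
			aflags |= FLAG_RX_TS;
		else
			aflags &= ~FLAG_RX_TS;

		err = hw_apply(aflags);
		if (err)
			return err;		/* live flags untouched */

		st->flags = aflags;		/* commit only on success */
		return 0;
	}

	int main(void)
	{
		struct dev_state st = { 0 };

		return set_mode(&st, 1);
	}
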
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 1fec84b4c068d..0129afa1210e6 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ 		break;
+ 	case ETHTOOL_GRXCLSRLALL:
+ 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
++			if (loc == info->rule_cnt) {
++				ret = -EMSGSIZE;
++				break;
++			}
++
+ 			if (port->rfs_rules[i])
+ 				rules[loc++] = i;
+ 		}
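
The mvpp2 guard above (the mtk_eth_soc hunk further down adds the
identical check) stops filling the caller-sized rule_locs array once loc
reaches info->rule_cnt instead of writing past its end. Reduced to its
essence, with made-up sizes:

	#include <errno.h>
	#include <stdio.h>

	#define N_ENTRIES 8

	static int collect(const int *rules, unsigned int *rule_locs,
			   unsigned int rule_cnt)
	{
		unsigned int loc = 0;

		for (int i = 0; i < N_ENTRIES; i++) {
			if (loc == rule_cnt)
				return -EMSGSIZE;  /* caller buffer full */
			if (rules[i])
				rule_locs[loc++] = i;
		}
		return loc;
	}

	int main(void)
	{
		int rules[N_ENTRIES] = { 1, 0, 1, 1, 0, 1, 1, 1 };
		unsigned int locs[4];

		/* 6 active rules but room for 4: returns -EMSGSIZE */
		printf("ret=%d\n", collect(rules, locs, 4));
		return 0;
	}
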
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index c2f68678e947e..23c2f2ed2fb83 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -846,6 +846,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 	return 0;
+ }
+ 
++static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
++			       u16 *smq, u16 *smq_mask)
++{
++	struct nix_cn10k_aq_enq_req *aq_req;
++
++	if (!is_rvu_otx2(rvu)) {
++		aq_req = (struct nix_cn10k_aq_enq_req *)req;
++		*smq = aq_req->sq.smq;
++		*smq_mask = aq_req->sq_mask.smq;
++	} else {
++		*smq = req->sq.smq;
++		*smq_mask = req->sq_mask.smq;
++	}
++}
++
+ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 				   struct nix_aq_enq_req *req,
+ 				   struct nix_aq_enq_rsp *rsp)
+@@ -857,6 +872,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 	struct rvu_block *block;
+ 	struct admin_queue *aq;
+ 	struct rvu_pfvf *pfvf;
++	u16 smq, smq_mask;
+ 	void *ctx, *mask;
+ 	bool ena;
+ 	u64 cfg;
+@@ -928,13 +944,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 	if (rc)
+ 		return rc;
+ 
++	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
+ 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
+ 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ 	     (req->op == NIX_AQ_INSTOP_WRITE &&
+-	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
++	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
+ 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+-				     pcifunc, req->sq.smq))
++				     pcifunc, smq))
+ 			return NIX_AF_ERR_AQ_ENQUEUE;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index 826f691de2595..a4a258da8dd59 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+ }
+ 
+ #define NPA_MAX_BURST 16
+-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
++int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+ {
+ 	struct otx2_nic *pfvf = dev;
++	int cnt = cq->pool_ptrs;
+ 	u64 ptrs[NPA_MAX_BURST];
+-	int num_ptrs = 1;
+ 	dma_addr_t bufptr;
++	int num_ptrs = 1;
+ 
+ 	/* Refill pool with new buffers */
+ 	while (cq->pool_ptrs) {
+@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+ 			num_ptrs = 1;
+ 		}
+ 	}
++	return cnt - cq->pool_ptrs;
+ }
+ 
+ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+index 8ae96815865e6..c1861f7de2545 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
+ 	return weight;
+ }
+ 
+-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
++int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+ int cn10k_lmtst_init(struct otx2_nic *pfvf);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index b9712040a0bc2..20ecc90d203e0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -573,20 +573,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ 		      dma_addr_t *dma)
+ {
+-	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
+-		struct refill_work *work;
+-		struct delayed_work *dwork;
+-
+-		work = &pfvf->refill_wrk[cq->cq_idx];
+-		dwork = &work->pool_refill_work;
+-		/* Schedule a task if no other task is running */
+-		if (!cq->refill_task_sched) {
+-			cq->refill_task_sched = true;
+-			schedule_delayed_work(dwork,
+-					      msecs_to_jiffies(100));
+-		}
++	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ 		return -ENOMEM;
+-	}
+ 	return 0;
+ }
+ 
+@@ -1080,39 +1068,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+ static void otx2_pool_refill_task(struct work_struct *work)
+ {
+ 	struct otx2_cq_queue *cq;
+-	struct otx2_pool *rbpool;
+ 	struct refill_work *wrk;
+-	int qidx, free_ptrs = 0;
+ 	struct otx2_nic *pfvf;
+-	dma_addr_t bufptr;
++	int qidx;
+ 
+ 	wrk = container_of(work, struct refill_work, pool_refill_work.work);
+ 	pfvf = wrk->pf;
+ 	qidx = wrk - pfvf->refill_wrk;
+ 	cq = &pfvf->qset.cq[qidx];
+-	rbpool = cq->rbpool;
+-	free_ptrs = cq->pool_ptrs;
+ 
+-	while (cq->pool_ptrs) {
+-		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
+-			/* Schedule a WQ if we fails to free atleast half of the
+-			 * pointers else enable napi for this RQ.
+-			 */
+-			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
+-				struct delayed_work *dwork;
+-
+-				dwork = &wrk->pool_refill_work;
+-				schedule_delayed_work(dwork,
+-						      msecs_to_jiffies(100));
+-			} else {
+-				cq->refill_task_sched = false;
+-			}
+-			return;
+-		}
+-		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+-		cq->pool_ptrs--;
+-	}
+ 	cq->refill_task_sched = false;
++
++	local_bh_disable();
++	napi_schedule(wrk->napi);
++	local_bh_enable();
+ }
+ 
+ int otx2_config_nix_queues(struct otx2_nic *pfvf)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index ba8091131ec08..0e81849db3538 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -301,6 +301,7 @@ struct flr_work {
+ struct refill_work {
+ 	struct delayed_work pool_refill_work;
+ 	struct otx2_nic *pf;
++	struct napi_struct *napi;
+ };
+ 
+ /* PTPv2 originTimestamp structure */
+@@ -373,7 +374,7 @@ struct dev_hw_ops {
+ 	int	(*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ 	void	(*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ 			     int size, int qidx);
+-	void	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
++	int	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ 	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
+ };
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 9551b422622a4..9ded98bb1c890 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1942,6 +1942,10 @@ int otx2_stop(struct net_device *netdev)
+ 
+ 	netif_tx_disable(netdev);
+ 
++	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
++		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
++	devm_kfree(pf->dev, pf->refill_wrk);
++
+ 	otx2_free_hw_resources(pf);
+ 	otx2_free_cints(pf, pf->hw.cint_cnt);
+ 	otx2_disable_napi(pf);
+@@ -1949,9 +1953,6 @@ int otx2_stop(struct net_device *netdev)
+ 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+ 
+-	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+-		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+-	devm_kfree(pf->dev, pf->refill_wrk);
+ 
+ 	kfree(qset->sq);
+ 	kfree(qset->cq);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index e369baf115301..e77d438489557 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -424,9 +424,10 @@ process_cqe:
+ 	return processed_cqe;
+ }
+ 
+-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
++int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+ {
+ 	struct otx2_nic *pfvf = dev;
++	int cnt = cq->pool_ptrs;
+ 	dma_addr_t bufptr;
+ 
+ 	while (cq->pool_ptrs) {
+@@ -435,6 +436,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+ 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
+ 		cq->pool_ptrs--;
+ 	}
++
++	return cnt - cq->pool_ptrs;
+ }
+ 
+ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+@@ -521,6 +524,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ 	struct otx2_cq_queue *cq;
+ 	struct otx2_qset *qset;
+ 	struct otx2_nic *pfvf;
++	int filled_cnt = -1;
+ 
+ 	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
+ 	pfvf = (struct otx2_nic *)cq_poll->dev;
+@@ -541,7 +545,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ 	}
+ 
+ 	if (rx_cq && rx_cq->pool_ptrs)
+-		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
++		filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+ 	/* Clear the IRQ */
+ 	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
+ 
+@@ -561,9 +565,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ 				otx2_config_irq_coalescing(pfvf, i);
+ 		}
+ 
+-		/* Re-enable interrupts */
+-		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+-			     BIT_ULL(0));
++		if (unlikely(!filled_cnt)) {
++			struct refill_work *work;
++			struct delayed_work *dwork;
++
++			work = &pfvf->refill_wrk[cq->cq_idx];
++			dwork = &work->pool_refill_work;
++			/* Schedule a task if no other task is running */
++			if (!cq->refill_task_sched) {
++				work->napi = napi;
++				cq->refill_task_sched = true;
++				schedule_delayed_work(dwork,
++						      msecs_to_jiffies(100));
++			}
++		} else {
++			/* Re-enable interrupts */
++			otx2_write64(pfvf,
++				     NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
++				     BIT_ULL(0));
++		}
+ 	}
+ 	return workdone;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+index 9e3bfbe5c4809..a82ffca8ce1b1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ 		     int size, int qidx);
+ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ 		    int size, int qidx);
+-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
++int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
++int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+ #endif /* OTX2_TXRX_H */
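
The octeontx2 hunks above move pool refilling out of the delayed worker
and into the NAPI handler: refill_pool_ptrs() now reports how many
pointers it replenished, and only a complete failure parks the queue
behind pool_refill_work, which simply re-schedules NAPI later. A rough
userspace model of that control flow (all names are stand-ins):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in for refill_pool_ptrs(): returns count refilled */
	static int refill(int *pool_ptrs, bool alloc_ok)
	{
		int cnt = *pool_ptrs;

		while (*pool_ptrs && alloc_ok)
			(*pool_ptrs)--;
		return cnt - *pool_ptrs;
	}

	/* stand-in for the tail of the NAPI handler */
	static void napi_poll(int *pool_ptrs, bool alloc_ok)
	{
		int filled_cnt = refill(pool_ptrs, alloc_ok);

		if (!filled_cnt)
			printf("park queue: schedule refill work (100 ms)\n");
		else
			printf("refilled %d: re-enable interrupts\n",
			       filled_cnt);
	}

	int main(void)
	{
		int ptrs = 4;

		napi_poll(&ptrs, true);		/* normal path */
		ptrs = 4;
		napi_poll(&ptrs, false);	/* allocation failing */
		return 0;
	}
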
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 2d15342c260ae..7f0807672071f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2860,6 +2860,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ 	int i;
+ 
+ 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
++		if (cnt == cmd->rule_cnt)
++			return -EMSGSIZE;
++
+ 		if (mac->hwlro_ip[i]) {
+ 			rule_locs[cnt] = i;
+ 			cnt++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+index 92d3952dfa8b7..feeb41693c176 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+@@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ 	if (err)
+ 		return err;
+ 
+-	if (mlx5e_is_eswitch_flow(parse_state->flow))
++	if (mlx5e_is_eswitch_flow(parse_state->flow)) {
+ 		attr->esw_attr->split_count = attr->esw_attr->out_count;
++		parse_state->if_count = 0;
++	}
+ 
+ 	attr->flags |= MLX5_ATTR_FLAG_CT;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+index 291193f7120d5..f63402c480280 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+@@ -294,6 +294,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
+ 	if (err)
+ 		return err;
+ 
++	parse_state->if_count = 0;
+ 	esw_attr->out_count++;
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+index 3b272bbf4c538..368a95fa77d32 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+@@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ 
+ 	attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ 
+-	if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
++	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ 		esw_attr->split_count = esw_attr->out_count;
++		parse_state->if_count = 0;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+index ad09a8a5f36e0..2d1d4a04501b4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+@@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ 	if (err)
+ 		return err;
+ 
++	parse_state->if_count = 0;
+ 	esw_attr->out_count++;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+index c8a3eaf189f6a..a13c5e707b83c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+@@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ 		return err;
+ 
+ 	esw_attr->split_count = esw_attr->out_count;
++	parse_state->if_count = 0;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+index 310b992307607..f17575b09788d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+@@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ 	if (err)
+ 		return err;
+ 
+-	if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
++	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ 		attr->esw_attr->split_count = attr->esw_attr->out_count;
++		parse_state->if_count = 0;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 31708d5aa6087..4b22a91482cec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3939,6 +3939,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ 			}
+ 
+ 			i_split = i + 1;
++			parse_state->if_count = 0;
+ 			list_add(&attr->list, &flow->attrs);
+ 		}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+index fdf2be548e855..2170539461fa2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+@@ -22,20 +22,17 @@ static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_
+ 	       mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
+ }
+ 
+-static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
++static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
++							   u16 vport_num,
++							   struct devlink_port *dl_port)
+ {
+ 	struct mlx5_core_dev *dev = esw->dev;
+ 	struct devlink_port_attrs attrs = {};
+ 	struct netdev_phys_item_id ppid = {};
+-	struct devlink_port *dl_port;
+ 	u32 controller_num = 0;
+ 	bool external;
+ 	u16 pfnum;
+ 
+-	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
+-	if (!dl_port)
+-		return NULL;
+-
+ 	mlx5_esw_get_port_parent_id(dev, &ppid);
+ 	pfnum = mlx5_get_dev_index(dev);
+ 	external = mlx5_core_is_ecpf_esw_manager(dev);
+@@ -63,12 +60,40 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
+ 		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
+ 					      vport_num - 1, false);
+ 	}
+-	return dl_port;
+ }
+ 
+-static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
++int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num)
++{
++	struct devlink_port *dl_port;
++	struct mlx5_vport *vport;
++
++	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
++		return 0;
++
++	vport = mlx5_eswitch_get_vport(esw, vport_num);
++	if (IS_ERR(vport))
++		return PTR_ERR(vport);
++
++	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
++	if (!dl_port)
++		return -ENOMEM;
++
++	mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num, dl_port);
++
++	vport->dl_port = dl_port;
++	return 0;
++}
++
++void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num)
+ {
+-	kfree(dl_port);
++	struct mlx5_vport *vport;
++
++	vport = mlx5_eswitch_get_vport(esw, vport_num);
++	if (IS_ERR(vport) || !vport->dl_port)
++		return;
++
++	kfree(vport->dl_port);
++	vport->dl_port = NULL;
+ }
+ 
+ static const struct devlink_port_ops mlx5_esw_dl_port_ops = {
+@@ -89,35 +114,29 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
+ 	struct devlink *devlink;
+ 	int err;
+ 
+-	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+-		return 0;
+-
+ 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+ 	if (IS_ERR(vport))
+ 		return PTR_ERR(vport);
+ 
+-	dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
++	dl_port = vport->dl_port;
+ 	if (!dl_port)
+-		return -ENOMEM;
++		return 0;
+ 
+ 	devlink = priv_to_devlink(dev);
+ 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ 	err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
+ 					  &mlx5_esw_dl_port_ops);
+ 	if (err)
+-		goto reg_err;
++		return err;
+ 
+ 	err = devl_rate_leaf_create(dl_port, vport, NULL);
+ 	if (err)
+ 		goto rate_err;
+ 
+-	vport->dl_port = dl_port;
+ 	return 0;
+ 
+ rate_err:
+ 	devl_port_unregister(dl_port);
+-reg_err:
+-	mlx5_esw_dl_port_free(dl_port);
+ 	return err;
+ }
+ 
+@@ -125,11 +144,8 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
+ {
+ 	struct mlx5_vport *vport;
+ 
+-	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+-		return;
+-
+ 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+-	if (IS_ERR(vport))
++	if (IS_ERR(vport) || !vport->dl_port)
+ 		return;
+ 
+ 	if (vport->dl_port->devlink_rate) {
+@@ -138,8 +154,6 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
+ 	}
+ 
+ 	devl_port_unregister(vport->dl_port);
+-	mlx5_esw_dl_port_free(vport->dl_port);
+-	vport->dl_port = NULL;
+ }
+ 
+ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 243c455f10297..6e9b1b183190d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1078,7 +1078,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ 	if (err)
+ 		return err;
+ 
+-	err = esw_offloads_load_rep(esw, vport_num);
++	err = mlx5_esw_offloads_load_rep(esw, vport_num);
+ 	if (err)
+ 		goto err_rep;
+ 
+@@ -1091,10 +1091,35 @@ err_rep:
+ 
+ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+ {
+-	esw_offloads_unload_rep(esw, vport_num);
++	mlx5_esw_offloads_unload_rep(esw, vport_num);
+ 	mlx5_esw_vport_disable(esw, vport_num);
+ }
+ 
++static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
++					 enum mlx5_eswitch_vport_event enabled_events)
++{
++	int err;
++
++	err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport_num);
++	if (err)
++		return err;
++
++	err = mlx5_eswitch_load_vport(esw, vport_num, enabled_events);
++	if (err)
++		goto err_load;
++	return 0;
++
++err_load:
++	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport_num);
++	return err;
++}
++
++static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
++{
++	mlx5_eswitch_unload_vport(esw, vport_num);
++	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport_num);
++}
++
+ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
+ {
+ 	struct mlx5_vport *vport;
+@@ -1103,7 +1128,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
+ 	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ 		if (!vport->enabled)
+ 			continue;
+-		mlx5_eswitch_unload_vport(esw, vport->vport);
++		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ 	}
+ }
+ 
+@@ -1116,7 +1141,7 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
+ 	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+ 		if (!vport->enabled)
+ 			continue;
+-		mlx5_eswitch_unload_vport(esw, vport->vport);
++		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ 	}
+ }
+ 
+@@ -1128,7 +1153,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ 	int err;
+ 
+ 	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+-		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
++		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
+ 		if (err)
+ 			goto vf_err;
+ 	}
+@@ -1148,7 +1173,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_v
+ 	int err;
+ 
+ 	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+-		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
++		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
+ 		if (err)
+ 			goto vf_err;
+ 	}
+@@ -1187,12 +1212,19 @@ int
+ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ 				 enum mlx5_eswitch_vport_event enabled_events)
+ {
++	bool pf_needed;
+ 	int ret;
+ 
++	pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
++		    esw->mode == MLX5_ESWITCH_LEGACY;
++
+ 	/* Enable PF vport */
+-	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
+-	if (ret)
+-		return ret;
++	if (pf_needed) {
++		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
++						    enabled_events);
++		if (ret)
++			return ret;
++	}
+ 
+ 	/* Enable external host PF HCA */
+ 	ret = host_pf_enable_hca(esw->dev);
+@@ -1201,7 +1233,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ 
+ 	/* Enable ECPF vport */
+ 	if (mlx5_ecpf_vport_exists(esw->dev)) {
+-		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
++		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
+ 		if (ret)
+ 			goto ecpf_err;
+ 		if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+@@ -1224,11 +1256,12 @@ vf_err:
+ 		mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
+ ec_vf_err:
+ 	if (mlx5_ecpf_vport_exists(esw->dev))
+-		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
++		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
+ ecpf_err:
+ 	host_pf_disable_hca(esw->dev);
+ pf_hca_err:
+-	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
++	if (pf_needed)
++		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+ 	return ret;
+ }
+ 
+@@ -1242,11 +1275,14 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+ 	if (mlx5_ecpf_vport_exists(esw->dev)) {
+ 		if (mlx5_core_ec_sriov_enabled(esw->dev))
+ 			mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
+-		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
++		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
+ 	}
+ 
+ 	host_pf_disable_hca(esw->dev);
+-	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
++
++	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
++	    esw->mode == MLX5_ESWITCH_LEGACY)
++		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+ }
+ 
+ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index ae0dc8a3060d7..56d9a261a5c80 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -725,8 +725,10 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
+ 				   u16 vport,
+ 				   struct mlx5_flow_spec *spec);
+ 
+-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
+-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
++int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num);
++void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num);
++int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
++void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+ 
+ int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
+ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+@@ -739,6 +741,8 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ 				enum mlx5_eswitch_vport_event enabled_events);
+ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
+ 
++int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num);
++void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num);
+ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
+ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index e59380ee1ead3..1ad5a72dcc3fd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -2424,7 +2424,23 @@ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+ 		__esw_offloads_unload_rep(esw, rep, rep_type);
+ }
+ 
+-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
++int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num)
++{
++	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
++		return 0;
++
++	return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport_num);
++}
++
++void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num)
++{
++	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
++		return;
++
++	mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport_num);
++}
++
++int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+ {
+ 	int err;
+ 
+@@ -2448,7 +2464,7 @@ load_err:
+ 	return err;
+ }
+ 
+-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
++void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+ {
+ 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ 		return;
+@@ -3076,26 +3092,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ 	esw_acl_ingress_ofld_cleanup(esw, vport);
+ }
+ 
+-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
++static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+ {
+-	struct mlx5_vport *vport;
++	struct mlx5_vport *uplink, *manager;
++	int ret;
+ 
+-	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+-	if (IS_ERR(vport))
+-		return PTR_ERR(vport);
++	uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
++	if (IS_ERR(uplink))
++		return PTR_ERR(uplink);
++
++	ret = esw_vport_create_offloads_acl_tables(esw, uplink);
++	if (ret)
++		return ret;
++
++	manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
++	if (IS_ERR(manager)) {
++		ret = PTR_ERR(manager);
++		goto err_manager;
++	}
++
++	ret = esw_vport_create_offloads_acl_tables(esw, manager);
++	if (ret)
++		goto err_manager;
+ 
+-	return esw_vport_create_offloads_acl_tables(esw, vport);
++	return 0;
++
++err_manager:
++	esw_vport_destroy_offloads_acl_tables(esw, uplink);
++	return ret;
+ }
+ 
+-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
++static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+ {
+ 	struct mlx5_vport *vport;
+ 
+-	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+-	if (IS_ERR(vport))
+-		return;
++	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
++	if (!IS_ERR(vport))
++		esw_vport_destroy_offloads_acl_tables(esw, vport);
+ 
+-	esw_vport_destroy_offloads_acl_tables(esw, vport);
++	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
++	if (!IS_ERR(vport))
++		esw_vport_destroy_offloads_acl_tables(esw, vport);
+ }
+ 
+ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+@@ -3140,7 +3177,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
+ 	}
+ 	esw->fdb_table.offloads.indir = indir;
+ 
+-	err = esw_create_uplink_offloads_acl_tables(esw);
++	err = esw_create_offloads_acl_tables(esw);
+ 	if (err)
+ 		goto create_acl_err;
+ 
+@@ -3181,7 +3218,7 @@ create_fdb_err:
+ create_restore_err:
+ 	esw_destroy_offloads_table(esw);
+ create_offloads_err:
+-	esw_destroy_uplink_offloads_acl_tables(esw);
++	esw_destroy_offloads_acl_tables(esw);
+ create_acl_err:
+ 	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
+ create_indir_err:
+@@ -3197,7 +3234,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
+ 	esw_destroy_offloads_fdb_tables(esw);
+ 	esw_destroy_restore_table(esw);
+ 	esw_destroy_offloads_table(esw);
+-	esw_destroy_uplink_offloads_acl_tables(esw);
++	esw_destroy_offloads_acl_tables(esw);
+ 	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
+ 	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
+ }
+@@ -3355,7 +3392,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ 			vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+ 
+ 	/* Uplink vport rep must load first. */
+-	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
++	err = mlx5_esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
+ 	if (err)
+ 		goto err_uplink;
+ 
+@@ -3366,7 +3403,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ 	return 0;
+ 
+ err_vports:
+-	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
++	mlx5_esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ err_uplink:
+ 	esw_offloads_steering_cleanup(esw);
+ err_steering_init:
+@@ -3404,7 +3441,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ void esw_offloads_disable(struct mlx5_eswitch *esw)
+ {
+ 	mlx5_eswitch_disable_pf_vf_vports(esw);
+-	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
++	mlx5_esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ 	esw_set_passing_vport_metadata(esw, false);
+ 	esw_offloads_steering_cleanup(esw);
+ 	mapping_destroy(esw->offloads.reg_c0_obj_pool);
+diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
+index a418ad8e8770a..15a3a31b15d45 100644
+--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
++++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
+@@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
+ 	list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ 		newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
+ 		if (!newckf)
+-			return ERR_PTR(-ENOMEM);
++			goto err;
+ 		list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
+ 	}
+ 
+ 	list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ 		newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
+ 		if (!newcaf)
+-			return ERR_PTR(-ENOMEM);
++			goto err;
+ 		list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
+ 	}
+ 
+ 	return duprule;
++
++err:
++	list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
++		list_del(&ckf->ctrl.list);
++		kfree(ckf);
++	}
++
++	list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
++		list_del(&caf->ctrl.list);
++		kfree(caf);
++	}
++
++	kfree(duprule);
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ static void vcap_apply_width(u8 *dst, int width, int bytes)
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 4e412ac0965a4..449ed1f5624c9 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -816,10 +816,10 @@ retry:
+ 
+ 	netif_wake_subqueue(ndev, 0);
+ 
+-	napi_complete(napi);
+-
+-	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+-	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
++	if (napi_complete_done(napi, budget - quota)) {
++		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
++		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
++	}
+ 
+ out:
+ 	return budget - quota;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4727f7be4f86e..6931973028aef 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2703,9 +2703,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+ 
+ 	/* We still have pending packets, let's call for a new scheduling */
+ 	if (tx_q->dirty_tx != tx_q->cur_tx)
+-		hrtimer_start(&tx_q->txtimer,
+-			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+-			      HRTIMER_MODE_REL);
++		stmmac_tx_timer_arm(priv, queue);
+ 
+ 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+ 
+@@ -2986,9 +2984,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+ {
+ 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
++	u32 tx_coal_timer = priv->tx_coal_timer[queue];
++
++	if (!tx_coal_timer)
++		return;
+ 
+ 	hrtimer_start(&tx_q->txtimer,
+-		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
++		      STMMAC_COAL_TIMER(tx_coal_timer),
+ 		      HRTIMER_MODE_REL);
+ }
+ 
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index b6d7981b2d1ee..927d3d54658ef 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1800,9 +1800,6 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
+ 	/* Transmit waveform amplitude can be improved (1000BASE-T, 100BASE-TX, 10BASE-Te) */
+ 	{0x1c, 0x04, 0x00d0},
+ 
+-	/* Energy Efficient Ethernet (EEE) feature select must be manually disabled */
+-	{0x07, 0x3c, 0x0000},
+-
+ 	/* Register settings are required to meet data sheet supply current specifications */
+ 	{0x1c, 0x13, 0x6eff},
+ 	{0x1c, 0x14, 0xe6ff},
+@@ -1847,6 +1844,12 @@ static int ksz9477_config_init(struct phy_device *phydev)
+ 			return err;
+ 	}
+ 
++	/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
++	 * in this switch shall be regarded as broken.
++	 */
++	if (phydev->dev_flags & MICREL_NO_EEE)
++		phydev->eee_broken_modes = -1;
++
+ 	err = genphy_restart_aneg(phydev);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 0738baa5b82e4..e88bedca8f32f 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -2629,6 +2629,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
+ 	struct r8152 *tp = container_of(napi, struct r8152, napi);
+ 	int work_done;
+ 
++	if (!budget)
++		return 0;
++
+ 	work_done = rx_bottom(tp, budget);
+ 
+ 	if (work_done < budget) {
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index ef8eacb596f73..fc0d0114d8c27 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ 	struct veth_rq *rq = NULL;
++	int ret = NETDEV_TX_OK;
+ 	struct net_device *rcv;
+ 	int length = skb->len;
+ 	bool use_napi = false;
+@@ -376,6 +377,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	} else {
+ drop:
+ 		atomic64_inc(&priv->dropped);
++		ret = NET_XMIT_DROP;
+ 	}
+ 
+ 	if (use_napi)
+@@ -383,7 +385,7 @@ drop:
+ 
+ 	rcu_read_unlock();
+ 
+-	return NETDEV_TX_OK;
++	return ret;
+ }
+ 
+ static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
+@@ -1445,6 +1447,8 @@ static int veth_open(struct net_device *dev)
+ 		netif_carrier_on(peer);
+ 	}
+ 
++	veth_set_xdp_features(dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index 8bdc5e043831c..3737c1021f88b 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -56,8 +56,8 @@
+ static int led_type __read_mostly = -1;
+ static unsigned char lastleds;	/* LED state from most recent update */
+ static unsigned int led_heartbeat __read_mostly = 1;
+-static unsigned int led_diskio    __read_mostly = 1;
+-static unsigned int led_lanrxtx   __read_mostly = 1;
++static unsigned int led_diskio    __read_mostly;
++static unsigned int led_lanrxtx   __read_mostly;
+ static char lcd_text[32]          __read_mostly;
+ static char lcd_text_default[32]  __read_mostly;
+ static int  lcd_no_led_support    __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index 8f28f8696bf32..b8e91cbb60567 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -46,8 +46,6 @@
+ #include <linux/module.h>
+ 
+ #include <asm/ropes.h>
+-#include <asm/mckinley.h>	/* for proc_mckinley_root */
+-#include <asm/runway.h>		/* for proc_runway_root */
+ #include <asm/page.h>		/* for PAGE0 */
+ #include <asm/pdc.h>		/* for PDC_MODEL_* */
+ #include <asm/pdcpat.h>		/* for is_pdc_pat() */
+@@ -122,7 +120,7 @@ MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
+ #endif
+ 
+ static struct proc_dir_entry *proc_runway_root __ro_after_init;
+-struct proc_dir_entry *proc_mckinley_root __ro_after_init;
++static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+ 
+ /************************************
+ ** SBA register read and write support
+@@ -1899,9 +1897,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
+ 	int i;
+ 	char *version;
+ 	void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
+-#ifdef CONFIG_PROC_FS
+-	struct proc_dir_entry *root;
+-#endif
++	struct proc_dir_entry *root __maybe_unused;
+ 
+ 	sba_dump_ranges(sba_addr);
+ 
+@@ -1967,7 +1963,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
+ 
+ 	hppa_dma_ops = &sba_ops;
+ 
+-#ifdef CONFIG_PROC_FS
+ 	switch (dev->id.hversion) {
+ 	case PLUTO_MCKINLEY_PORT:
+ 		if (!proc_mckinley_root)
+@@ -1985,7 +1980,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
+ 
+ 	proc_create_single("sba_iommu", 0, root, sba_proc_info);
+ 	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
+-#endif
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index eee0f9bc3d323..4502bc849c871 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1701,7 +1701,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	struct intel_community_context *cctx;
+ 	struct intel_community *community;
+ 	struct device *dev = &pdev->dev;
+-	struct acpi_device *adev = ACPI_COMPANION(dev);
+ 	struct intel_pinctrl *pctrl;
+ 	acpi_status status;
+ 	unsigned int i;
+@@ -1769,7 +1768,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	status = acpi_install_address_space_handler(adev->handle,
++	status = acpi_install_address_space_handler(ACPI_HANDLE(dev),
+ 					community->acpi_space_id,
+ 					chv_pinctrl_mmio_access_handler,
+ 					NULL, pctrl);
+@@ -1786,7 +1785,7 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
+ 	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
+ 	const struct intel_community *community = &pctrl->communities[0];
+ 
+-	acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
++	acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev),
+ 					  community->acpi_space_id,
+ 					  chv_pinctrl_mmio_access_handler);
+ 
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 382793e73a60a..30b50920b278c 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -80,8 +80,8 @@ config MLXBF_PMC
+ 
+ config NVSW_SN2201
+ 	tristate "Nvidia SN2201 platform driver support"
+-	depends on HWMON
+-	depends on I2C
++	depends on HWMON && I2C
++	depends on ACPI || COMPILE_TEST
+ 	select REGMAP_I2C
+ 	help
+ 	  This driver provides support for the Nvidia SN2201 platform.
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index be967d797c28e..2d4bbe99959ef 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xa0, "TPIO_DATA_BEAT" },
+ 	{ 0xa1, "TDMA_DATA_BEAT" },
+ 	{ 0xa2, "MAP_DATA_BEAT" },
+@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xa0, "TPIO_DATA_BEAT" },
+ 	{ 0xa1, "TDMA_DATA_BEAT" },
+ 	{ 0xa2, "MAP_DATA_BEAT" },
+@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x100, "ECC_SINGLE_ERROR_CNT" },
+ 	{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
+ 	{ 0x114, "SERR_INJ" },
+@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xc0, "RXREQ_MSS" },
+ 	{ 0xc1, "RXDAT_MSS" },
+ 	{ 0xc2, "TXRSP_MSS" },
+@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x45, "HNF_REQUESTS" },
+ 	{ 0x46, "HNF_REJECTS" },
+ 	{ 0x47, "ALL_BUSY" },
+@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x12, "CDN_REQ" },
+ 	{ 0x13, "DDN_REQ" },
+ 	{ 0x14, "NDN_REQ" },
+@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
+ 				uint64_t *result)
+ {
+ 	uint32_t perfcfg_offset, perfval_offset;
+-	uint64_t perfmon_cfg, perfevt, perfctl;
++	uint64_t perfmon_cfg, perfevt;
+ 
+ 	if (cnt_num >= pmc->block[blk_num].counters)
+ 		return -EINVAL;
+@@ -904,25 +910,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
+ 	perfval_offset = perfcfg_offset +
+ 			 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
+ 
+-	/* Set counter in "read" mode */
+-	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+-				 MLXBF_PMC_PERFCTL);
+-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
+-
+-	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
+-			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+-		return -EFAULT;
+-
+-	/* Check if the counter is enabled */
+-
+-	if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
+-			   MLXBF_PMC_READ_REG_64, &perfctl))
+-		return -EFAULT;
+-
+-	if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
+-		return -EINVAL;
+-
+ 	/* Set counter in "read" mode */
+ 	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ 				 MLXBF_PMC_PERFEVT);
+@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
+ 	} else
+ 		return -EINVAL;
+ 
+-	return sprintf(buf, "0x%llx\n", value);
++	return sysfs_emit(buf, "0x%llx\n", value);
+ }
+ 
+ /* Store function for "counter" sysfs files */
+@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
+ 
+ 	err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
+ 	if (err)
+-		return sprintf(buf, "No event being monitored\n");
++		return sysfs_emit(buf, "No event being monitored\n");
+ 
+ 	evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
+ 	if (!evt_name)
+ 		return -EINVAL;
+ 
+-	return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
++	return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
+ }
+ 
+ /* Store function for "event" sysfs files */
+@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	for (i = 0, buf[0] = '\0'; i < size; ++i) {
+-		len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
+-			       events[i].evt_name);
+-		if (len > PAGE_SIZE)
++		len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
++				events[i].evt_num, events[i].evt_name);
++		if (len >= PAGE_SIZE)
+ 			break;
+ 		strcat(buf, e_info);
+ 		ret = len;
+@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
+ 
+ 	value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
+ 
+-	return sprintf(buf, "%d\n", value);
++	return sysfs_emit(buf, "%d\n", value);
+ }
+ 
+ /* Store function for "enable" sysfs files - only for l3cache */
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index b600b77d91ef2..f3696a54a2bd7 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -59,6 +59,7 @@ struct mlxbf_tmfifo;
+  * @vq: pointer to the virtio virtqueue
+  * @desc: current descriptor of the pending packet
+  * @desc_head: head descriptor of the pending packet
++ * @drop_desc: dummy desc for packet dropping
+  * @cur_len: processed length of the current descriptor
+  * @rem_len: remaining length of the pending packet
+  * @pkt_len: total length of the pending packet
+@@ -75,6 +76,7 @@ struct mlxbf_tmfifo_vring {
+ 	struct virtqueue *vq;
+ 	struct vring_desc *desc;
+ 	struct vring_desc *desc_head;
++	struct vring_desc drop_desc;
+ 	int cur_len;
+ 	int rem_len;
+ 	u32 pkt_len;
+@@ -86,6 +88,14 @@ struct mlxbf_tmfifo_vring {
+ 	struct mlxbf_tmfifo *fifo;
+ };
+ 
++/* Check whether vring is in drop mode. */
++#define IS_VRING_DROP(_r) ({ \
++	typeof(_r) (r) = (_r); \
++	(r->desc_head == &r->drop_desc ? true : false); })
++
++/* A stub length to drop maximum length packet. */
++#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)
++
+ /* Interrupt types. */
+ enum {
+ 	MLXBF_TM_RX_LWM_IRQ,
+@@ -214,7 +224,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
+ static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
+ 
+ /* Maximum L2 header length. */
+-#define MLXBF_TMFIFO_NET_L2_OVERHEAD	36
++#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)
+ 
+ /* Supported virtio-net features. */
+ #define MLXBF_TMFIFO_NET_FEATURES \
+@@ -262,6 +272,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ 		vring->align = SMP_CACHE_BYTES;
+ 		vring->index = i;
+ 		vring->vdev_id = tm_vdev->vdev.id.device;
++		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
+ 		dev = &tm_vdev->vdev.dev;
+ 
+ 		size = vring_size(vring->num, vring->align);
+@@ -367,7 +378,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
+ 	return len;
+ }
+ 
+-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
++static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
+ {
+ 	struct vring_desc *desc_head;
+ 	u32 len = 0;
+@@ -596,19 +607,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ 
+ 	if (vring->cur_len + sizeof(u64) <= len) {
+ 		/* The whole word. */
+-		if (is_rx)
+-			memcpy(addr + vring->cur_len, &data, sizeof(u64));
+-		else
+-			memcpy(&data, addr + vring->cur_len, sizeof(u64));
++		if (!IS_VRING_DROP(vring)) {
++			if (is_rx)
++				memcpy(addr + vring->cur_len, &data,
++				       sizeof(u64));
++			else
++				memcpy(&data, addr + vring->cur_len,
++				       sizeof(u64));
++		}
+ 		vring->cur_len += sizeof(u64);
+ 	} else {
+ 		/* Leftover bytes. */
+-		if (is_rx)
+-			memcpy(addr + vring->cur_len, &data,
+-			       len - vring->cur_len);
+-		else
+-			memcpy(&data, addr + vring->cur_len,
+-			       len - vring->cur_len);
++		if (!IS_VRING_DROP(vring)) {
++			if (is_rx)
++				memcpy(addr + vring->cur_len, &data,
++				       len - vring->cur_len);
++			else
++				memcpy(&data, addr + vring->cur_len,
++				       len - vring->cur_len);
++		}
+ 		vring->cur_len = len;
+ 	}
+ 
+@@ -625,13 +642,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+  * flag is set.
+  */
+ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+-				     struct vring_desc *desc,
++				     struct vring_desc **desc,
+ 				     bool is_rx, bool *vring_change)
+ {
+ 	struct mlxbf_tmfifo *fifo = vring->fifo;
+ 	struct virtio_net_config *config;
+ 	struct mlxbf_tmfifo_msg_hdr hdr;
+ 	int vdev_id, hdr_len;
++	bool drop_rx = false;
+ 
+ 	/* Read/Write packet header. */
+ 	if (is_rx) {
+@@ -651,8 +669,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ 			if (ntohs(hdr.len) >
+ 			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ 					      config->mtu) +
+-			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
+-				return;
++					      MLXBF_TMFIFO_NET_L2_OVERHEAD)
++				drop_rx = true;
+ 		} else {
+ 			vdev_id = VIRTIO_ID_CONSOLE;
+ 			hdr_len = 0;
+@@ -667,16 +685,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ 
+ 			if (!tm_dev2)
+ 				return;
+-			vring->desc = desc;
++			vring->desc = *desc;
+ 			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
+ 			*vring_change = true;
+ 		}
++
++		if (drop_rx && !IS_VRING_DROP(vring)) {
++			if (vring->desc_head)
++				mlxbf_tmfifo_release_pkt(vring);
++			*desc = &vring->drop_desc;
++			vring->desc_head = *desc;
++			vring->desc = *desc;
++		}
++
+ 		vring->pkt_len = ntohs(hdr.len) + hdr_len;
+ 	} else {
+ 		/* Network virtio has an extra header. */
+ 		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
+ 			   sizeof(struct virtio_net_hdr) : 0;
+-		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
++		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
+ 		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
+ 			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
+ 		hdr.len = htons(vring->pkt_len - hdr_len);
+@@ -709,15 +736,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ 	/* Get the descriptor of the next packet. */
+ 	if (!vring->desc) {
+ 		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
+-		if (!desc)
+-			return false;
++		if (!desc) {
++			/* Drop next Rx packet to avoid stuck. */
++			if (is_rx) {
++				desc = &vring->drop_desc;
++				vring->desc_head = desc;
++				vring->desc = desc;
++			} else {
++				return false;
++			}
++		}
+ 	} else {
+ 		desc = vring->desc;
+ 	}
+ 
+ 	/* Beginning of a packet. Start to Rx/Tx packet header. */
+ 	if (vring->pkt_len == 0) {
+-		mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
++		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
+ 		(*avail)--;
+ 
+ 		/* Return if new packet is for another ring. */
+@@ -743,17 +778,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ 		vring->rem_len -= len;
+ 
+ 		/* Get the next desc on the chain. */
+-		if (vring->rem_len > 0 &&
++		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
+ 		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
+ 			idx = virtio16_to_cpu(vdev, desc->next);
+ 			desc = &vr->desc[idx];
+ 			goto mlxbf_tmfifo_desc_done;
+ 		}
+ 
+-		/* Done and release the pending packet. */
+-		mlxbf_tmfifo_release_pending_pkt(vring);
++		/* Done and release the packet. */
+ 		desc = NULL;
+ 		fifo->vring[is_rx] = NULL;
++		if (!IS_VRING_DROP(vring)) {
++			mlxbf_tmfifo_release_pkt(vring);
++		} else {
++			vring->pkt_len = 0;
++			vring->desc_head = NULL;
++			vring->desc = NULL;
++			return false;
++		}
+ 
+ 		/*
+ 		 * Make sure the load/store are in order before
+@@ -933,7 +975,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
+ 
+ 		/* Release the pending packet. */
+ 		if (vring->desc)
+-			mlxbf_tmfifo_release_pending_pkt(vring);
++			mlxbf_tmfifo_release_pkt(vring);
+ 		vq = vring->vq;
+ 		if (vq) {
+ 			vring->vq = NULL;
+diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
+index 4a116dc44f6e7..2826fc216d291 100644
+--- a/drivers/pwm/pwm-atmel-tcb.c
++++ b/drivers/pwm/pwm-atmel-tcb.c
+@@ -422,13 +422,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ 	struct atmel_tcb_pwm_chip *tcbpwm;
+ 	const struct atmel_tcb_config *config;
+ 	struct device_node *np = pdev->dev.of_node;
+-	struct regmap *regmap;
+-	struct clk *clk, *gclk = NULL;
+-	struct clk *slow_clk;
+ 	char clk_name[] = "t0_clk";
+ 	int err;
+ 	int channel;
+ 
++	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
++	if (tcbpwm == NULL)
++		return -ENOMEM;
++
+ 	err = of_property_read_u32(np, "reg", &channel);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev,
+@@ -437,49 +438,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ 		return err;
+ 	}
+ 
+-	regmap = syscon_node_to_regmap(np->parent);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	tcbpwm->regmap = syscon_node_to_regmap(np->parent);
++	if (IS_ERR(tcbpwm->regmap))
++		return PTR_ERR(tcbpwm->regmap);
+ 
+-	slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+-	if (IS_ERR(slow_clk))
+-		return PTR_ERR(slow_clk);
++	tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
++	if (IS_ERR(tcbpwm->slow_clk))
++		return PTR_ERR(tcbpwm->slow_clk);
+ 
+ 	clk_name[1] += channel;
+-	clk = of_clk_get_by_name(np->parent, clk_name);
+-	if (IS_ERR(clk))
+-		clk = of_clk_get_by_name(np->parent, "t0_clk");
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
++	if (IS_ERR(tcbpwm->clk))
++		tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
++	if (IS_ERR(tcbpwm->clk)) {
++		err = PTR_ERR(tcbpwm->clk);
++		goto err_slow_clk;
++	}
+ 
+ 	match = of_match_node(atmel_tcb_of_match, np->parent);
+ 	config = match->data;
+ 
+ 	if (config->has_gclk) {
+-		gclk = of_clk_get_by_name(np->parent, "gclk");
+-		if (IS_ERR(gclk))
+-			return PTR_ERR(gclk);
+-	}
+-
+-	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+-	if (tcbpwm == NULL) {
+-		err = -ENOMEM;
+-		goto err_slow_clk;
++		tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
++		if (IS_ERR(tcbpwm->gclk)) {
++			err = PTR_ERR(tcbpwm->gclk);
++			goto err_clk;
++		}
+ 	}
+ 
+ 	tcbpwm->chip.dev = &pdev->dev;
+ 	tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
+ 	tcbpwm->chip.npwm = NPWM;
+ 	tcbpwm->channel = channel;
+-	tcbpwm->regmap = regmap;
+-	tcbpwm->clk = clk;
+-	tcbpwm->gclk = gclk;
+-	tcbpwm->slow_clk = slow_clk;
+ 	tcbpwm->width = config->counter_width;
+ 
+-	err = clk_prepare_enable(slow_clk);
++	err = clk_prepare_enable(tcbpwm->slow_clk);
+ 	if (err)
+-		goto err_slow_clk;
++		goto err_gclk;
+ 
+ 	spin_lock_init(&tcbpwm->lock);
+ 
+@@ -494,8 +489,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ err_disable_clk:
+ 	clk_disable_unprepare(tcbpwm->slow_clk);
+ 
++err_gclk:
++	clk_put(tcbpwm->gclk);
++
++err_clk:
++	clk_put(tcbpwm->clk);
++
+ err_slow_clk:
+-	clk_put(slow_clk);
++	clk_put(tcbpwm->slow_clk);
+ 
+ 	return err;
+ }
+@@ -507,8 +508,9 @@ static void atmel_tcb_pwm_remove(struct platform_device *pdev)
+ 	pwmchip_remove(&tcbpwm->chip);
+ 
+ 	clk_disable_unprepare(tcbpwm->slow_clk);
+-	clk_put(tcbpwm->slow_clk);
++	clk_put(tcbpwm->gclk);
+ 	clk_put(tcbpwm->clk);
++	clk_put(tcbpwm->slow_clk);
+ }
+ 
+ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
+diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
+index 86a0ea0f6955c..806f0bb3ad6d8 100644
+--- a/drivers/pwm/pwm-lpc32xx.c
++++ b/drivers/pwm/pwm-lpc32xx.c
+@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (duty_cycles > 255)
+ 		duty_cycles = 255;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~0xFFFF;
+ 	val |= (period_cycles << 8) | duty_cycles;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	return 0;
+ }
+@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	if (ret)
+ 		return ret;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val |= PWM_ENABLE;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	return 0;
+ }
+@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+ 	u32 val;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~PWM_ENABLE;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	clk_disable_unprepare(lpc32xx->clk);
+ }
+@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
+ 	lpc32xx->chip.npwm = 1;
+ 
+ 	/* If PWM is disabled, configure the output to the default value */
+-	val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~PWM_PIN_LEVEL;
+-	writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ 	if (ret < 0) {
+diff --git a/drivers/regulator/raa215300.c b/drivers/regulator/raa215300.c
+index 24a1c89f5dbc9..253645696d0bb 100644
+--- a/drivers/regulator/raa215300.c
++++ b/drivers/regulator/raa215300.c
+@@ -38,10 +38,6 @@
+ #define RAA215300_REG_BLOCK_EN_RTC_EN	BIT(6)
+ #define RAA215300_RTC_DEFAULT_ADDR	0x6f
+ 
+-const char *clkin_name = "clkin";
+-const char *xin_name = "xin";
+-static struct clk *clk;
+-
+ static const struct regmap_config raa215300_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+@@ -51,10 +47,6 @@ static const struct regmap_config raa215300_regmap_config = {
+ static void raa215300_rtc_unregister_device(void *data)
+ {
+ 	i2c_unregister_device(data);
+-	if (!clk) {
+-		clk_unregister_fixed_rate(clk);
+-		clk = NULL;
+-	}
+ }
+ 
+ static int raa215300_clk_present(struct i2c_client *client, const char *name)
+@@ -71,8 +63,10 @@ static int raa215300_clk_present(struct i2c_client *client, const char *name)
+ static int raa215300_i2c_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+-	const char *clk_name = xin_name;
++	const char *clkin_name = "clkin";
+ 	unsigned int pmic_version, val;
++	const char *xin_name = "xin";
++	const char *clk_name = NULL;
+ 	struct regmap *regmap;
+ 	int ret;
+ 
+@@ -114,24 +108,32 @@ static int raa215300_i2c_probe(struct i2c_client *client)
+ 	ret = raa215300_clk_present(client, xin_name);
+ 	if (ret < 0) {
+ 		return ret;
+-	} else if (!ret) {
++	} else if (ret) {
++		clk_name = xin_name;
++	} else {
+ 		ret = raa215300_clk_present(client, clkin_name);
+ 		if (ret < 0)
+ 			return ret;
+-
+-		clk_name = clkin_name;
++		if (ret)
++			clk_name = clkin_name;
+ 	}
+ 
+-	if (ret) {
++	if (clk_name) {
+ 		char *name = pmic_version >= 0x12 ? "isl1208" : "raa215300_a0";
+ 		struct device_node *np = client->dev.of_node;
+ 		u32 addr = RAA215300_RTC_DEFAULT_ADDR;
+ 		struct i2c_board_info info = {};
+ 		struct i2c_client *rtc_client;
++		struct clk_hw *hw;
+ 		ssize_t size;
+ 
+-		clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 32000);
+-		clk_register_clkdev(clk, clk_name, NULL);
++		hw = devm_clk_hw_register_fixed_rate(dev, clk_name, NULL, 0, 32000);
++		if (IS_ERR(hw))
++			return PTR_ERR(hw);
++
++		ret = devm_clk_hw_register_clkdev(dev, hw, clk_name, NULL);
++		if (ret)
++			return dev_err_probe(dev, ret, "Failed to initialize clkdev\n");
+ 
+ 		if (np) {
+ 			int i;
+diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
+index b1c0963586ace..e45579a4498c6 100644
+--- a/drivers/regulator/tps6287x-regulator.c
++++ b/drivers/regulator/tps6287x-regulator.c
+@@ -119,7 +119,7 @@ static struct regulator_desc tps6287x_reg = {
+ 	.ramp_mask = TPS6287X_CTRL1_VRAMP,
+ 	.ramp_delay_table = tps6287x_ramp_table,
+ 	.n_ramp_values = ARRAY_SIZE(tps6287x_ramp_table),
+-	.n_voltages = 256,
++	.n_voltages = 256 * ARRAY_SIZE(tps6287x_voltage_ranges),
+ 	.linear_ranges = tps6287x_voltage_ranges,
+ 	.n_linear_ranges = ARRAY_SIZE(tps6287x_voltage_ranges),
+ 	.linear_range_selectors = tps6287x_voltage_range_sel,
+diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
+index d5a574ec6d12f..47c3b7efe145e 100644
+--- a/drivers/regulator/tps6594-regulator.c
++++ b/drivers/regulator/tps6594-regulator.c
+@@ -384,21 +384,19 @@ static int tps6594_request_reg_irqs(struct platform_device *pdev,
+ 		if (irq < 0)
+ 			return -EINVAL;
+ 
+-		irq_data[*irq_idx + j].dev = tps->dev;
+-		irq_data[*irq_idx + j].type = irq_type;
+-		irq_data[*irq_idx + j].rdev = rdev;
++		irq_data[*irq_idx].dev = tps->dev;
++		irq_data[*irq_idx].type = irq_type;
++		irq_data[*irq_idx].rdev = rdev;
+ 
+ 		error = devm_request_threaded_irq(tps->dev, irq, NULL,
+-						  tps6594_regulator_irq_handler,
+-						  IRQF_ONESHOT,
+-						  irq_type->irq_name,
+-						  &irq_data[*irq_idx]);
+-		(*irq_idx)++;
++						  tps6594_regulator_irq_handler, IRQF_ONESHOT,
++						  irq_type->irq_name, &irq_data[*irq_idx]);
+ 		if (error) {
+ 			dev_err(tps->dev, "tps6594 failed to request %s IRQ %d: %d\n",
+ 				irq_type->irq_name, irq, error);
+ 			return error;
+ 		}
++		(*irq_idx)++;
+ 	}
+ 	return 0;
+ }
+@@ -420,8 +418,8 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
+ 	int error, i, irq, multi, delta;
+ 	int irq_idx = 0;
+ 	int buck_idx = 0;
+-	int ext_reg_irq_nb = 2;
+-
++	size_t ext_reg_irq_nb = 2;
++	size_t reg_irq_nb;
+ 	enum {
+ 		MULTI_BUCK12,
+ 		MULTI_BUCK123,
+@@ -484,15 +482,16 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	if (tps->chip_id == LP8764)
++	if (tps->chip_id == LP8764) {
+ 		/* There is only 4 buck on LP8764 */
+ 		buck_configured[4] = 1;
++		reg_irq_nb = size_mul(REGS_INT_NB, (BUCK_NB - 1));
++	} else {
++		reg_irq_nb = size_mul(REGS_INT_NB, (size_add(BUCK_NB, LDO_NB)));
++	}
+ 
+-	irq_data = devm_kmalloc_array(tps->dev,
+-				REGS_INT_NB * sizeof(struct tps6594_regulator_irq_data),
+-				ARRAY_SIZE(tps6594_bucks_irq_types) +
+-				ARRAY_SIZE(tps6594_ldos_irq_types),
+-				GFP_KERNEL);
++	irq_data = devm_kmalloc_array(tps->dev, reg_irq_nb,
++				      sizeof(struct tps6594_regulator_irq_data), GFP_KERNEL);
+ 	if (!irq_data)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 4b23c9f7f3e54..6b99f7dd06433 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -413,6 +413,7 @@ static int zcdn_create(const char *name)
+ 			 ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ 	nodename[sizeof(nodename) - 1] = '\0';
+ 	if (dev_set_name(&zcdndev->device, nodename)) {
++		kfree(zcdndev);
+ 		rc = -EINVAL;
+ 		goto unlockout;
+ 	}
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index b00222459607a..44449c70a375f 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+ 			vha->flags.difdix_supported = 1;
+ 			ql_dbg(ql_dbg_user, vha, 0x7082,
+ 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
+-			if (ql2xenabledif == 1)
+-				prot = SHOST_DIX_TYPE0_PROTECTION;
+ 			scsi_host_set_prot(vha->host,
+ 			    prot | SHOST_DIF_TYPE1_PROTECTION
+ 			    | SHOST_DIF_TYPE2_PROTECTION
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index d7e8454304cee..ab637324262ff 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -18,7 +18,7 @@
+  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
+  * |                              |                    | 0x3027-0x3028  |
+  * |                              |                    | 0x303d-0x3041  |
+- * |                              |                    | 0x302d,0x3033  |
++ * |                              |                    | 0x302e,0x3033  |
+  * |                              |                    | 0x3036,0x3038  |
+  * |                              |                    | 0x303a		|
+  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 4ae38305c15a1..97852e5f7a87f 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -466,6 +466,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
+ }
+ 
+ struct tmf_arg {
++	struct list_head tmf_elem;
+ 	struct qla_qpair *qpair;
+ 	struct fc_port *fcport;
+ 	struct scsi_qla_host *vha;
+@@ -2541,7 +2542,6 @@ enum rscn_addr_format {
+ typedef struct fc_port {
+ 	struct list_head list;
+ 	struct scsi_qla_host *vha;
+-	struct list_head tmf_pending;
+ 
+ 	unsigned int conf_compl_supported:1;
+ 	unsigned int deleted:2;
+@@ -2562,9 +2562,6 @@ typedef struct fc_port {
+ 	unsigned int do_prli_nvme:1;
+ 
+ 	uint8_t nvme_flag;
+-	uint8_t active_tmf;
+-#define MAX_ACTIVE_TMF 8
+-
+ 	uint8_t node_name[WWN_SIZE];
+ 	uint8_t port_name[WWN_SIZE];
+ 	port_id_t d_id;
+@@ -3745,6 +3742,16 @@ struct qla_fw_resources {
+ 	u16 pad;
+ };
+ 
++struct qla_fw_res {
++	u16      iocb_total;
++	u16      iocb_limit;
++	atomic_t iocb_used;
++
++	u16      exch_total;
++	u16      exch_limit;
++	atomic_t exch_used;
++};
++
+ #define QLA_IOCB_PCT_LIMIT 95
+ 
+ struct  qla_buf_pool {
+@@ -4387,7 +4394,6 @@ struct qla_hw_data {
+ 	uint8_t		aen_mbx_count;
+ 	atomic_t	num_pend_mbx_stage1;
+ 	atomic_t	num_pend_mbx_stage2;
+-	atomic_t	num_pend_mbx_stage3;
+ 	uint16_t	frame_payload_size;
+ 
+ 	uint32_t	login_retry_count;
+@@ -4656,6 +4662,8 @@ struct qla_hw_data {
+ 		uint32_t	flt_region_aux_img_status_sec;
+ 	};
+ 	uint8_t         active_image;
++	uint8_t active_tmf;
++#define MAX_ACTIVE_TMF 8
+ 
+ 	/* Needed for BEACON */
+ 	uint16_t        beacon_blink_led;
+@@ -4670,6 +4678,8 @@ struct qla_hw_data {
+ 
+ 	struct qla_msix_entry *msix_entries;
+ 
++	struct list_head tmf_pending;
++	struct list_head tmf_active;
+ 	struct list_head        vp_list;        /* list of VP */
+ 	unsigned long   vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+ 			sizeof(unsigned long)];
+@@ -4799,6 +4809,7 @@ struct qla_hw_data {
+ 	struct els_reject elsrej;
+ 	u8 edif_post_stop_cnt_down;
+ 	struct qla_vp_map *vp_map;
++	struct qla_fw_res fwres ____cacheline_aligned;
+ };
+ 
+ #define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 1925cc6897b68..f060e593685de 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 
+ 		seq_printf(s, "estimate exchange used[%d] high water limit [%d] \n",
+ 			   exch_used, ha->base_qpair->fwres.exch_limit);
++
++		if (ql2xenforce_iocb_limit == 2) {
++			iocbs_used = atomic_read(&ha->fwres.iocb_used);
++			exch_used  = atomic_read(&ha->fwres.exch_used);
++			seq_printf(s, "        estimate iocb2 used [%d] high water limit [%d]\n",
++					iocbs_used, ha->fwres.iocb_limit);
++
++			seq_printf(s, "        estimate exchange2 used[%d] high water limit [%d] \n",
++					exch_used, ha->fwres.exch_limit);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index ba7831f24734f..33fba9d629693 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -143,6 +143,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+ void qla_edif_clear_appdata(struct scsi_qla_host *vha,
+ 			    struct fc_port *fcport);
+ const char *sc_to_str(uint16_t cmd);
++void qla_adjust_iocb_limit(scsi_qla_host_t *vha);
+ 
+ /*
+  * Global Data in qla_os.c source file.
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 33d4914e19fa6..0cd4f909f9407 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -508,6 +508,7 @@ static
+ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ {
+ 	struct fc_port *fcport = ea->fcport;
++	unsigned long flags;
+ 
+ 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+@@ -522,9 +523,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ 		ql_dbg(ql_dbg_disc, vha, 0x2066,
+ 		    "%s %8phC: adisc fail: post delete\n",
+ 		    __func__, ea->fcport->port_name);
++
++		spin_lock_irqsave(&vha->work_lock, flags);
+ 		/* deleted = 0 & logout_on_delete = force fw cleanup */
+-		fcport->deleted = 0;
++		if (fcport->deleted == QLA_SESS_DELETED)
++			fcport->deleted = 0;
++
+ 		fcport->logout_on_delete = 1;
++		spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 		qlt_schedule_sess_for_deletion(ea->fcport);
+ 		return;
+ 	}
+@@ -1134,7 +1141,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	u16 *mb;
+ 
+ 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+-		return rval;
++		goto done;
+ 
+ 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
+ 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
+@@ -1188,8 +1195,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ done_free_sp:
+ 	/* ref: INIT */
+ 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
++	fcport->flags &= ~(FCF_ASYNC_SENT);
+ done:
+-	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
++	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
+ 	return rval;
+ }
+ 
+@@ -1446,7 +1454,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ 
+ 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ 	ea->fcport->login_gen++;
+-	ea->fcport->deleted = 0;
+ 	ea->fcport->logout_on_delete = 1;
+ 
+ 	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
+@@ -1996,12 +2003,11 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 	int rc, h;
+ 	unsigned long flags;
+ 
+-	if (sp->type == SRB_MARKER) {
+-		complete(&tmf->u.tmf.comp);
+-		return;
+-	}
++	if (sp->type == SRB_MARKER)
++		rc = QLA_FUNCTION_FAILED;
++	else
++		rc = qla24xx_async_abort_cmd(sp, false);
+ 
+-	rc = qla24xx_async_abort_cmd(sp, false);
+ 	if (rc) {
+ 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+@@ -2032,10 +2038,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
+ 	complete(&tmf->u.tmf.comp);
+ }
+ 
+-#define  START_SP_W_RETRIES(_sp, _rval) \
++#define  START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
+ {\
+ 	int cnt = 5; \
+ 	do { \
++		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
++			_rval = EINVAL; \
++			break; \
++		} \
+ 		_rval = qla2x00_start_sp(_sp); \
+ 		if (_rval == EAGAIN) \
+ 			msleep(1); \
+@@ -2058,6 +2068,7 @@ qla26xx_marker(struct tmf_arg *arg)
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+ 	fc_port_t *fcport = arg->fcport;
++	u32 chip_gen, login_gen;
+ 
+ 	if (TMF_NOT_READY(arg->fcport)) {
+ 		ql_dbg(ql_dbg_taskm, vha, 0x8039,
+@@ -2067,6 +2078,9 @@ qla26xx_marker(struct tmf_arg *arg)
+ 		return QLA_SUSPENDED;
+ 	}
+ 
++	chip_gen = vha->hw->chip_reset;
++	login_gen = fcport->login_gen;
++
+ 	/* ref: INIT */
+ 	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ 	if (!sp)
+@@ -2084,7 +2098,7 @@ qla26xx_marker(struct tmf_arg *arg)
+ 	tm_iocb->u.tmf.loop_id = fcport->loop_id;
+ 	tm_iocb->u.tmf.vp_index = vha->vp_idx;
+ 
+-	START_SP_W_RETRIES(sp, rval);
++	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
+ 
+ 	ql_dbg(ql_dbg_taskm, vha, 0x8006,
+ 	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+@@ -2123,6 +2137,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
+ 	complete(&tmf->u.tmf.comp);
+ }
+ 
++static int qla_tmf_wait(struct tmf_arg *arg)
++{
++	/* there are only 2 types of error handling that reaches here, lun or target reset */
++	if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
++		return qla2x00_eh_wait_for_pending_commands(arg->vha,
++		    arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
++	else
++		return qla2x00_eh_wait_for_pending_commands(arg->vha,
++		    arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
++}
++
+ static int
+ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ {
+@@ -2130,8 +2155,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 	struct srb_iocb *tm_iocb;
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+-
+ 	fc_port_t *fcport = arg->fcport;
++	u32 chip_gen, login_gen;
++	u64 jif;
+ 
+ 	if (TMF_NOT_READY(arg->fcport)) {
+ 		ql_dbg(ql_dbg_taskm, vha, 0x8032,
+@@ -2141,6 +2167,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 		return QLA_SUSPENDED;
+ 	}
+ 
++	chip_gen = vha->hw->chip_reset;
++	login_gen = fcport->login_gen;
++
+ 	/* ref: INIT */
+ 	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ 	if (!sp)
+@@ -2158,7 +2187,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 	tm_iocb->u.tmf.flags = arg->flags;
+ 	tm_iocb->u.tmf.lun = arg->lun;
+ 
+-	START_SP_W_RETRIES(sp, rval);
++	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
+ 
+ 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
+ 	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+@@ -2176,8 +2205,26 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 		    "TM IOCB failed (%x).\n", rval);
+ 	}
+ 
+-	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
+-		rval = qla26xx_marker(arg);
++	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
++		jif = jiffies;
++		if (qla_tmf_wait(arg)) {
++			ql_log(ql_log_info, vha, 0x803e,
++			       "Waited %u ms Nexus=%ld:%06x:%llu.\n",
++			       jiffies_to_msecs(jiffies - jif), vha->host_no,
++			       fcport->d_id.b24, arg->lun);
++		}
++
++		if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
++			rval = qla26xx_marker(arg);
++		} else {
++			ql_log(ql_log_info, vha, 0x803e,
++			       "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
++			       vha->host_no, fcport->d_id.b24, arg->lun);
++			rval = QLA_FUNCTION_FAILED;
++		}
++	}
++	if (tm_iocb->u.tmf.data)
++		rval = tm_iocb->u.tmf.data;
+ 
+ done_free_sp:
+ 	/* ref: INIT */
+@@ -2186,30 +2233,42 @@ done:
+ 	return rval;
+ }
+ 
+-static void qla_put_tmf(fc_port_t *fcport)
++static void qla_put_tmf(struct tmf_arg *arg)
+ {
+-	struct scsi_qla_host *vha = fcport->vha;
++	struct scsi_qla_host *vha = arg->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+-	fcport->active_tmf--;
++	ha->active_tmf--;
++	list_del(&arg->tmf_elem);
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+ 
+ static
+-int qla_get_tmf(fc_port_t *fcport)
++int qla_get_tmf(struct tmf_arg *arg)
+ {
+-	struct scsi_qla_host *vha = fcport->vha;
++	struct scsi_qla_host *vha = arg->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	unsigned long flags;
++	fc_port_t *fcport = arg->fcport;
+ 	int rc = 0;
+-	LIST_HEAD(tmf_elem);
++	struct tmf_arg *t;
+ 
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+-	list_add_tail(&tmf_elem, &fcport->tmf_pending);
++	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
++		if (t->fcport == arg->fcport && t->lun == arg->lun) {
++			/* reject duplicate TMF */
++			ql_log(ql_log_warn, vha, 0x802c,
++			       "found duplicate TMF.  Nexus=%ld:%06x:%llu.\n",
++			       vha->host_no, fcport->d_id.b24, arg->lun);
++			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++			return -EINVAL;
++		}
++	}
+ 
+-	while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
++	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
++	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
+ 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ 
+ 		msleep(1);
+@@ -2221,15 +2280,17 @@ int qla_get_tmf(fc_port_t *fcport)
+ 			rc = EIO;
+ 			break;
+ 		}
+-		if (fcport->active_tmf < MAX_ACTIVE_TMF &&
+-		    list_is_first(&tmf_elem, &fcport->tmf_pending))
++		if (ha->active_tmf < MAX_ACTIVE_TMF &&
++		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
+ 			break;
+ 	}
+ 
+-	list_del(&tmf_elem);
++	list_del(&arg->tmf_elem);
+ 
+-	if (!rc)
+-		fcport->active_tmf++;
++	if (!rc) {
++		ha->active_tmf++;
++		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
++	}
+ 
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ 
+@@ -2241,9 +2302,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ 		     uint32_t tag)
+ {
+ 	struct scsi_qla_host *vha = fcport->vha;
+-	struct qla_qpair *qpair;
+ 	struct tmf_arg a;
+-	int i, rval = QLA_SUCCESS;
++	int rval = QLA_SUCCESS;
+ 
+ 	if (TMF_NOT_READY(fcport))
+ 		return QLA_SUSPENDED;
+@@ -2251,47 +2311,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ 	a.vha = fcport->vha;
+ 	a.fcport = fcport;
+ 	a.lun = lun;
++	a.flags = flags;
++	INIT_LIST_HEAD(&a.tmf_elem);
++
+ 	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+ 		a.modifier = MK_SYNC_ID_LUN;
+-
+-		if (qla_get_tmf(fcport))
+-			return QLA_FUNCTION_FAILED;
+ 	} else {
+ 		a.modifier = MK_SYNC_ID;
+ 	}
+ 
+-	if (vha->hw->mqenable) {
+-		for (i = 0; i < vha->hw->num_qpairs; i++) {
+-			qpair = vha->hw->queue_pair_map[i];
+-			if (!qpair)
+-				continue;
+-
+-			if (TMF_NOT_READY(fcport)) {
+-				ql_log(ql_log_warn, vha, 0x8026,
+-				    "Unable to send TM due to disruption.\n");
+-				rval = QLA_SUSPENDED;
+-				break;
+-			}
+-
+-			a.qpair = qpair;
+-			a.flags = flags|TCF_NOTMCMD_TO_TARGET;
+-			rval = __qla2x00_async_tm_cmd(&a);
+-			if (rval)
+-				break;
+-		}
+-	}
+-
+-	if (rval)
+-		goto bailout;
++	if (qla_get_tmf(&a))
++		return QLA_FUNCTION_FAILED;
+ 
+ 	a.qpair = vha->hw->base_qpair;
+-	a.flags = flags;
+ 	rval = __qla2x00_async_tm_cmd(&a);
+ 
+-bailout:
+-	if (a.modifier == MK_SYNC_ID_LUN)
+-		qla_put_tmf(fcport);
+-
++	qla_put_tmf(&a);
+ 	return rval;
+ }
+ 
+@@ -4147,39 +4182,61 @@ out:
+ 	return ha->flags.lr_detected;
+ }
+ 
+-void qla_init_iocb_limit(scsi_qla_host_t *vha)
++static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
+ {
+-	u16 i, num_qps;
+-	u32 limit;
+-	struct qla_hw_data *ha = vha->hw;
++	u8 num_qps;
++	u16 limit;
++	struct qla_hw_data *ha = qpair->vha->hw;
+ 
+ 	num_qps = ha->num_qpairs + 1;
+ 	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ 
+-	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+-	ha->base_qpair->fwres.iocbs_limit = limit;
+-	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+-	ha->base_qpair->fwres.iocbs_used = 0;
++	qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
++	qpair->fwres.iocbs_limit = limit;
++	qpair->fwres.iocbs_qp_limit = limit / num_qps;
++
++	qpair->fwres.exch_total = ha->orig_fw_xcb_count;
++	qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
++				   QLA_IOCB_PCT_LIMIT) / 100;
++}
++
++void qla_init_iocb_limit(scsi_qla_host_t *vha)
++{
++	u8 i;
++	struct qla_hw_data *ha = vha->hw;
+ 
+-	ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+-	ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+-					    QLA_IOCB_PCT_LIMIT) / 100;
++	__qla_adjust_iocb_limit(ha->base_qpair);
++	ha->base_qpair->fwres.iocbs_used = 0;
+ 	ha->base_qpair->fwres.exch_used  = 0;
+ 
+ 	for (i = 0; i < ha->max_qpairs; i++) {
+ 		if (ha->queue_pair_map[i])  {
+-			ha->queue_pair_map[i]->fwres.iocbs_total =
+-				ha->orig_fw_iocb_count;
+-			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+-			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+-				limit / num_qps;
++			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ 			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+-			ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
+-			ha->queue_pair_map[i]->fwres.exch_limit =
+-				(ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ 			ha->queue_pair_map[i]->fwres.exch_used = 0;
+ 		}
+ 	}
++
++	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
++	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
++	ha->fwres.exch_total = ha->orig_fw_xcb_count;
++	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
++
++	atomic_set(&ha->fwres.iocb_used, 0);
++	atomic_set(&ha->fwres.exch_used, 0);
++}
++
++void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
++{
++	u8 i;
++	struct qla_hw_data *ha = vha->hw;
++
++	__qla_adjust_iocb_limit(ha->base_qpair);
++
++	for (i = 0; i < ha->max_qpairs; i++) {
++		if (ha->queue_pair_map[i])
++			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
++	}
+ }
+ 
+ /**
+@@ -4777,15 +4834,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 	if (ha->flags.edif_enabled)
+ 		mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+ 
++	QLA_FW_STARTED(ha);
+ 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+ next_check:
+ 	if (rval) {
++		QLA_FW_STOPPED(ha);
+ 		ql_log(ql_log_fatal, vha, 0x00d2,
+ 		    "Init Firmware **** FAILED ****.\n");
+ 	} else {
+ 		ql_dbg(ql_dbg_init, vha, 0x00d3,
+ 		    "Init Firmware -- success.\n");
+-		QLA_FW_STARTED(ha);
+ 		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
+ 	}
+ 
+@@ -5506,7 +5564,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+ 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
+ 	INIT_LIST_HEAD(&fcport->gnl_entry);
+ 	INIT_LIST_HEAD(&fcport->list);
+-	INIT_LIST_HEAD(&fcport->tmf_pending);
+ 
+ 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
+ 	spin_lock_init(&fcport->sess_cmd_lock);
+@@ -6090,6 +6147,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+ void
+ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+ {
++	unsigned long flags;
++
+ 	if (IS_SW_RESV_ADDR(fcport->d_id))
+ 		return;
+ 
+@@ -6099,7 +6158,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
+ 	fcport->login_retry = vha->hw->login_retry_count;
+ 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
++
++	spin_lock_irqsave(&vha->work_lock, flags);
+ 	fcport->deleted = 0;
++	spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 	if (vha->hw->current_topology == ISP_CFG_NL)
+ 		fcport->logout_on_delete = 0;
+ 	else
+@@ -7337,14 +7400,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ 	}
+ 
+ 	/* purge MBox commands */
+-	if (atomic_read(&ha->num_pend_mbx_stage3)) {
++	spin_lock_irqsave(&ha->hardware_lock, flags);
++	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
+ 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ 		complete(&ha->mbx_intr_comp);
+ 	}
++	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	i = 0;
+-	while (atomic_read(&ha->num_pend_mbx_stage3) ||
+-	    atomic_read(&ha->num_pend_mbx_stage2) ||
++	while (atomic_read(&ha->num_pend_mbx_stage2) ||
+ 	    atomic_read(&ha->num_pend_mbx_stage1)) {
+ 		msleep(20);
+ 		i++;
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 0167e85ba0587..0556969f6dc1b 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -386,6 +386,7 @@ enum {
+ 	RESOURCE_IOCB = BIT_0,
+ 	RESOURCE_EXCH = BIT_1,  /* exchange */
+ 	RESOURCE_FORCE = BIT_2,
++	RESOURCE_HA = BIT_3,
+ };
+ 
+ static inline int
+@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+ 	u16 iocbs_used, i;
+ 	u16 exch_used;
+-	struct qla_hw_data *ha = qp->vha->hw;
++	struct qla_hw_data *ha = qp->hw;
+ 
+ 	if (!ql2xenforce_iocb_limit) {
+ 		iores->res_type = RESOURCE_NONE;
+@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ 			return -ENOSPC;
+ 		}
+ 	}
++
++	if (ql2xenforce_iocb_limit == 2) {
++		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
++		    ha->fwres.iocb_limit) {
++			iores->res_type = RESOURCE_NONE;
++			return -ENOSPC;
++		}
++
++		if (iores->res_type & RESOURCE_EXCH) {
++			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
++			    ha->fwres.exch_limit) {
++				iores->res_type = RESOURCE_NONE;
++				return -ENOSPC;
++			}
++		}
++	}
++
+ force:
+ 	qp->fwres.iocbs_used += iores->iocb_cnt;
+ 	qp->fwres.exch_used += iores->exch_cnt;
++	if (ql2xenforce_iocb_limit == 2) {
++		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
++		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
++		iores->res_type |= RESOURCE_HA;
++	}
+ 	return 0;
+ }
+ 
++/*
++ * decrement to zero.  This routine will not decrement below zero
++ * @v:  pointer of type atomic_t
++ * @amount: amount to decrement from v
++ */
++static void qla_atomic_dtz(atomic_t *v, int amount)
++{
++	int c, old, dec;
++
++	c = atomic_read(v);
++	for (;;) {
++		dec = c - amount;
++		if (unlikely(dec < 0))
++			dec = 0;
++
++		old = atomic_cmpxchg((v), c, dec);
++		if (likely(old == c))
++			break;
++		c = old;
++	}
++}
++
+ static inline void
+ qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
++	struct qla_hw_data *ha = qp->hw;
++
++	if (iores->res_type & RESOURCE_HA) {
++		if (iores->res_type & RESOURCE_IOCB)
++			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
++
++		if (iores->res_type & RESOURCE_EXCH)
++			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
++	}
++
+ 	if (iores->res_type & RESOURCE_IOCB) {
+ 		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ 			qp->fwres.iocbs_used -= iores->iocb_cnt;
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 730d8609276c6..1ee9b7d5fc15e 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -3882,6 +3882,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+ {
+ 	mrk->entry_type = MARKER_TYPE;
+ 	mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
++	mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
+ 	if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
+ 		mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
+ 		int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 656700f793259..1f42a413b5988 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1121,8 +1121,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+ 	unsigned long	flags;
+ 	fc_port_t	*fcport = NULL;
+ 
+-	if (!vha->hw->flags.fw_started)
++	if (!vha->hw->flags.fw_started) {
++		ql_log(ql_log_warn, vha, 0x50ff,
++		    "Dropping AEN - %04x %04x %04x %04x.\n",
++		    mb[0], mb[1], mb[2], mb[3]);
+ 		return;
++	}
+ 
+ 	/* Setup to process RIO completion. */
+ 	handle_cnt = 0;
+@@ -2539,7 +2543,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+ 	case CS_PORT_BUSY:
+ 	case CS_INCOMPLETE:
+ 	case CS_PORT_UNAVAILABLE:
+-	case CS_TIMEOUT:
+ 	case CS_RESET:
+ 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ 			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 254fd4c642628..21ec32b4fb280 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 		wait_time = jiffies;
+-		atomic_inc(&ha->num_pend_mbx_stage3);
+ 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
+ 		    mcp->tov * HZ)) {
+ 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
+@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 				spin_unlock_irqrestore(&ha->hardware_lock,
+ 				    flags);
+ 				atomic_dec(&ha->num_pend_mbx_stage2);
+-				atomic_dec(&ha->num_pend_mbx_stage3);
+ 				rval = QLA_ABORTED;
+ 				goto premature_exit;
+ 			}
+@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 			ha->flags.mbox_busy = 0;
+ 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 			atomic_dec(&ha->num_pend_mbx_stage2);
+-			atomic_dec(&ha->num_pend_mbx_stage3);
+ 			rval = QLA_ABORTED;
+ 			goto premature_exit;
+ 		}
+-		atomic_dec(&ha->num_pend_mbx_stage3);
+ 
+ 		if (time_after(jiffies, wait_time + 5 * HZ))
+ 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+@@ -2213,6 +2209,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
+ 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ 	    "Entered %s.\n", __func__);
+ 
++	if (!ha->flags.fw_started)
++		return QLA_FUNCTION_FAILED;
++
+ 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+ 	mcp->out_mb = MBX_0;
+ 	if (IS_FWI2_CAPABLE(vha->hw))
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 86e85f2f4782f..9941b38eac93c 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -132,6 +132,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+ 			       "Failed to allocate qpair\n");
+ 			return -EINVAL;
+ 		}
++		qla_adjust_iocb_limit(vha);
+ 	}
+ 	*handle = qpair;
+ 
+@@ -667,7 +668,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 
+ 	rval = qla2x00_start_nvme_mq(sp);
+ 	if (rval != QLA_SUCCESS) {
+-		ql_log(ql_log_warn, vha, 0x212d,
++		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
+ 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 877e4f446709d..67176be79dffd 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+ 		 "Set this to take full dump on MPI hang.");
+ 
+-int ql2xenforce_iocb_limit = 1;
++int ql2xenforce_iocb_limit = 2;
+ module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+-		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
++		 "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
++		 "1: track usage per queue, 2: track usage per adapter");
+ 
+ /*
+  * CT6 CTX allocation cache
+@@ -1488,8 +1489,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+ 		goto eh_reset_failed;
+ 	}
+ 	err = 3;
+-	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+-	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
++	if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
++						 cmd->device->lun,
++						 WAIT_LUN) != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x800d,
+ 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
+ 		goto eh_reset_failed;
+@@ -1555,8 +1557,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
+ 		goto eh_reset_failed;
+ 	}
+ 	err = 3;
+-	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+-	    0, WAIT_TARGET) != QLA_SUCCESS) {
++	if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
++						 WAIT_TARGET) != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x800d,
+ 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
+ 		goto eh_reset_failed;
+@@ -3006,9 +3008,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+ 	atomic_set(&ha->num_pend_mbx_stage1, 0);
+ 	atomic_set(&ha->num_pend_mbx_stage2, 0);
+-	atomic_set(&ha->num_pend_mbx_stage3, 0);
+ 	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+ 	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
++	INIT_LIST_HEAD(&ha->tmf_pending);
++	INIT_LIST_HEAD(&ha->tmf_active);
+ 
+ 	/* Assign ISP specific operations. */
+ 	if (IS_QLA2100(ha)) {
+@@ -3285,6 +3288,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	host->max_id = ha->max_fibre_devices;
+ 	host->cmd_per_lun = 3;
+ 	host->unique_id = host->host_no;
++
++	if (ql2xenabledif && ql2xenabledif != 2) {
++		ql_log(ql_log_warn, base_vha, 0x302d,
++		       "Invalid value for ql2xenabledif, resetting it to default (2)\n");
++		ql2xenabledif = 2;
++	}
++
+ 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+ 		host->max_cmd_len = 32;
+ 	else
+@@ -3521,8 +3531,6 @@ skip_dpc:
+ 			base_vha->flags.difdix_supported = 1;
+ 			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+ 			    "Registering for DIF/DIX type 1 and 3 protection.\n");
+-			if (ql2xenabledif == 1)
+-				prot = SHOST_DIX_TYPE0_PROTECTION;
+ 			if (ql2xprotmask)
+ 				scsi_host_set_prot(host, ql2xprotmask);
+ 			else
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 5258b07687a94..2b815a9928ea3 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1068,10 +1068,6 @@ void qlt_free_session_done(struct work_struct *work)
+ 			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
+ 	}
+ 
+-	spin_lock_irqsave(&vha->work_lock, flags);
+-	sess->flags &= ~FCF_ASYNC_SENT;
+-	spin_unlock_irqrestore(&vha->work_lock, flags);
+-
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ 	if (sess->se_sess) {
+ 		sess->se_sess = NULL;
+@@ -1081,7 +1077,6 @@ void qlt_free_session_done(struct work_struct *work)
+ 
+ 	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
+ 	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+-	sess->deleted = QLA_SESS_DELETED;
+ 
+ 	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
+ 		vha->fcport_count--;
+@@ -1133,10 +1128,15 @@ void qlt_free_session_done(struct work_struct *work)
+ 
+ 	sess->explicit_logout = 0;
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+-	sess->free_pending = 0;
+ 
+ 	qla2x00_dfs_remove_rport(vha, sess);
+ 
++	spin_lock_irqsave(&vha->work_lock, flags);
++	sess->flags &= ~FCF_ASYNC_SENT;
++	sess->deleted = QLA_SESS_DELETED;
++	sess->free_pending = 0;
++	spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 	ql_dbg(ql_dbg_disc, vha, 0xf001,
+ 	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+ 		sess, sess->port_name, vha->fcport_count);
+@@ -1185,12 +1185,12 @@ void qlt_unreg_sess(struct fc_port *sess)
+ 	 * management from being sent.
+ 	 */
+ 	sess->flags |= FCF_ASYNC_SENT;
++	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ 
+ 	if (sess->se_sess)
+ 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+ 
+-	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
+ 	sess->last_rscn_gen = sess->rscn_gen;
+ 	sess->last_login_gen = sess->login_gen;
+diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
+index b7158e3c3a0bd..5c7161b18b724 100644
+--- a/drivers/soc/qcom/qmi_encdec.c
++++ b/drivers/soc/qcom/qmi_encdec.c
+@@ -534,8 +534,8 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
+ 		decoded_bytes += rc;
+ 	}
+ 
+-	if (string_len > temp_ei->elem_len) {
+-		pr_err("%s: String len %d > Max Len %d\n",
++	if (string_len >= temp_ei->elem_len) {
++		pr_err("%s: String len %d >= Max Len %d\n",
+ 		       __func__, string_len, temp_ei->elem_len);
+ 		return -ETOOSMALL;
+ 	} else if (string_len > tlv_len) {
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 0d38e7fa34cc8..97f4d86f8d90d 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -76,8 +76,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
+ 	int ret;
+ 	int data_len;
+ 
+-	if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
+-	    !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
++	if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
+ 		return -EINVAL;
+ 
+ 	if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index fa18806e80b61..9615a076735bd 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -7296,7 +7296,15 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
+ 	/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
+ 	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ 
+-	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
++	/*
++	 * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, the host
++	 * controller takes the EHS length from the CMD UPIU and the SW driver uses the
++	 * EHS Length field in the CMD UPIU. If it is 1, the HW controller takes it from the UTRD.
++	 */
++	if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
++		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
++	else
++		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
+ 
+ 	/* update the task tag and LUN in the request upiu */
+ 	req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
+index 5c5c99f7979e3..30ec5b6845335 100644
+--- a/drivers/video/backlight/gpio_backlight.c
++++ b/drivers/video/backlight/gpio_backlight.c
+@@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
+ 		/* Not booted with device tree or no phandle link to the node */
+ 		bl->props.power = def_value ? FB_BLANK_UNBLANK
+ 					    : FB_BLANK_POWERDOWN;
+-	else if (gpiod_get_direction(gbl->gpiod) == 0 &&
+-		 gpiod_get_value_cansleep(gbl->gpiod) == 0)
++	else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ 		bl->props.power = FB_BLANK_POWERDOWN;
+ 	else
+ 		bl->props.power = FB_BLANK_UNBLANK;
+diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
+index 1c9e921bca14a..349ec324bc1ea 100644
+--- a/drivers/video/backlight/lp855x_bl.c
++++ b/drivers/video/backlight/lp855x_bl.c
+@@ -71,6 +71,7 @@ struct lp855x {
+ 	struct device *dev;
+ 	struct lp855x_platform_data *pdata;
+ 	struct pwm_device *pwm;
++	bool needs_pwm_init;
+ 	struct regulator *supply;	/* regulator for VDD input */
+ 	struct regulator *enable;	/* regulator for EN/VDDIO input */
+ };
+@@ -220,7 +221,15 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+ {
+ 	struct pwm_state state;
+ 
+-	pwm_get_state(lp->pwm, &state);
++	if (lp->needs_pwm_init) {
++		pwm_init_state(lp->pwm, &state);
++		/* Legacy platform data compatibility */
++		if (lp->pdata->period_ns > 0)
++			state.period = lp->pdata->period_ns;
++		lp->needs_pwm_init = false;
++	} else {
++		pwm_get_state(lp->pwm, &state);
++	}
+ 
+ 	state.duty_cycle = div_u64(br * state.period, max_br);
+ 	state.enabled = state.duty_cycle;
+@@ -387,7 +396,6 @@ static int lp855x_probe(struct i2c_client *cl)
+ 	const struct i2c_device_id *id = i2c_client_get_device_id(cl);
+ 	const struct acpi_device_id *acpi_id = NULL;
+ 	struct device *dev = &cl->dev;
+-	struct pwm_state pwmstate;
+ 	struct lp855x *lp;
+ 	int ret;
+ 
+@@ -470,15 +478,11 @@ static int lp855x_probe(struct i2c_client *cl)
+ 		else
+ 			return dev_err_probe(dev, ret, "getting PWM\n");
+ 
++		lp->needs_pwm_init = false;
+ 		lp->mode = REGISTER_BASED;
+ 		dev_dbg(dev, "mode: register based\n");
+ 	} else {
+-		pwm_init_state(lp->pwm, &pwmstate);
+-		/* Legacy platform data compatibility */
+-		if (lp->pdata->period_ns > 0)
+-			pwmstate.period = lp->pdata->period_ns;
+-		pwm_apply_state(lp->pwm, &pwmstate);
+-
++		lp->needs_pwm_init = true;
+ 		lp->mode = PWM_BASED;
+ 		dev_dbg(dev, "mode: PWM based\n");
+ 	}
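
The shape of this change: probe() stops applying the PWM state itself and merely records, via needs_pwm_init, that the first brightness update must run pwm_init_state() (plus the legacy period override) before computing a duty cycle. A reduced sketch of this defer-to-first-use pattern (illustrative names, not the driver's real API):

    #include <stdbool.h>

    struct bl_state {
        bool needs_init;
        unsigned long long period_ns;
    };

    /* probe(): only mark that init is still pending. */
    static void probe(struct bl_state *st)
    {
        st->needs_init = true;
    }

    /* The first brightness update performs the one-time setup. */
    static unsigned long long set_brightness(struct bl_state *st,
                                             unsigned int br,
                                             unsigned int max_br)
    {
        if (st->needs_init) {
            st->period_ns = 5000000;  /* stand-in for pwm_init_state() */
            st->needs_init = false;
        }
        /* scale duty cycle against period, as lp855x_pwm_ctrl() does */
        return (unsigned long long)br * st->period_ns / max_br;
    }
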
+diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
+index 22158d9ca8ddf..71f355d34a13a 100644
+--- a/drivers/video/fbdev/ep93xx-fb.c
++++ b/drivers/video/fbdev/ep93xx-fb.c
+@@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
+ 	if (!info)
+ 		return -ENOMEM;
+ 
+-	info->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, info);
+ 	fbi = info->par;
+ 	fbi->mach_info = mach_info;
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
+index ee97d89dfc11a..2108283b438cd 100644
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -1075,6 +1075,8 @@ config ADVANTECH_WDT
+ config ADVANTECH_EC_WDT
+ 	tristate "Advantech Embedded Controller Watchdog Timer"
+ 	depends on X86
++	select ISA_BUS_API
++	select WATCHDOG_CORE
+ 	help
+ 		This driver supports Advantech products with ITE based Embedded Controller.
+ 		It does not support Advantech products with other ECs or without EC.
+diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
+index 9b2173f765c8c..fb7fae750181b 100644
+--- a/drivers/watchdog/intel-mid_wdt.c
++++ b/drivers/watchdog/intel-mid_wdt.c
+@@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver);
+ MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
+ MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:intel_mid_wdt");
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index a9a2c5446c180..4494883a19abc 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2384,11 +2384,10 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+-		   BTRFS_FSID_SIZE)) {
++	if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
+ 		btrfs_err(fs_info,
+ 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+-			fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
++			  sb->fsid, fs_info->fs_devices->fsid);
+ 		ret = -EINVAL;
+ 	}
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f396a9afa4032..e5566827da17e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3709,7 +3709,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	       fs_info->data_reloc_bg == 0);
+ 
+ 	if (block_group->ro ||
+-	    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
++	    (!ffe_ctl->for_data_reloc &&
++	     test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
+ 		ret = 1;
+ 		goto out;
+ 	}
+@@ -3752,8 +3753,26 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
+ 		fs_info->treelog_bg = block_group->start;
+ 
+-	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
+-		fs_info->data_reloc_bg = block_group->start;
++	if (ffe_ctl->for_data_reloc) {
++		if (!fs_info->data_reloc_bg)
++			fs_info->data_reloc_bg = block_group->start;
++		/*
++		 * Do not allow allocations from this block group, unless it is
++		 * for data relocation. Compared to increasing the ->ro, setting
++		 * the ->zoned_data_reloc_ongoing flag still allows nocow
++		 * writers to come in. See btrfs_inc_nocow_writers().
++		 *
++		 * We need to disable allocations here to avoid allocating a
++		 * regular (non-relocation data) extent. With a mix of relocation
++		 * extents and regular extents, we can dispatch WRITE commands
++		 * (for relocation extents) and ZONE APPEND commands (for
++		 * regular extents) at the same time to the same zone, which
++		 * easily breaks the write pointer.
++		 *
++		 * Also, this flag prevents this block group from being zone finished.
++		 */
++		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
++	}
+ 
+ 	ffe_ctl->found_offset = start + block_group->alloc_offset;
+ 	block_group->alloc_offset += num_bytes;
+@@ -3771,24 +3790,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ out:
+ 	if (ret && ffe_ctl->for_treelog)
+ 		fs_info->treelog_bg = 0;
+-	if (ret && ffe_ctl->for_data_reloc &&
+-	    fs_info->data_reloc_bg == block_group->start) {
+-		/*
+-		 * Do not allow further allocations from this block group.
+-		 * Compared to increasing the ->ro, setting the
+-		 * ->zoned_data_reloc_ongoing flag still allows nocow
+-		 *  writers to come in. See btrfs_inc_nocow_writers().
+-		 *
+-		 * We need to disable an allocation to avoid an allocation of
+-		 * regular (non-relocation data) extent. With mix of relocation
+-		 * extents and regular extents, we can dispatch WRITE commands
+-		 * (for relocation extents) and ZONE APPEND commands (for
+-		 * regular extents) at the same time to the same zone, which
+-		 * easily break the write pointer.
+-		 */
+-		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
++	if (ret && ffe_ctl->for_data_reloc)
+ 		fs_info->data_reloc_bg = 0;
+-	}
+ 	spin_unlock(&fs_info->relocation_bg_lock);
+ 	spin_unlock(&fs_info->treelog_bg_lock);
+ 	spin_unlock(&block_group->lock);
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 696bf695d8eb0..1ce5dd1544995 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -597,29 +597,37 @@ fail:
+  * Each bit represents a sector. Thus caller should ensure @csum_buf passed
+  * in is large enough to contain all csums.
+  */
+-int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+-			      u8 *csum_buf, unsigned long *csum_bitmap,
+-			      bool search_commit)
++int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
++			      u64 start, u64 end, u8 *csum_buf,
++			      unsigned long *csum_bitmap)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct btrfs_key key;
+-	struct btrfs_path *path;
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_csum_item *item;
+ 	const u64 orig_start = start;
++	bool free_path = false;
+ 	int ret;
+ 
+ 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+ 	       IS_ALIGNED(end + 1, fs_info->sectorsize));
+ 
+-	path = btrfs_alloc_path();
+-	if (!path)
+-		return -ENOMEM;
++	if (!path) {
++		path = btrfs_alloc_path();
++		if (!path)
++			return -ENOMEM;
++		free_path = true;
++	}
+ 
+-	if (search_commit) {
+-		path->skip_locking = 1;
+-		path->reada = READA_FORWARD;
+-		path->search_commit_root = 1;
++	/* Check if we can reuse the previous path. */
++	if (path->nodes[0]) {
++		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
++
++		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
++		    key.type == BTRFS_EXTENT_CSUM_KEY &&
++		    key.offset <= start)
++			goto search_forward;
++		btrfs_release_path(path);
+ 	}
+ 
+ 	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+@@ -656,6 +664,7 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+ 		}
+ 	}
+ 
++search_forward:
+ 	while (start <= end) {
+ 		u64 csum_end;
+ 
+@@ -712,7 +721,8 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+ 	}
+ 	ret = 0;
+ fail:
+-	btrfs_free_path(path);
++	if (free_path)
++		btrfs_free_path(path);
+ 	return ret;
+ }
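
The reworked btrfs_lookup_csums_bitmap() accepts an optional caller-owned path: it allocates locally only when none is passed, reuses the caller's cached position when the previous key still precedes the new range, and frees only what it allocated itself. A stripped-down sketch of that ownership pattern (illustrative types, not btrfs API):

    #include <stdlib.h>

    struct path { long pos; };

    static int lookup(struct path *path, long start)
    {
        struct path *local = NULL;

        if (!path) {
            local = calloc(1, sizeof(*local));
            if (!local)
                return -1;
            path = local;
        }
        /* Reuse the cached position if it does not overshoot the
         * requested start; otherwise reset and search from scratch. */
        if (path->pos > start)
            path->pos = 0;
        /* ... walk forward from path->pos, updating it ... */
        path->pos = start;

        free(local);    /* no-op when the caller owns the path */
        return 0;
    }
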
+ 
+diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
+index 4ec669b690080..04bd2d34efb14 100644
+--- a/fs/btrfs/file-item.h
++++ b/fs/btrfs/file-item.h
+@@ -57,9 +57,9 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
+ 			    struct list_head *list, int search_commit,
+ 			    bool nowait);
+-int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+-			      u8 *csum_buf, unsigned long *csum_bitmap,
+-			      bool search_commit);
++int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
++			      u64 start, u64 end, u8 *csum_buf,
++			      unsigned long *csum_bitmap);
+ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
+ 				     const struct btrfs_path *path,
+ 				     struct btrfs_file_extent_item *fi,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index aa090b0b5d298..db2b33a822fcd 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3359,6 +3359,13 @@ out:
+ 			btrfs_free_reserved_extent(fs_info,
+ 					ordered_extent->disk_bytenr,
+ 					ordered_extent->disk_num_bytes, 1);
++			/*
++			 * Actually free the qgroup rsv which was released when
++			 * the ordered extent was created.
++			 */
++			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
++						  ordered_extent->qgroup_rsv,
++						  BTRFS_QGROUP_RSV_DATA);
+ 		}
+ 	}
+ 
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 0249ea52bb803..3e388e0431e34 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2112,8 +2112,8 @@ static void fill_data_csums(struct btrfs_raid_bio *rbio)
+ 		goto error;
+ 	}
+ 
+-	ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
+-					rbio->csum_buf, rbio->csum_bitmap, false);
++	ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
++					rbio->csum_buf, rbio->csum_bitmap);
+ 	if (ret < 0)
+ 		goto error;
+ 	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 46c3c1d572668..5f4ff7d5b5c19 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3006,9 +3006,6 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+ 		if (!page)
+ 			return -ENOMEM;
+ 	}
+-	ret = set_page_extent_mapped(page);
+-	if (ret < 0)
+-		goto release_page;
+ 
+ 	if (PageReadahead(page))
+ 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
+@@ -3024,6 +3021,15 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+ 		}
+ 	}
+ 
++	/*
++	 * We could have lost page private when we dropped the lock to read the
++	 * page above, so make sure we call set_page_extent_mapped() here to
++	 * keep the subpage blocksize state we need in place.
++	 */
++	ret = set_page_extent_mapped(page);
++	if (ret < 0)
++		goto release_page;
++
+ 	page_start = page_offset(page);
+ 	page_end = page_start + PAGE_SIZE - 1;
+ 
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 7289f5bff397d..cfbd6b1c4b7f1 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -43,9 +43,20 @@ struct scrub_ctx;
+ /*
+  * The following value only influences the performance.
+  *
+- * This determines the batch size for stripe submitted in one go.
++ * This determines how many stripes are submitted in one go,
++ * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
+  */
+-#define SCRUB_STRIPES_PER_SCTX	8	/* That would be 8 64K stripe per-device. */
++#define SCRUB_STRIPES_PER_GROUP		8
++
++/*
++ * How many groups we have for each sctx.
++ *
++ * This would be 8M per device, the same value as the old scrub in-flight bios
++ * size limit.
++ */
++#define SCRUB_GROUPS_PER_SCTX		16
++
++#define SCRUB_TOTAL_STRIPES		(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
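
Taken together with BTRFS_STRIPE_LEN (64KiB), these constants give the sizes quoted in the comments: one group submits 8 * 64KiB = 512KiB, and one sctx covers 16 * 512KiB = 8MiB per device. The arithmetic as compile-time checks (the BTRFS_STRIPE_LEN value is assumed from the comments above):

    #include <assert.h>

    #define BTRFS_STRIPE_LEN            (64 * 1024)
    #define SCRUB_STRIPES_PER_GROUP     8
    #define SCRUB_GROUPS_PER_SCTX       16
    #define SCRUB_TOTAL_STRIPES  (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)

    static_assert(SCRUB_STRIPES_PER_GROUP * BTRFS_STRIPE_LEN == 512 * 1024,
                  "one group submits 512KiB");
    static_assert(SCRUB_TOTAL_STRIPES * BTRFS_STRIPE_LEN == 8 * 1024 * 1024,
                  "one sctx covers 8MiB per device");
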
+ 
+ /*
+  * The following value times PAGE_SIZE needs to be large enough to match the
+@@ -172,9 +183,11 @@ struct scrub_stripe {
+ };
+ 
+ struct scrub_ctx {
+-	struct scrub_stripe	stripes[SCRUB_STRIPES_PER_SCTX];
++	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
+ 	struct scrub_stripe	*raid56_data_stripes;
+ 	struct btrfs_fs_info	*fs_info;
++	struct btrfs_path	extent_path;
++	struct btrfs_path	csum_path;
+ 	int			first_free;
+ 	int			cur_stripe;
+ 	atomic_t		cancel_req;
+@@ -315,10 +328,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
+ 	if (!sctx)
+ 		return;
+ 
+-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
++	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
+ 		release_scrub_stripe(&sctx->stripes[i]);
+ 
+-	kfree(sctx);
++	kvfree(sctx);
+ }
+ 
+ static void scrub_put_ctx(struct scrub_ctx *sctx)
+@@ -333,13 +346,20 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
+ 	struct scrub_ctx *sctx;
+ 	int		i;
+ 
+-	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
++	/* Since sctx has 128 inline stripes, it can easily go beyond 64K.  Use
++	 * kvzalloc().
++	 */
++	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
+ 	if (!sctx)
+ 		goto nomem;
+ 	refcount_set(&sctx->refs, 1);
+ 	sctx->is_dev_replace = is_dev_replace;
+ 	sctx->fs_info = fs_info;
+-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
++	sctx->extent_path.search_commit_root = 1;
++	sctx->extent_path.skip_locking = 1;
++	sctx->csum_path.search_commit_root = 1;
++	sctx->csum_path.skip_locking = 1;
++	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
+ 		int ret;
+ 
+ 		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
+@@ -1468,6 +1488,8 @@ static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
+  * Return <0 for error.
+  */
+ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
++					struct btrfs_path *extent_path,
++					struct btrfs_path *csum_path,
+ 					struct btrfs_device *dev, u64 physical,
+ 					int mirror_num, u64 logical_start,
+ 					u32 logical_len,
+@@ -1477,7 +1499,6 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
+ 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
+ 	const u64 logical_end = logical_start + logical_len;
+-	struct btrfs_path path = { 0 };
+ 	u64 cur_logical = logical_start;
+ 	u64 stripe_end;
+ 	u64 extent_start;
+@@ -1493,14 +1514,13 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 	/* The range must be inside the bg. */
+ 	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+ 
+-	path.search_commit_root = 1;
+-	path.skip_locking = 1;
+-
+-	ret = find_first_extent_item(extent_root, &path, logical_start, logical_len);
++	ret = find_first_extent_item(extent_root, extent_path, logical_start,
++				     logical_len);
+ 	/* Either error or not found. */
+ 	if (ret)
+ 		goto out;
+-	get_extent_info(&path, &extent_start, &extent_len, &extent_flags, &extent_gen);
++	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
++			&extent_gen);
+ 	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ 		stripe->nr_meta_extents++;
+ 	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
+@@ -1528,7 +1548,7 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 
+ 	/* Fill the extent info for the remaining sectors. */
+ 	while (cur_logical <= stripe_end) {
+-		ret = find_first_extent_item(extent_root, &path, cur_logical,
++		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
+ 					     stripe_end - cur_logical + 1);
+ 		if (ret < 0)
+ 			goto out;
+@@ -1536,7 +1556,7 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 			ret = 0;
+ 			break;
+ 		}
+-		get_extent_info(&path, &extent_start, &extent_len,
++		get_extent_info(extent_path, &extent_start, &extent_len,
+ 				&extent_flags, &extent_gen);
+ 		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ 			stripe->nr_meta_extents++;
+@@ -1561,9 +1581,9 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 		 */
+ 		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
+ 
+-		ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical,
+-						stripe_end, stripe->csums,
+-						&csum_bitmap, true);
++		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
++						stripe->logical, stripe_end,
++						stripe->csums, &csum_bitmap);
+ 		if (ret < 0)
+ 			goto out;
+ 		if (ret > 0)
+@@ -1576,7 +1596,6 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 	}
+ 	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
+ out:
+-	btrfs_release_path(&path);
+ 	return ret;
+ }
+ 
+@@ -1654,6 +1673,28 @@ static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
+ 	return false;
+ }
+ 
++static void submit_initial_group_read(struct scrub_ctx *sctx,
++				      unsigned int first_slot,
++				      unsigned int nr_stripes)
++{
++	struct blk_plug plug;
++
++	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
++	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
++
++	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
++			      btrfs_stripe_nr_to_offset(nr_stripes));
++	blk_start_plug(&plug);
++	for (int i = 0; i < nr_stripes; i++) {
++		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
++
++		/* Those stripes should be initialized. */
++		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
++		scrub_submit_initial_read(sctx, stripe);
++	}
++	blk_finish_plug(&plug);
++}
++
+ static int flush_scrub_stripes(struct scrub_ctx *sctx)
+ {
+ 	struct btrfs_fs_info *fs_info = sctx->fs_info;
+@@ -1666,11 +1707,11 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
+ 
+ 	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
+ 
+-	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
+-			      btrfs_stripe_nr_to_offset(nr_stripes));
+-	for (int i = 0; i < nr_stripes; i++) {
+-		stripe = &sctx->stripes[i];
+-		scrub_submit_initial_read(sctx, stripe);
++	/* Submit the stripes which are populated but not submitted. */
++	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
++		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
++
++		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
+ 	}
+ 
+ 	for (int i = 0; i < nr_stripes; i++) {
+@@ -1750,28 +1791,40 @@ static void raid56_scrub_wait_endio(struct bio *bio)
+ 
+ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
+ 			      struct btrfs_device *dev, int mirror_num,
+-			      u64 logical, u32 length, u64 physical)
++			      u64 logical, u32 length, u64 physical,
++			      u64 *found_logical_ret)
+ {
+ 	struct scrub_stripe *stripe;
+ 	int ret;
+ 
+-	/* No available slot, submit all stripes and wait for them. */
+-	if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
+-		ret = flush_scrub_stripes(sctx);
+-		if (ret < 0)
+-			return ret;
+-	}
++	/*
++	 * There should always be one slot left, as the caller that fills the
++	 * last slot flushes them all.
++	 */
++	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
+ 
+ 	stripe = &sctx->stripes[sctx->cur_stripe];
+-
+-	/* We can queue one stripe using the remaining slot. */
+ 	scrub_reset_stripe(stripe);
+-	ret = scrub_find_fill_first_stripe(bg, dev, physical, mirror_num,
+-					   logical, length, stripe);
++	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
++					   &sctx->csum_path, dev, physical,
++					   mirror_num, logical, length, stripe);
+ 	/* Either >0 as no more extents or <0 for error. */
+ 	if (ret)
+ 		return ret;
++	if (found_logical_ret)
++		*found_logical_ret = stripe->logical;
+ 	sctx->cur_stripe++;
++
++	/* We filled one group, submit it. */
++	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
++		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
++
++		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
++	}
++
++	/* Last slot used, flush them all. */
++	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
++		return flush_scrub_stripes(sctx);
+ 	return 0;
+ }
+ 
+@@ -1785,6 +1838,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
+ 	struct btrfs_fs_info *fs_info = sctx->fs_info;
+ 	struct btrfs_raid_bio *rbio;
+ 	struct btrfs_io_context *bioc = NULL;
++	struct btrfs_path extent_path = { 0 };
++	struct btrfs_path csum_path = { 0 };
+ 	struct bio *bio;
+ 	struct scrub_stripe *stripe;
+ 	bool all_empty = true;
+@@ -1795,6 +1850,16 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
+ 
+ 	ASSERT(sctx->raid56_data_stripes);
+ 
++	/*
++	 * For data stripe search, we cannot re-use the same extent/csum paths,
++	 * as the data stripe bytenr may be smaller than the previous extent's.  Thus
++	 * we have to use our own extent/csum paths.
++	 */
++	extent_path.search_commit_root = 1;
++	extent_path.skip_locking = 1;
++	csum_path.search_commit_root = 1;
++	csum_path.skip_locking = 1;
++
+ 	for (int i = 0; i < data_stripes; i++) {
+ 		int stripe_index;
+ 		int rot;
+@@ -1809,7 +1874,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
+ 
+ 		scrub_reset_stripe(stripe);
+ 		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
+-		ret = scrub_find_fill_first_stripe(bg,
++		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
+ 				map->stripes[stripe_index].dev, physical, 1,
+ 				full_stripe_start + btrfs_stripe_nr_to_offset(i),
+ 				BTRFS_STRIPE_LEN, stripe);
+@@ -1937,6 +2002,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
+ 	bio_put(bio);
+ 	btrfs_bio_counter_dec(fs_info);
+ 
++	btrfs_release_path(&extent_path);
++	btrfs_release_path(&csum_path);
+ out:
+ 	return ret;
+ }
+@@ -1970,6 +2037,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 	path.skip_locking = 1;
+ 	/* Go through each extent items inside the logical range */
+ 	while (cur_logical < logical_end) {
++		u64 found_logical;
+ 		u64 cur_physical = physical + cur_logical - logical_start;
+ 
+ 		/* Canceled? */
+@@ -1994,7 +2062,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 
+ 		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
+ 					 cur_logical, logical_end - cur_logical,
+-					 cur_physical);
++					 cur_physical, &found_logical);
+ 		if (ret > 0) {
+ 			/* No more extent, just update the accounting */
+ 			sctx->stat.last_physical = physical + logical_length;
+@@ -2004,9 +2072,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 		if (ret < 0)
+ 			break;
+ 
+-		ASSERT(sctx->cur_stripe > 0);
+-		cur_logical = sctx->stripes[sctx->cur_stripe - 1].logical
+-			      + BTRFS_STRIPE_LEN;
++		cur_logical = found_logical + BTRFS_STRIPE_LEN;
+ 
+ 		/* Don't hold CPU for too long time */
+ 		cond_resched();
+@@ -2109,6 +2175,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
+ 	u64 stripe_logical;
+ 	int stop_loop = 0;
+ 
++	/* The extent path should be released by now. */
++	ASSERT(sctx->extent_path.nodes[0] == NULL);
++
+ 	scrub_blocked_if_needed(fs_info);
+ 
+ 	if (sctx->is_dev_replace &&
+@@ -2227,6 +2296,9 @@ out:
+ 	ret2 = flush_scrub_stripes(sctx);
+ 	if (!ret)
+ 		ret = ret2;
++	btrfs_release_path(&sctx->extent_path);
++	btrfs_release_path(&sctx->csum_path);
++
+ 	if (sctx->raid56_data_stripes) {
+ 		for (int i = 0; i < nr_data_stripes(map); i++)
+ 			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 75e7fa337e66c..ba5322fe34f57 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -389,11 +389,7 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 		return 0;
+ 
+ 	used = btrfs_space_info_used(space_info, true);
+-	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
+-	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+-		avail = 0;
+-	else
+-		avail = calc_available_free_space(fs_info, space_info, flush);
++	avail = calc_available_free_space(fs_info, space_info, flush);
+ 
+ 	if (used + bytes < space_info->total_bytes + avail)
+ 		return 1;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 91b6c2fdc420e..5bbd288b9cb54 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -292,10 +292,11 @@ loop:
+ 	spin_unlock(&fs_info->trans_lock);
+ 
+ 	/*
+-	 * If we are ATTACH, we just want to catch the current transaction,
+-	 * and commit it. If there is no transaction, just return ENOENT.
++	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
++	 * current transaction, and commit it. If there is no transaction, just
++	 * return ENOENT.
+ 	 */
+-	if (type == TRANS_ATTACH)
++	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
+ 		return -ENOENT;
+ 
+ 	/*
+@@ -591,8 +592,13 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 		u64 delayed_refs_bytes = 0;
+ 
+ 		qgroup_reserved = num_items * fs_info->nodesize;
+-		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
+-				enforce_qgroups);
++		/*
++		 * Use prealloc for now, as there might be a currently running
++		 * transaction that could free this reserved space prematurely
++		 * by committing.
++		 */
++		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
++							 enforce_qgroups, false);
+ 		if (ret)
+ 			return ERR_PTR(ret);
+ 
+@@ -705,6 +711,14 @@ again:
+ 		h->reloc_reserved = reloc_reserved;
+ 	}
+ 
++	/*
++	 * Now that we have found a transaction to be a part of, convert the
++	 * qgroup reservation from prealloc to pertrans. A different transaction
++	 * can't race in and free our pertrans out from under us.
++	 */
++	if (qgroup_reserved)
++		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++
+ got_it:
+ 	if (!current->journal_info)
+ 		current->journal_info = h;
+@@ -752,7 +766,7 @@ alloc_fail:
+ 		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
+ 					num_bytes, NULL);
+ reserve_fail:
+-	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
++	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ 	return ERR_PTR(ret);
+ }
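
The reservation flow here is two-phase: reserve into the prealloc pool, which a concurrently committing transaction will not reclaim, and convert to pertrans only once the transaction has been joined; the error path then releases the prealloc side it actually reserved from. A toy model of the three operations (bare counters, not the qgroup API):

    struct pools {
        long prealloc;    /* safe from concurrent commits */
        long pertrans;    /* freed when the owning transaction commits */
    };

    static void reserve_prealloc(struct pools *p, long n)
    {
        p->prealloc += n;
    }

    /* Called only once we are part of a transaction, so a commit can
     * no longer race in and free the reservation out from under us. */
    static void convert_to_pertrans(struct pools *p, long n)
    {
        p->prealloc -= n;
        p->pertrans += n;
    }

    /* Error path before joining: release the same pool we reserved from. */
    static void release_prealloc(struct pools *p, long n)
    {
        p->prealloc -= n;
    }
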
+ 
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 2490301350015..d9e6df2da272c 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2017,6 +2017,10 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 	 * and block_group->meta_write_pointer for metadata.
+ 	 */
+ 	if (!fully_written) {
++		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
++			spin_unlock(&block_group->lock);
++			return -EAGAIN;
++		}
+ 		spin_unlock(&block_group->lock);
+ 
+ 		ret = btrfs_inc_block_group_ro(block_group, false);
+@@ -2045,7 +2049,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 			return 0;
+ 		}
+ 
+-		if (block_group->reserved) {
++		if (block_group->reserved ||
++		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
++			     &block_group->runtime_flags)) {
+ 			spin_unlock(&block_group->lock);
+ 			btrfs_dec_block_group_ro(block_group);
+ 			return -EAGAIN;
+@@ -2276,7 +2282,10 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logica
+ 
+ 	/* All relocation extents are written. */
+ 	if (block_group->start + block_group->alloc_offset == logical + length) {
+-		/* Now, release this block group for further allocations. */
++		/*
++		 * Now, release this block group for further allocations and
++		 * zone finish.
++		 */
+ 		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ 			  &block_group->runtime_flags);
+ 	}
+@@ -2300,7 +2309,8 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+ 
+ 		spin_lock(&block_group->lock);
+ 		if (block_group->reserved || block_group->alloc_offset == 0 ||
+-		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
++		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
++		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ 			spin_unlock(&block_group->lock);
+ 			continue;
+ 		}
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 1f72f977c6dba..79b20d6ae39ec 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -913,11 +913,11 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
+ }
+ 
+ /*
+- * This function returns the number of file system metadata clusters at
++ * This function returns the number of file system metadata blocks at
+  * the beginning of a block group, including the reserved gdt blocks.
+  */
+-static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+-				     ext4_group_t block_group)
++unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
++				       ext4_group_t block_group)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned num;
+@@ -935,8 +935,15 @@ static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+ 	} else { /* For META_BG_BLOCK_GROUPS */
+ 		num += ext4_bg_num_gdb_meta(sb, block_group);
+ 	}
+-	return EXT4_NUM_B2C(sbi, num);
++	return num;
+ }
++
++static unsigned int ext4_num_base_meta_clusters(struct super_block *sb,
++						ext4_group_t block_group)
++{
++	return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group));
++}
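
The split makes the units explicit: the base metadata count is computed in blocks, and EXT4_NUM_B2C() rounds it up to cluster units. A standalone sketch of that rounding (cluster_bits stands in for the superblock's cluster ratio; illustrative only):

    #include <stdio.h>

    /* blocks -> clusters, rounding up: with 2^cluster_bits blocks per
     * cluster, any partial cluster still occupies a whole one. */
    static unsigned int blocks_to_clusters(unsigned int blocks,
                                           unsigned int cluster_bits)
    {
        return (blocks + (1U << cluster_bits) - 1) >> cluster_bits;
    }

    int main(void)
    {
        /* e.g. 260 metadata blocks, 4-block clusters -> 65 clusters */
        printf("%u\n", blocks_to_clusters(260, 2));
        return 0;
    }
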
++
+ /**
+  *	ext4_inode_to_goal_block - return a hint for block allocation
+  *	@inode: inode for block allocation
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 5504f72bbbbe7..6fe3c941b5651 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -215,7 +215,6 @@ int ext4_setup_system_zone(struct super_block *sb)
+ 	struct ext4_system_blocks *system_blks;
+ 	struct ext4_group_desc *gdp;
+ 	ext4_group_t i;
+-	int flex_size = ext4_flex_bg_size(sbi);
+ 	int ret;
+ 
+ 	system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+@@ -223,12 +222,13 @@ int ext4_setup_system_zone(struct super_block *sb)
+ 		return -ENOMEM;
+ 
+ 	for (i=0; i < ngroups; i++) {
++		unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i);
++
+ 		cond_resched();
+-		if (ext4_bg_has_super(sb, i) &&
+-		    ((i < 5) || ((i % flex_size) == 0))) {
++		if (meta_blks != 0) {
+ 			ret = add_system_zone(system_blks,
+ 					ext4_group_first_block_no(sb, i),
+-					ext4_bg_num_gdb(sb, i) + 1, 0);
++					meta_blks, 0);
+ 			if (ret)
+ 				goto err;
+ 		}
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index e20ac0654b3f2..453d4da5de520 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -33,6 +33,8 @@ int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+ 
+ #if IS_ENABLED(CONFIG_UNICODE)
+ 	err = ext4_fname_setup_ci_filename(dir, iname, fname);
++	if (err)
++		ext4_fname_free_filename(fname);
+ #endif
+ 	return err;
+ }
+@@ -51,6 +53,8 @@ int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ 
+ #if IS_ENABLED(CONFIG_UNICODE)
+ 	err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
++	if (err)
++		ext4_fname_free_filename(fname);
+ #endif
+ 	return err;
+ }
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 0a2d55faa095e..9653aab5e9f4a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3084,6 +3084,8 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
+ extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
+ 					     ext4_group_t block_group,
+ 					     unsigned int flags);
++extern unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
++					      ext4_group_t block_group);
+ 
+ extern __printf(7, 8)
+ void __ext4_error(struct super_block *, const char *, unsigned int, bool,
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 9b5b8951afb44..6f7de14c0fa86 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -878,23 +878,29 @@ retry:
+ 	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ 	if (err1 != 0)
+ 		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es1) {
++		if (!es1->es_len)
++			__es_free_extent(es1);
++		es1 = NULL;
++	}
+ 
+ 	err2 = __es_insert_extent(inode, &newes, es2);
+ 	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+ 		err2 = 0;
+ 	if (err2 != 0)
+ 		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es2) {
++		if (!es2->es_len)
++			__es_free_extent(es2);
++		es2 = NULL;
++	}
+ 
+ 	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+ 	    (status & EXTENT_STATUS_WRITTEN ||
+ 	     status & EXTENT_STATUS_UNWRITTEN))
+ 		__revise_pending(inode, lblk, len);
+-
+-	/* es is pre-allocated but not used, free it. */
+-	if (es1 && !es1->es_len)
+-		__es_free_extent(es1);
+-	if (es2 && !es2->es_len)
+-		__es_free_extent(es2);
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+ 	if (err1 || err2)
+@@ -1491,8 +1497,12 @@ retry:
+ 	 */
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+ 	err = __es_remove_extent(inode, lblk, end, &reserved, es);
+-	if (es && !es->es_len)
+-		__es_free_extent(es);
++	/* Free preallocated extent if it didn't get used. */
++	if (es) {
++		if (!es->es_len)
++			__es_free_extent(es);
++		es = NULL;
++	}
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+ 	if (err)
+ 		goto retry;
+@@ -2047,19 +2057,25 @@ retry:
+ 	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+ 	if (err1 != 0)
+ 		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es1) {
++		if (!es1->es_len)
++			__es_free_extent(es1);
++		es1 = NULL;
++	}
+ 
+ 	err2 = __es_insert_extent(inode, &newes, es2);
+ 	if (err2 != 0)
+ 		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es2) {
++		if (!es2->es_len)
++			__es_free_extent(es2);
++		es2 = NULL;
++	}
+ 
+ 	if (allocated)
+ 		__insert_pending(inode, lblk);
+-
+-	/* es is pre-allocated but not used, free it. */
+-	if (es1 && !es1->es_len)
+-		__es_free_extent(es1);
+-	if (es2 && !es2->es_len)
+-		__es_free_extent(es2);
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+ 	if (err1 || err2)
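
The pattern in these hunks: preallocated nodes are handed to the locked operation, and any spare that was not consumed is freed, with its pointer cleared, immediately after that operation rather than at the end, so a retry iteration can never see a stale pointer. A reduced standalone sketch (illustrative, not the extent-status API):

    #include <stdlib.h>

    struct node { int len; };

    /* May consume the spare (taking ownership) or leave it untouched. */
    static int op(struct node **spare, int consume)
    {
        if (consume) {
            *spare = NULL;    /* ownership moved into the tree */
            return 0;
        }
        return -1;            /* caller retries */
    }

    static int insert_with_retry(void)
    {
        for (;;) {
            struct node *spare = calloc(1, sizeof(*spare));
            int ret;

            if (!spare)
                return -1;
            ret = op(&spare, rand() % 2);
            /* Free the unused spare right away and clear the
             * pointer, as the ext4 change does with es1/es2. */
            if (spare) {
                free(spare);
                spare = NULL;
            }
            if (ret == 0)
                return 0;
        }
    }
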
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index c457c8517f0fd..73a4b711be025 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -476,6 +476,11 @@ restart:
+ 	 * required to change security info in file_modified(), for extending
+ 	 * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten
+ 	 * extents (as partial block zeroing may be required).
++	 *
++	 * Note that unaligned writes are allowed under shared lock so long as
++	 * they are pure overwrites. Otherwise, concurrent unaligned writes risk
++	 * data corruption due to partial block zeroing in the dio layer, and so
++	 * the I/O must occur exclusively.
+ 	 */
+ 	if (*ilock_shared &&
+ 	    ((!IS_NOSEC(inode) || *extend || !overwrite ||
+@@ -492,21 +497,12 @@ restart:
+ 
+ 	/*
+ 	 * Now that locking is settled, determine dio flags and exclusivity
+-	 * requirements. Unaligned writes are allowed under shared lock so long
+-	 * as they are pure overwrites. Set the iomap overwrite only flag as an
+-	 * added precaution in this case. Even though this is unnecessary, we
+-	 * can detect and warn on unexpected -EAGAIN if an unsafe unaligned
+-	 * write is ever submitted.
+-	 *
+-	 * Otherwise, concurrent unaligned writes risk data corruption due to
+-	 * partial block zeroing in the dio layer, and so the I/O must occur
+-	 * exclusively. The inode lock is already held exclusive if the write is
+-	 * non-overwrite or extending, so drain all outstanding dio and set the
+-	 * force wait dio flag.
++	 * requirements. We don't use DIO_OVERWRITE_ONLY because we enforce
++	 * behavior already. The inode lock is already held exclusive if the
++	 * write is non-overwrite or extending, so drain all outstanding dio and
++	 * set the force wait dio flag.
+ 	 */
+-	if (*ilock_shared && unaligned_io) {
+-		*dio_flags = IOMAP_DIO_OVERWRITE_ONLY;
+-	} else if (!*ilock_shared && (unaligned_io || *extend)) {
++	if (!*ilock_shared && (unaligned_io || *extend)) {
+ 		if (iocb->ki_flags & IOCB_NOWAIT) {
+ 			ret = -EAGAIN;
+ 			goto out;
+@@ -608,7 +604,6 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		iomap_ops = &ext4_iomap_overwrite_ops;
+ 	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
+ 			   dio_flags, NULL, 0);
+-	WARN_ON_ONCE(ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT));
+ 	if (ret == -ENOTBLK)
+ 		ret = 0;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 5882afe71d82b..ecebc3a139be2 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1389,18 +1389,14 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+ {
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct page *page;
+-repeat:
++
+ 	page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
+ 	if (IS_ERR(page))
+ 		return page;
+ 
+ 	/* wait for read completion */
+ 	lock_page(page);
+-	if (unlikely(page->mapping != mapping)) {
+-		f2fs_put_page(page, 1);
+-		goto repeat;
+-	}
+-	if (unlikely(!PageUptodate(page))) {
++	if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
+ 		f2fs_put_page(page, 1);
+ 		return ERR_PTR(-EIO);
+ 	}
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index c602ff2403b67..c0b7417f1d87d 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2122,15 +2122,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+ 	return down_read_trylock(&sem->internal_rwsem);
+ }
+ 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+-{
+-	down_read_nested(&sem->internal_rwsem, subclass);
+-}
+-#else
+-#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+-#endif
+-
+ static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+ {
+ 	up_read(&sem->internal_rwsem);
+@@ -2141,6 +2132,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+ 	down_write(&sem->internal_rwsem);
+ }
+ 
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
++{
++	down_read_nested(&sem->internal_rwsem, subclass);
++}
++
++static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
++{
++	down_write_nested(&sem->internal_rwsem, subclass);
++}
++#else
++#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
++#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
++#endif
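
f2fs_down_write_nested() exists for lockdep's benefit: taking two locks of the same lock class (here, the i_sem of two different inodes in one call chain) trips lockdep's recursive-deadlock detector unless the inner acquisition is annotated with a subclass. A simplified kernel-style sketch of the idiom (SINGLE_DEPTH_NESTING is just subclass 1):

    /* Without the annotation, lockdep sees lock class i_sem taken
     * twice and reports a possible deadlock, even though the two
     * rwsems belong to different inodes and are ordered consistently. */
    f2fs_down_write(&F2FS_I(dir)->i_sem);               /* subclass 0 */
    f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
                           SINGLE_DEPTH_NESTING);       /* subclass 1 */
    /* ... */
    f2fs_up_write(&F2FS_I(inode)->i_sem);
    f2fs_up_write(&F2FS_I(dir)->i_sem);
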
++
+ static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+ {
+ 	return down_write_trylock(&sem->internal_rwsem);
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 4638fee16a91c..0d185ad5e4698 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -641,7 +641,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+ 	}
+ 
+ 	if (inode) {
+-		f2fs_down_write(&F2FS_I(inode)->i_sem);
++		f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
++						SINGLE_DEPTH_NESTING);
+ 		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
+ 		if (IS_ERR(page)) {
+ 			err = PTR_ERR(page);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index be08be6f4bfd6..b38bf2b34490b 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -205,6 +205,8 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 		f2fs_i_size_write(inode, fi->original_i_size);
+ 		fi->original_i_size = 0;
+ 	}
++	/* avoid stale dirty inode during eviction */
++	sync_inode_metadata(inode, 0);
+ }
+ 
+ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
+index dc603479b30ef..b3d498163f973 100644
+--- a/fs/fuse/readdir.c
++++ b/fs/fuse/readdir.c
+@@ -243,8 +243,16 @@ retry:
+ 			dput(dentry);
+ 			dentry = alias;
+ 		}
+-		if (IS_ERR(dentry))
++		if (IS_ERR(dentry)) {
++			if (!IS_ERR(inode)) {
++				struct fuse_inode *fi = get_fuse_inode(inode);
++
++				spin_lock(&fi->lock);
++				fi->nlookup--;
++				spin_unlock(&fi->lock);
++			}
+ 			return PTR_ERR(dentry);
++		}
+ 	}
+ 	if (fc->readdirplus_auto)
+ 		set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index ae49256b7c8c6..be2759a974f9e 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping,
+ 	int ret;
+ 
+ 	/*
+-	 * Even if we didn't write any pages here, we might still be holding
++	 * Even if we didn't write enough pages here, we might still be holding
+ 	 * dirty pages in the ail. We forcibly flush the ail because we don't
+ 	 * want balance_dirty_pages() to loop indefinitely trying to write out
+ 	 * pages held in the ail that it can't find.
+ 	 */
+ 	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
+-	if (ret == 0)
++	if (ret == 0 && wbc->nr_to_write > 0)
+ 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
+ 	return ret;
+ }
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index aa568796207c0..aaca22f2aa2d1 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -1282,9 +1282,6 @@ static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+ {
+ 	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
+ 
+-	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
+-		return 1;
+-
+ 	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+ 		atomic_read(&sdp->sd_log_thresh2);
+ }
+@@ -1301,7 +1298,6 @@ int gfs2_logd(void *data)
+ {
+ 	struct gfs2_sbd *sdp = data;
+ 	unsigned long t = 1;
+-	DEFINE_WAIT(wait);
+ 
+ 	while (!kthread_should_stop()) {
+ 
+@@ -1326,7 +1322,9 @@ int gfs2_logd(void *data)
+ 						  GFS2_LFC_LOGD_JFLUSH_REQD);
+ 		}
+ 
+-		if (gfs2_ail_flush_reqd(sdp)) {
++		if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
++		    gfs2_ail_flush_reqd(sdp)) {
++			clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
+ 			gfs2_ail1_start(sdp);
+ 			gfs2_ail1_wait(sdp);
+ 			gfs2_ail1_empty(sdp, 0);
+@@ -1338,17 +1336,12 @@ int gfs2_logd(void *data)
+ 
+ 		try_to_freeze();
+ 
+-		do {
+-			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+-					TASK_INTERRUPTIBLE);
+-			if (!gfs2_ail_flush_reqd(sdp) &&
+-			    !gfs2_jrnl_flush_reqd(sdp) &&
+-			    !kthread_should_stop())
+-				t = schedule_timeout(t);
+-		} while(t && !gfs2_ail_flush_reqd(sdp) &&
+-			!gfs2_jrnl_flush_reqd(sdp) &&
+-			!kthread_should_stop());
+-		finish_wait(&sdp->sd_logd_waitq, &wait);
++		t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
++				test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
++				gfs2_ail_flush_reqd(sdp) ||
++				gfs2_jrnl_flush_reqd(sdp) ||
++				kthread_should_stop(),
++				t);
+ 	}
+ 
+ 	return 0;
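
wait_event_interruptible_timeout() packages up exactly the prepare_to_wait()/schedule_timeout()/finish_wait() dance the old logd code spelled out. Roughly what the macro expands to, simplified from include/linux/wait.h (the real version also fast-paths an already-true condition):

    /* t = wait_event_interruptible_timeout(wq, cond, t); approximately: */
    long __ret = t;
    DEFINE_WAIT(__wait);

    for (;;) {
        prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);
        if (cond)
            break;
        if (signal_pending(current)) {
            __ret = -ERESTARTSYS;
            break;
        }
        __ret = schedule_timeout(__ret);
        if (!__ret)
            break;            /* timed out, cond still false */
    }
    finish_wait(&wq, &__wait);
    t = __ret;
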
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 9ec91017a7f3c..f033ac807013c 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -349,6 +349,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 
+ /* Checkpoint list management */
+ 
++enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};
++
+ /*
+  * journal_shrink_one_cp_list
+  *
+@@ -360,7 +362,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+  * Called with j_list_lock held.
+  */
+ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+-						bool destroy, bool *released)
++						enum shrink_type type,
++						bool *released)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+@@ -376,12 +379,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+ 
+-		if (destroy) {
++		if (type == SHRINK_DESTROY) {
+ 			ret = __jbd2_journal_remove_checkpoint(jh);
+ 		} else {
+ 			ret = jbd2_journal_try_remove_checkpoint(jh);
+-			if (ret < 0)
+-				continue;
++			if (ret < 0) {
++				if (type == SHRINK_BUSY_SKIP)
++					continue;
++				break;
++			}
+ 		}
+ 
+ 		nr_freed++;
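
Replacing the bool with a three-way enum lets one list walker express all three behaviors on a busy entry: destroy it regardless, stop the walk, or skip to the next. A standalone sketch of the control flow (illustrative list type, not jbd2's):

    enum shrink_type { SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP };

    struct entry {
        int busy;
        struct entry *next;
    };

    static unsigned long shrink_list(struct entry *e, enum shrink_type type)
    {
        unsigned long freed = 0;

        for (; e; e = e->next) {
            if (e->busy && type != SHRINK_DESTROY) {
                if (type == SHRINK_BUSY_SKIP)
                    continue;    /* keep scanning */
                break;           /* SHRINK_BUSY_STOP: bail out */
            }
            freed++;    /* destroy/remove the idle entry */
        }
        return freed;
    }
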
+@@ -445,7 +451,7 @@ again:
+ 		tid = transaction->t_tid;
+ 
+ 		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-						   false, &released);
++						   SHRINK_BUSY_SKIP, &released);
+ 		nr_freed += freed;
+ 		(*nr_to_scan) -= min(*nr_to_scan, freed);
+ 		if (*nr_to_scan == 0)
+@@ -485,19 +491,21 @@ out:
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
++	enum shrink_type type;
+ 	bool released;
+ 
+ 	transaction = journal->j_checkpoint_transactions;
+ 	if (!transaction)
+ 		return;
+ 
++	type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
+ 	last_transaction = transaction->t_cpprev;
+ 	next_transaction = transaction;
+ 	do {
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+ 		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-					   destroy, &released);
++					   type, &released);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -631,6 +639,8 @@ int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
+ {
+ 	struct buffer_head *bh = jh2bh(jh);
+ 
++	if (jh->b_transaction)
++		return -EBUSY;
+ 	if (!trylock_buffer(bh))
+ 		return -EBUSY;
+ 	if (buffer_dirty(bh)) {
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 0184931d47f7d..c269a7d29a465 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -230,12 +230,8 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
+ /* Make sure we wrap around the log correctly! */
+ #define wrap(journal, var)						\
+ do {									\
+-	unsigned long _wrap_last =					\
+-		jbd2_has_feature_fast_commit(journal) ?			\
+-			(journal)->j_fc_last : (journal)->j_last;	\
+-									\
+-	if (var >= _wrap_last)						\
+-		var -= (_wrap_last - (journal)->j_first);		\
++	if (var >= (journal)->j_last)					\
++		var -= ((journal)->j_last - (journal)->j_first);	\
+ } while (0)
+ 
+ static int fc_do_one_pass(journal_t *journal,
+@@ -524,9 +520,7 @@ static int do_one_pass(journal_t *journal,
+ 				break;
+ 
+ 		jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+-			  next_commit_ID, next_log_block,
+-			  jbd2_has_feature_fast_commit(journal) ?
+-			  journal->j_fc_last : journal->j_last);
++			  next_commit_ID, next_log_block, journal->j_last);
+ 
+ 		/* Skip over each chunk of the transaction looking
+ 		 * either the next descriptor block or the final commit
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 5a1a4af9d3d29..bf243015834ec 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -383,9 +383,11 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
+ 	rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ 
+ 	/* successfully added, account subdir number */
++	down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
+ 	if (kernfs_type(kn) == KERNFS_DIR)
+ 		kn->parent->dir.subdirs++;
+ 	kernfs_inc_rev(kn->parent);
++	up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
+ 
+ 	return 0;
+ }
+@@ -408,9 +410,11 @@ static bool kernfs_unlink_sibling(struct kernfs_node *kn)
+ 	if (RB_EMPTY_NODE(&kn->rb))
+ 		return false;
+ 
++	down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
+ 	if (kernfs_type(kn) == KERNFS_DIR)
+ 		kn->parent->dir.subdirs--;
+ 	kernfs_inc_rev(kn->parent);
++	up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
+ 
+ 	rb_erase(&kn->rb, &kn->parent->dir.children);
+ 	RB_CLEAR_NODE(&kn->rb);
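
Both kernfs hunks take kernfs_iattr_rwsem in write mode around the subdirs counter update and the parent's revision bump, so readers holding the same rwsem never observe one change without the other. The shape of the pattern, extracted into an illustrative struct (down_write()/up_write() are the real kernel rwsem API; the struct itself is made up, not kernfs_node):

#include <linux/rwsem.h>

struct dir_accounting {
	struct rw_semaphore rwsem;
	unsigned int subdirs;
	unsigned long rev;
};

static void account_subdir(struct dir_accounting *a, int delta)
{
	down_write(&a->rwsem);
	a->subdirs += delta;	/* count change and ... */
	a->rev++;		/* ... revision bump publish together */
	up_write(&a->rwsem);
}
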
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index aaffaaa336cc5..47d892a1d363d 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -472,13 +472,31 @@ out:
+ 	return result;
+ }
+ 
++static void nfs_direct_add_page_head(struct list_head *list,
++				     struct nfs_page *req)
++{
++	struct nfs_page *head = req->wb_head;
++
++	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
++		return;
++	if (!list_empty(&head->wb_list)) {
++		nfs_unlock_request(head);
++		return;
++	}
++	list_add(&head->wb_list, list);
++	kref_get(&head->wb_kref);
++	kref_get(&head->wb_kref);
++}
++
+ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ {
+ 	struct nfs_page *req, *subreq;
+ 
+ 	list_for_each_entry(req, list, wb_list) {
+-		if (req->wb_head != req)
++		if (req->wb_head != req) {
++			nfs_direct_add_page_head(&req->wb_list, req);
+ 			continue;
++		}
+ 		subreq = req->wb_this_page;
+ 		if (subreq == req)
+ 			continue;
+diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
+index ddbbf4fcda867..178001c90156f 100644
+--- a/fs/nfs/pnfs_dev.c
++++ b/fs/nfs/pnfs_dev.c
+@@ -154,7 +154,7 @@ nfs4_get_device_info(struct nfs_server *server,
+ 		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
+ 
+ out_free_pages:
+-	for (i = 0; i < max_pages; i++)
++	while (--i >= 0)
+ 		__free_page(pages[i]);
+ 	kfree(pages);
+ out_free_pdev:
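
The pnfs_dev.c loop-bound fix is the classic partial-allocation unwind: the cleanup label can be reached after only i of max_pages pages were allocated, so iterating 0..max_pages would pass never-allocated pointers to __free_page(), while `while (--i >= 0)` releases exactly the pages obtained so far, in reverse. A self-contained userspace sketch of the idiom (alloc_all() is a hypothetical name, not the NFS code):

#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(64);
		if (!slots[i])
			goto unwind;
	}
	return 0;

unwind:
	/* free only slots [0, i), the ones actually allocated */
	while (--i >= 0) {
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;
}
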
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fe483f163dbc8..2d5e9a9d5b8be 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -218,7 +218,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		.tcon = tcon,
+ 		.path = path,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+-		.desired_access = FILE_READ_ATTRIBUTES,
++		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.fid = pfid,
+ 	};
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 657dee4b2c8ce..051f15b9d6078 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -729,6 +729,7 @@ struct TCP_Server_Info {
+ 	 */
+ #define CIFS_SERVER_IS_CHAN(server)	(!!(server)->primary_server)
+ 	struct TCP_Server_Info *primary_server;
++	__u16 channel_sequence_num;  /* incremented on primary channel on each chan reconnect */
+ 
+ #ifdef CONFIG_CIFS_SWN_UPCALL
+ 	bool use_swn_dstaddr;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 238538dde4e36..2456b5dd439cd 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1686,6 +1686,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+ 	tcp_ses->session_estab = false;
+ 	tcp_ses->sequence_number = 0;
++	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
+ 	tcp_ses->reconnect_instance = 1;
+ 	tcp_ses->lstrp = jiffies;
+ 	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+index 8f6909d633da8..34e20c4cd507f 100644
+--- a/fs/smb/client/fscache.c
++++ b/fs/smb/client/fscache.c
+@@ -48,7 +48,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+ 	sharename = extract_sharename(tcon->tree_name);
+ 	if (IS_ERR(sharename)) {
+ 		cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
+-		return -EINVAL;
++		return PTR_ERR(sharename);
+ 	}
+ 
+ 	slen = strlen(sharename);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 0f62bc373ad05..dd6a423dc6e11 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -172,8 +172,17 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
+ 
+ 	spin_lock(&server->req_lock);
+ 	server->credits = val;
+-	if (val == 1)
++	if (val == 1) {
+ 		server->reconnect_instance++;
++		/*
++		 * ChannelSequence is updated in the primary channel for all channels,
++		 * so it stays consistent across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
++		 */
++		if (CIFS_SERVER_IS_CHAN(server))
++			server->primary_server->channel_sequence_num++;
++		else
++			server->channel_sequence_num++;
++	}
+ 	scredits = server->credits;
+ 	in_flight = server->in_flight;
+ 	spin_unlock(&server->req_lock);
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index a457f07f820dc..9c7e46b7e7c7a 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -88,9 +88,20 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
+ 		  const struct cifs_tcon *tcon,
+ 		  struct TCP_Server_Info *server)
+ {
++	struct smb3_hdr_req *smb3_hdr;
+ 	shdr->ProtocolId = SMB2_PROTO_NUMBER;
+ 	shdr->StructureSize = cpu_to_le16(64);
+ 	shdr->Command = smb2_cmd;
++	if (server->dialect >= SMB30_PROT_ID) {
++		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
++		smb3_hdr = (struct smb3_hdr_req *)shdr;
++		/* if primary channel is not set yet, use default channel for chan sequence num */
++		if (CIFS_SERVER_IS_CHAN(server))
++			smb3_hdr->ChannelSequence =
++				cpu_to_le16(server->primary_server->channel_sequence_num);
++		else
++			smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num);
++	}
+ 	if (server) {
+ 		spin_lock(&server->req_lock);
+ 		/* Request up to 10 credits but don't go over the limit. */
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index bae590eec871f..2680251b9aac1 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -153,6 +153,28 @@ struct smb2_hdr {
+ 	__u8   Signature[16];
+ } __packed;
+ 
++struct smb3_hdr_req {
++	__le32 ProtocolId;	/* 0xFE 'S' 'M' 'B' */
++	__le16 StructureSize;	/* 64 */
++	__le16 CreditCharge;	/* MBZ */
++	__le16 ChannelSequence; /* See MS-SMB2 3.2.4.1 and 3.2.7.1 */
++	__le16 Reserved;
++	__le16 Command;
++	__le16 CreditRequest;	/* CreditResponse */
++	__le32 Flags;
++	__le32 NextCommand;
++	__le64 MessageId;
++	union {
++		struct {
++			__le32 ProcessId;
++			__le32  TreeId;
++		} __packed SyncId;
++		__le64  AsyncId;
++	} __packed Id;
++	__le64  SessionId;
++	__u8   Signature[16];
++} __packed;
++
+ struct smb2_pdu {
+ 	struct smb2_hdr hdr;
+ 	__le16 StructureSize2; /* size of wct area (varies, request specific) */
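
The new struct smb3_hdr_req is a request-side view of the same 64-byte wire header as struct smb2_hdr: for SMB3 requests, what the generic header carries as the __le32 Status field is reinterpreted as ChannelSequence plus Reserved, which is what lets smb2_hdr_assemble() above cast one struct to the other. Compile-time checks one could add to document that invariant (illustrative only, not part of the patch, and assuming the usual layout where Status occupies bytes 8-11):

#include <linux/build_bug.h>
#include <linux/stddef.h>

static_assert(sizeof(struct smb3_hdr_req) == sizeof(struct smb2_hdr),
	      "SMB3 request view must mirror the generic 64-byte header");
static_assert(offsetof(struct smb3_hdr_req, ChannelSequence) == 8,
	      "ChannelSequence overlays the low half of smb2_hdr.Status");
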
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 6a3a9e122bb5e..51b1b7054a233 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -117,6 +117,8 @@ enum audit_nfcfgop {
+ 	AUDIT_NFT_OP_OBJ_RESET,
+ 	AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ 	AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
++	AUDIT_NFT_OP_SETELEM_RESET,
++	AUDIT_NFT_OP_RULE_RESET,
+ 	AUDIT_NFT_OP_INVALID,
+ };
+ 
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index f58895830adae..f316affcd2e13 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2619,6 +2619,18 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
+ }
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
++static __always_inline int
++bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
++{
++	int ret = -EFAULT;
++
++	if (IS_ENABLED(CONFIG_BPF_EVENTS))
++		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
++	if (unlikely(ret < 0))
++		memset(dst, 0, size);
++	return ret;
++}
++
+ void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ 			  struct btf_mod_pair *used_btfs, u32 len);
+ 
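
bpf_probe_read_kernel_common() is hoisted into bpf.h (its old static copy in bpf_trace.c is deleted further down in this patch) so the interpreter's LDX_PROBE_MEM opcodes can call it directly instead of the removed __weak stub. Its contract is worth noting: on any failure the destination is zero-filled, so callers may consume the buffer unconditionally. A hedged usage sketch (read_kernel_u64_or_zero() is a made-up wrapper, not kernel API):

#include <linux/printk.h>

static u64 read_kernel_u64_or_zero(const void *unsafe_ptr)
{
	u64 val;

	if (bpf_probe_read_kernel_common(&val, sizeof(val), unsafe_ptr) < 0)
		pr_debug("probe read faulted; val was zeroed\n");
	return val;	/* zero on fault, by contract */
}
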
+diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
+index 1c849db953a51..45fca09b23194 100644
+--- a/include/linux/export-internal.h
++++ b/include/linux/export-internal.h
+@@ -52,6 +52,8 @@
+ 
+ #ifdef CONFIG_IA64
+ #define KSYM_FUNC(name)		@fptr(name)
++#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
++#define KSYM_FUNC(name)		P%name
+ #else
+ #define KSYM_FUNC(name)		name
+ #endif
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 660012997f54c..644e69354cba6 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -146,6 +146,7 @@ struct inet6_skb_parm {
+ #define IP6SKB_JUMBOGRAM      128
+ #define IP6SKB_SEG6	      256
+ #define IP6SKB_FAKEJUMBO      512
++#define IP6SKB_MULTIPATH      1024
+ };
+ 
+ #if defined(CONFIG_NET_L3_MASTER_DEV)
+diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
+index 8bef1ab62bba3..4e27ca7c49def 100644
+--- a/include/linux/micrel_phy.h
++++ b/include/linux/micrel_phy.h
+@@ -41,9 +41,10 @@
+ #define	PHY_ID_KSZ9477		0x00221631
+ 
+ /* struct phy_device dev_flags definitions */
+-#define MICREL_PHY_50MHZ_CLK	0x00000001
+-#define MICREL_PHY_FXEN		0x00000002
+-#define MICREL_KSZ8_P1_ERRATA	0x00000003
++#define MICREL_PHY_50MHZ_CLK	BIT(0)
++#define MICREL_PHY_FXEN		BIT(1)
++#define MICREL_KSZ8_P1_ERRATA	BIT(2)
++#define MICREL_NO_EEE		BIT(3)
+ 
+ #define MICREL_KSZ9021_EXTREG_CTRL	0xB
+ #define MICREL_KSZ9021_EXTREG_DATA_WRITE	0xC
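
The micrel_phy.h conversion to BIT() is a bug fix rather than a cleanup: the old MICREL_KSZ8_P1_ERRATA value 0x00000003 was bit 0 and bit 1 together, indistinguishable from MICREL_PHY_50MHZ_CLK | MICREL_PHY_FXEN when dev_flags is tested with `&`. A small demonstration of the ambiguity, with stand-in names:

/* Old scheme: sequential values, so 0x3 is two bits at once. */
#define OLD_PHY_50MHZ_CLK	0x00000001
#define OLD_PHY_FXEN		0x00000002
#define OLD_KSZ8_P1_ERRATA	0x00000003	/* == 50MHZ_CLK | FXEN */

static int old_errata_set(unsigned int dev_flags)
{
	return (dev_flags & OLD_KSZ8_P1_ERRATA) == OLD_KSZ8_P1_ERRATA;
}
/* old_errata_set(OLD_PHY_50MHZ_CLK | OLD_PHY_FXEN) == 1, wrongly.
 * With BIT(0)..BIT(3) every flag owns one bit and the same test
 * becomes unambiguous.
 */
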
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index 1817940a3418e..4dff1eba425ba 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -615,7 +615,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
+  *
+  * The %neg_mode argument should be tested via the phylink_mode_*() family of
+  * functions, or for PCS that set pcs->neg_mode true, should be tested
+- * against the %PHYLINK_PCS_NEG_* definitions.
++ * against the PHYLINK_PCS_NEG_* definitions.
+  */
+ int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ 	       phy_interface_t interface, const unsigned long *advertising,
+@@ -645,7 +645,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
+  *
+  * The %mode argument should be tested via the phylink_mode_*() family of
+  * functions, or for PCS that set pcs->neg_mode true, should be tested
+- * against the %PHYLINK_PCS_NEG_* definitions.
++ * against the PHYLINK_PCS_NEG_* definitions.
+  */
+ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
+ 		 phy_interface_t interface, int speed, int duplex);
+diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
+index b0d36a9934ccd..5cf6f6f82aa70 100644
+--- a/include/linux/tca6416_keypad.h
++++ b/include/linux/tca6416_keypad.h
+@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
+ 	unsigned int rep:1;	/* enable input subsystem auto repeat */
+ 	uint16_t pinmask;
+ 	uint16_t invert;
+-	int irq_is_gpio;
+ 	int use_polling;	/* use polling if Interrupt is not connected*/
+ };
+ #endif
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 19adacd5ece03..3489a1cca5e7b 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -57,6 +57,7 @@ struct inet_skb_parm {
+ #define IPSKB_FRAG_PMTU		BIT(6)
+ #define IPSKB_L3SLAVE		BIT(7)
+ #define IPSKB_NOPOLICY		BIT(8)
++#define IPSKB_MULTIPATH		BIT(9)
+ 
+ 	u16			frag_max_size;
+ };
+@@ -94,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ 	ipcm_init(ipcm);
+ 
+ 	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+-	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
++	ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
+ 	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ 	ipcm->addr = inet->inet_saddr;
+ 	ipcm->protocol = inet->inet_num;
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 05e6f756feafe..9ba6413fd2e3e 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -604,7 +604,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ 	if (!net->ipv6.fib6_rules_require_fldissect)
+ 		return false;
+ 
+-	skb_flow_dissect_flow_keys(skb, flkeys, flag);
++	memset(flkeys, 0, sizeof(*flkeys));
++	__skb_flow_dissect(net, skb, &flow_keys_dissector,
++			   flkeys, NULL, 0, 0, 0, flag);
++
+ 	fl6->fl6_sport = flkeys->ports.src;
+ 	fl6->fl6_dport = flkeys->ports.dst;
+ 	fl6->flowi6_proto = flkeys->basic.ip_proto;
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index a378eff827c74..f0c13864180e2 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -418,7 +418,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ 	if (!net->ipv4.fib_rules_require_fldissect)
+ 		return false;
+ 
+-	skb_flow_dissect_flow_keys(skb, flkeys, flag);
++	memset(flkeys, 0, sizeof(*flkeys));
++	__skb_flow_dissect(net, skb, &flow_keys_dissector,
++			   flkeys, NULL, 0, 0, 0, flag);
++
+ 	fl4->fl4_sport = flkeys->ports.src;
+ 	fl4->fl4_dport = flkeys->ports.dst;
+ 	fl4->flowi4_proto = flkeys->basic.ip_proto;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index ed4b6ad3fcac8..cd0e2744f66f3 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -482,15 +482,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
+ 		u64_stats_inc(&tstats->tx_packets);
+ 		u64_stats_update_end(&tstats->syncp);
+ 		put_cpu_ptr(tstats);
++		return;
++	}
++
++	if (pkt_len < 0) {
++		DEV_STATS_INC(dev, tx_errors);
++		DEV_STATS_INC(dev, tx_aborted_errors);
+ 	} else {
+-		struct net_device_stats *err_stats = &dev->stats;
+-
+-		if (pkt_len < 0) {
+-			err_stats->tx_errors++;
+-			err_stats->tx_aborted_errors++;
+-		} else {
+-			err_stats->tx_dropped++;
+-		}
++		DEV_STATS_INC(dev, tx_dropped);
+ 	}
+ }
+ 
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 2acc4c808d45d..a14ac821fb36f 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
+ 					cpu_to_be32(0x0000ffff))) == 0UL;
+ }
+ 
++static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
++{
++	return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
++}
++
+ static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
+ {
+ 	return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
+@@ -1356,7 +1361,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+ 	return 0;
+ }
+ 
+-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
++static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
+ {
+ 	int ret;
+ 
+diff --git a/include/net/scm.h b/include/net/scm.h
+index c5bcdf65f55c9..e8c76b4be2fe7 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -9,6 +9,7 @@
+ #include <linux/pid.h>
+ #include <linux/nsproxy.h>
+ #include <linux/sched/signal.h>
++#include <net/compat.h>
+ 
+ /* Well, we should have at least one descriptor open
+  * to accept passed FDs 8)
+@@ -123,14 +124,17 @@ static inline bool scm_has_secdata(struct socket *sock)
+ static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
+ {
+ 	struct file *pidfd_file = NULL;
+-	int pidfd;
++	int len, pidfd;
+ 
+-	/*
+-	 * put_cmsg() doesn't return an error if CMSG is truncated,
++	/* put_cmsg() doesn't return an error if CMSG is truncated,
+ 	 * that's why we need to opencode these checks here.
+ 	 */
+-	if ((msg->msg_controllen <= sizeof(struct cmsghdr)) ||
+-	    (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) {
++	if (msg->msg_flags & MSG_CMSG_COMPAT)
++		len = sizeof(struct compat_cmsghdr) + sizeof(int);
++	else
++		len = sizeof(struct cmsghdr) + sizeof(int);
++
++	if (msg->msg_controllen < len) {
+ 		msg->msg_flags |= MSG_CTRUNC;
+ 		return;
+ 	}
+diff --git a/include/net/sock.h b/include/net/sock.h
+index e8927f2d47a3c..4e787285fc66b 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
+ 	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
+ }
+ 
++static inline void sk_forward_alloc_add(struct sock *sk, int val)
++{
++	/* Paired with lockless reads of sk->sk_forward_alloc */
++	WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
++}
++
+ void sk_stream_write_space(struct sock *sk);
+ 
+ /* OOB backlog add */
+@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
+ 	if (sk->sk_prot->forward_alloc_get)
+ 		return sk->sk_prot->forward_alloc_get(sk);
+ #endif
+-	return sk->sk_forward_alloc;
++	return READ_ONCE(sk->sk_forward_alloc);
+ }
+ 
+ static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
+ {
+ 	if (!sk_has_account(sk))
+ 		return;
+-	sk->sk_forward_alloc -= size;
++	sk_forward_alloc_add(sk, -size);
+ }
+ 
+ static inline void sk_mem_uncharge(struct sock *sk, int size)
+ {
+ 	if (!sk_has_account(sk))
+ 		return;
+-	sk->sk_forward_alloc += size;
++	sk_forward_alloc_add(sk, size);
+ 	sk_mem_reclaim(sk);
+ }
+ 
+@@ -1900,7 +1906,9 @@ struct sockcm_cookie {
+ static inline void sockcm_init(struct sockcm_cookie *sockc,
+ 			       const struct sock *sk)
+ {
+-	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
++	*sockc = (struct sockcm_cookie) {
++		.tsflags = READ_ONCE(sk->sk_tsflags)
++	};
+ }
+ 
+ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
+@@ -2695,9 +2703,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ static inline void
+ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+ {
+-	ktime_t kt = skb->tstamp;
+ 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+-
++	u32 tsflags = READ_ONCE(sk->sk_tsflags);
++	ktime_t kt = skb->tstamp;
+ 	/*
+ 	 * generate control messages if
+ 	 * - receive time stamping in software requested
+@@ -2705,10 +2713,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+ 	 * - hardware time stamps available and wanted
+ 	 */
+ 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
+-	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+-	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
++	    (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
++	    (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ 	    (hwtstamps->hwtstamp &&
+-	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
++	     (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ 		__sock_recv_timestamp(msg, sk, skb);
+ 	else
+ 		sock_write_timestamp(sk, kt);
+@@ -2730,7 +2738,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ #define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
+ 			   SOF_TIMESTAMPING_RAW_HARDWARE)
+ 
+-	if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
++	if (sk->sk_flags & FLAGS_RECV_CMSGS ||
++	    READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ 		__sock_recv_cmsgs(msg, sk, skb);
+ 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+ 		sock_write_timestamp(sk, skb->tstamp);
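
The sock.h hunks (and the ipv4/ipv6 call sites later in this patch) all follow one pattern: fields like sk_forward_alloc, sk_tsflags, sk_bind_phc and sk_shutdown are written under the socket lock but read locklessly from diag, /proc and error paths, so both sides gain WRITE_ONCE()/READ_ONCE() annotations to rule out load/store tearing and to document the pairing. The pattern reduced to its essentials (illustrative helpers mirroring sk_forward_alloc_add() above):

#include <linux/compiler.h>

/* Writer side (lock held): publish whole values with WRITE_ONCE(). */
static inline void counter_add(int *counter, int val)
{
	WRITE_ONCE(*counter, *counter + val);
}

/* Lockless reader: pair every such write with READ_ONCE(). */
static inline int counter_get(const int *counter)
{
	return READ_ONCE(*counter);
}
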
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 8dfd581cd5543..eae5dfe9b9a01 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -143,6 +143,8 @@ static const struct audit_nfcfgop_tab audit_nfcfgs[] = {
+ 	{ AUDIT_NFT_OP_OBJ_RESET,		"nft_reset_obj"		   },
+ 	{ AUDIT_NFT_OP_FLOWTABLE_REGISTER,	"nft_register_flowtable"   },
+ 	{ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,	"nft_unregister_flowtable" },
++	{ AUDIT_NFT_OP_SETELEM_RESET,		"nft_reset_setelem"        },
++	{ AUDIT_NFT_OP_RULE_RESET,		"nft_reset_rule"           },
+ 	{ AUDIT_NFT_OP_INVALID,			"nft_invalid"		   },
+ };
+ 
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index b5149cfce7d4d..146824cc96893 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -553,7 +553,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 			 void *value, u64 map_flags, gfp_t gfp_flags)
+ {
+ 	struct bpf_local_storage_data *old_sdata = NULL;
+-	struct bpf_local_storage_elem *selem = NULL;
++	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
+ 	struct bpf_local_storage *local_storage;
+ 	unsigned long flags;
+ 	int err;
+@@ -607,11 +607,12 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 		}
+ 	}
+ 
+-	if (gfp_flags == GFP_KERNEL) {
+-		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
+-		if (!selem)
+-			return ERR_PTR(-ENOMEM);
+-	}
++	/* A lookup has just been done and concluded that a new selem is
++	 * needed, so an unnecessary alloc here is unlikely.
++	 */
++	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
++	if (!alloc_selem)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	raw_spin_lock_irqsave(&local_storage->lock, flags);
+ 
+@@ -623,13 +624,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 		 * simple.
+ 		 */
+ 		err = -EAGAIN;
+-		goto unlock_err;
++		goto unlock;
+ 	}
+ 
+ 	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
+ 	err = check_flags(old_sdata, map_flags);
+ 	if (err)
+-		goto unlock_err;
++		goto unlock;
+ 
+ 	if (old_sdata && (map_flags & BPF_F_LOCK)) {
+ 		copy_map_value_locked(&smap->map, old_sdata->data, value,
+@@ -638,23 +639,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 		goto unlock;
+ 	}
+ 
+-	if (gfp_flags != GFP_KERNEL) {
+-		/* local_storage->lock is held.  Hence, we are sure
+-		 * we can unlink and uncharge the old_sdata successfully
+-		 * later.  Hence, instead of charging the new selem now
+-		 * and then uncharge the old selem later (which may cause
+-		 * a potential but unnecessary charge failure),  avoid taking
+-		 * a charge at all here (the "!old_sdata" check) and the
+-		 * old_sdata will not be uncharged later during
+-		 * bpf_selem_unlink_storage_nolock().
+-		 */
+-		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
+-		if (!selem) {
+-			err = -ENOMEM;
+-			goto unlock_err;
+-		}
+-	}
+-
++	alloc_selem = NULL;
+ 	/* First, link the new selem to the map */
+ 	bpf_selem_link_map(smap, selem);
+ 
+@@ -665,20 +650,16 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 	if (old_sdata) {
+ 		bpf_selem_unlink_map(SELEM(old_sdata));
+ 		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+-						false, false);
++						true, false);
+ 	}
+ 
+ unlock:
+ 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+-	return SDATA(selem);
+-
+-unlock_err:
+-	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+-	if (selem) {
++	if (alloc_selem) {
+ 		mem_uncharge(smap, owner, smap->elem_size);
+-		bpf_selem_free(selem, smap, true);
++		bpf_selem_free(alloc_selem, smap, true);
+ 	}
+-	return ERR_PTR(err);
++	return err ? ERR_PTR(err) : SDATA(selem);
+ }
+ 
+ static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
+@@ -779,7 +760,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
+ 		 * of the loop will set the free_cgroup_storage to true.
+ 		 */
+ 		free_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, false, true);
++			local_storage, selem, true, true);
+ 	}
+ 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ 
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index dc85240a01342..e3e45b651cd40 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1635,12 +1635,6 @@ bool bpf_opcode_in_insntable(u8 code)
+ }
+ 
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+-u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+-{
+-	memset(dst, 0, size);
+-	return -EFAULT;
+-}
+-
+ /**
+  *	___bpf_prog_run - run eBPF program on a given context
+  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
+@@ -1931,8 +1925,8 @@ out:
+ 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
+ 		CONT;							\
+ 	LDX_PROBE_MEM_##SIZEOP:						\
+-		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
+-				      (const void *)(long) (SRC + insn->off));	\
++		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
++			      (const void *)(long) (SRC + insn->off));	\
+ 		DST = *((SIZE *)&DST);					\
+ 		CONT;
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index a2aef900519c2..1480b6cf12f06 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -5304,9 +5304,9 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+ 		}
+ 
+ 		run_ctx.bpf_cookie = 0;
+-		run_ctx.saved_run_ctx = NULL;
+ 		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
+ 			/* recursion detected */
++			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
+ 			bpf_prog_put(prog);
+ 			return -EBUSY;
+ 		}
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 78acf28d48732..53ff50cac61ea 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -926,13 +926,12 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ 	migrate_disable();
+ 	might_fault();
+ 
++	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
++
+ 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+ 		bpf_prog_inc_misses_counter(prog);
+ 		return 0;
+ 	}
+-
+-	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+-
+ 	return bpf_prog_start_time();
+ }
+ 
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 30d8db47c1e2f..abf287b2678a1 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -223,17 +223,6 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ 	.arg3_type	= ARG_ANYTHING,
+ };
+ 
+-static __always_inline int
+-bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+-{
+-	int ret;
+-
+-	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+-	if (unlikely(ret < 0))
+-		memset(dst, 0, size);
+-	return ret;
+-}
+-
+ BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
+ 	   const void *, unsafe_ptr)
+ {
+diff --git a/lib/idr.c b/lib/idr.c
+index 7ecdfdb5309e7..13f2758c23773 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
+  * @end: The maximum ID (exclusive).
+  * @gfp: Memory allocation flags.
+  *
+- * Allocates an unused ID in the range specified by @nextid and @end.  If
++ * Allocates an unused ID in the range specified by @start and @end.  If
+  * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
+  * callers to use @start + N as @end as long as N is within integer range.
+  * The search for an unused ID will start at the last ID allocated and will
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 84e4666555c94..e8c9dd9d73a30 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -744,12 +744,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+ 
+ 	switch (val) {
+ 	case MODULE_STATE_LIVE:
+-		kunit_module_init(mod);
+ 		break;
+ 	case MODULE_STATE_GOING:
+ 		kunit_module_exit(mod);
+ 		break;
+ 	case MODULE_STATE_COMING:
++		kunit_module_init(mod);
++		break;
+ 	case MODULE_STATE_UNFORMED:
+ 		break;
+ 	}
+diff --git a/lib/test_meminit.c b/lib/test_meminit.c
+index 60e1984c060fa..0ae35223d7733 100644
+--- a/lib/test_meminit.c
++++ b/lib/test_meminit.c
+@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
+ 	int failures = 0, num_tests = 0;
+ 	int i;
+ 
+-	for (i = 0; i < 10; i++)
++	for (i = 0; i <= MAX_ORDER; i++)
+ 		num_tests += do_alloc_pages_order(i, &failures);
+ 
+ 	REPORT_FAILURES_IN_FN();
+diff --git a/lib/test_scanf.c b/lib/test_scanf.c
+index b620cf7de5035..a2707af2951ab 100644
+--- a/lib/test_scanf.c
++++ b/lib/test_scanf.c
+@@ -606,7 +606,7 @@ static void __init numbers_slice(void)
+ #define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn)	\
+ do {										\
+ 	const T expect[2] = { expect0, expect1 };				\
+-	T result[2] = {~expect[0], ~expect[1]};					\
++	T result[2] = { (T)~expect[0], (T)~expect[1] };				\
+ 										\
+ 	_test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);	\
+ } while (0)
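
The test_scanf cast matters because of C integer promotion: for a type T narrower than int, ~expect[0] is computed in int, so the initialiser becomes a value like 0xfffffff0 rather than the T-ranged 0xf0, which trips truncation warnings when it initialises a T array element. Casting the complement back to T keeps the initialiser in range. For example:

static void promotion_demo(void)
{
	unsigned char x = 0x0f;
	int promoted = ~x;			/* ~ is computed in int: 0xfffffff0 */
	unsigned char narrowed = (unsigned char)~x;	/* truncated back: 0xf0 */

	/* promoted (0xfffffff0) != narrowed (0xf0) as values; initialising
	 * an unsigned char from the uncast ~x is what drew the warnings.
	 */
	(void)promoted;
	(void)narrowed;
}
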
+diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
+index c2007ef5e9b05..4b9734777f698 100644
+--- a/mm/hugetlb_vmemmap.c
++++ b/mm/hugetlb_vmemmap.c
+@@ -36,14 +36,22 @@ struct vmemmap_remap_walk {
+ 	struct list_head	*vmemmap_pages;
+ };
+ 
+-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
++static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ {
+ 	pmd_t __pmd;
+ 	int i;
+ 	unsigned long addr = start;
+-	struct page *page = pmd_page(*pmd);
+-	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
++	struct page *head;
++	pte_t *pgtable;
++
++	spin_lock(&init_mm.page_table_lock);
++	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
++	spin_unlock(&init_mm.page_table_lock);
+ 
++	if (!head)
++		return 0;
++
++	pgtable = pte_alloc_one_kernel(&init_mm);
+ 	if (!pgtable)
+ 		return -ENOMEM;
+ 
+@@ -53,7 +61,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 		pte_t entry, *pte;
+ 		pgprot_t pgprot = PAGE_KERNEL;
+ 
+-		entry = mk_pte(page + i, pgprot);
++		entry = mk_pte(head + i, pgprot);
+ 		pte = pte_offset_kernel(&__pmd, addr);
+ 		set_pte_at(&init_mm, addr, pte, entry);
+ 	}
+@@ -65,8 +73,8 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 		 * be treated as indepdenent small pages (as they can be freed
+ 		 * be treated as independent small pages (as they can be freed
+ 		 */
+-		if (!PageReserved(page))
+-			split_page(page, get_order(PMD_SIZE));
++		if (!PageReserved(head))
++			split_page(head, get_order(PMD_SIZE));
+ 
+ 		/* Make pte visible before pmd. See comment in pmd_install(). */
+ 		smp_wmb();
+@@ -80,20 +88,6 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 	return 0;
+ }
+ 
+-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+-{
+-	int leaf;
+-
+-	spin_lock(&init_mm.page_table_lock);
+-	leaf = pmd_leaf(*pmd);
+-	spin_unlock(&init_mm.page_table_lock);
+-
+-	if (!leaf)
+-		return 0;
+-
+-	return __split_vmemmap_huge_pmd(pmd, start);
+-}
+-
+ static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
+ 			      unsigned long end,
+ 			      struct vmemmap_remap_walk *walk)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 315fd5f45e3c0..e79267c1eee01 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3871,10 +3871,6 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 		case _MEMSWAP:
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
+-		case _KMEM:
+-			/* kmem.limit_in_bytes is deprecated. */
+-			ret = -EOPNOTSUPP;
+-			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+ 			break;
+@@ -5085,12 +5081,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
+ 		.seq_show = memcg_numa_stat_show,
+ 	},
+ #endif
+-	{
+-		.name = "kmem.limit_in_bytes",
+-		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+-		.write = mem_cgroup_write,
+-		.read_u64 = mem_cgroup_read_u64,
+-	},
+ 	{
+ 		.name = "kmem.usage_in_bytes",
+ 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+@@ -5339,7 +5329,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
+ 	memcg->deferred_split_queue.split_queue_len = 0;
+ #endif
+-	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+ 	lru_gen_init_memcg(memcg);
+ 	return memcg;
+ fail:
+@@ -5411,14 +5400,27 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ 	if (alloc_shrinker_info(memcg))
+ 		goto offline_kmem;
+ 
+-	/* Online state pins memcg ID, memcg ID pins CSS */
+-	refcount_set(&memcg->id.ref, 1);
+-	css_get(css);
+-
+ 	if (unlikely(mem_cgroup_is_root(memcg)))
+ 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
+ 				   FLUSH_TIME);
+ 	lru_gen_online_memcg(memcg);
++
++	/* Online state pins memcg ID, memcg ID pins CSS */
++	refcount_set(&memcg->id.ref, 1);
++	css_get(css);
++
++	/*
++	 * Ensure mem_cgroup_from_id() works once we're fully online.
++	 *
++	 * We could do this earlier and require callers to filter with
++	 * css_tryget_online(). But right now there are no users that
++	 * need earlier access, and the workingset code relies on the
++	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
++	 * publish it here at the end of onlining. This matches the
++	 * regular ID destruction during offlining.
++	 */
++	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
++
+ 	return 0;
+ offline_kmem:
+ 	memcg_offline_kmem(memcg);
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 11e06e4ab33be..91f0173d396f0 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -715,7 +715,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
+ 	}
+ 
+ 	vma_iter_init(&vmi, mm, old_addr);
+-	if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
++	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
+ 		/* OOM: unable to split vma, just get accounts right */
+ 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
+ 			vm_acct_memory(old_len >> PAGE_SHIFT);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 5be64834a8527..da152407bc2b1 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4440,7 +4440,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+ 	int prev, next;
+ 	int type, zone;
+ 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
+-
++restart:
+ 	spin_lock_irq(&lruvec->lru_lock);
+ 
+ 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+@@ -4451,11 +4451,12 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+ 
+ 		VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
+ 
+-		while (!inc_min_seq(lruvec, type, can_swap)) {
+-			spin_unlock_irq(&lruvec->lru_lock);
+-			cond_resched();
+-			spin_lock_irq(&lruvec->lru_lock);
+-		}
++		if (inc_min_seq(lruvec, type, can_swap))
++			continue;
++
++		spin_unlock_irq(&lruvec->lru_lock);
++		cond_resched();
++		goto restart;
+ 	}
+ 
+ 	/*
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index feaec4ad6d163..b28c976f52a0a 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -974,6 +974,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	struct sock_exterr_skb *serr;
+ 	struct sk_buff *skb;
+ 	char *state = "UNK";
++	u32 tsflags;
+ 	int err;
+ 
+ 	jsk = j1939_sk(sk);
+@@ -981,13 +982,14 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
+ 		return;
+ 
++	tsflags = READ_ONCE(sk->sk_tsflags);
+ 	switch (type) {
+ 	case J1939_ERRQUEUE_TX_ACK:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
++		if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
+ 			return;
+ 		break;
+ 	case J1939_ERRQUEUE_TX_SCHED:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
++		if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ 			return;
+ 		break;
+ 	case J1939_ERRQUEUE_TX_ABORT:
+@@ -997,7 +999,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	case J1939_ERRQUEUE_RX_DPO:
+ 		fallthrough;
+ 	case J1939_ERRQUEUE_RX_ABORT:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
++		if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
+ 			return;
+ 		break;
+ 	default:
+@@ -1054,7 +1056,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	}
+ 
+ 	serr->opt_stats = true;
+-	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	if (tsflags & SOF_TIMESTAMPING_OPT_ID)
+ 		serr->ee.ee_data = session->tskey;
+ 
+ 	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 85a2d0d9bd395..6bed3992df814 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1780,8 +1780,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+ 
+ 	memset(&keys, 0, sizeof(keys));
+ 	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+-			   &keys, NULL, 0, 0, 0,
+-			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
++			   &keys, NULL, 0, 0, 0, 0);
+ 
+ 	return __flow_hash_from_keys(&keys, &hashrnd);
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index acdf94bb54c80..7dfae58055c2b 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5149,7 +5149,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ 	serr->ee.ee_info = tstype;
+ 	serr->opt_stats = opt_stats;
+ 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
+-	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
++	if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
+ 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ 		if (sk_is_tcp(sk))
+ 			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
+@@ -5205,21 +5205,23 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ {
+ 	struct sk_buff *skb;
+ 	bool tsonly, opt_stats = false;
++	u32 tsflags;
+ 
+ 	if (!sk)
+ 		return;
+ 
+-	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
++	tsflags = READ_ONCE(sk->sk_tsflags);
++	if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+ 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
+ 		return;
+ 
+-	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
++	tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+ 	if (!skb_may_tx_timestamp(sk, tsonly))
+ 		return;
+ 
+ 	if (tsonly) {
+ #ifdef CONFIG_INET
+-		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
++		if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+ 		    sk_is_tcp(sk)) {
+ 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
+ 							     ack_skb);
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index ef1a2eb6520bf..a746dbc2f8877 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -612,12 +612,18 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
+ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ 			       u32 off, u32 len, bool ingress)
+ {
++	int err = 0;
++
+ 	if (!ingress) {
+ 		if (!sock_writeable(psock->sk))
+ 			return -EAGAIN;
+ 		return skb_send_sock(psock->sk, skb, off, len);
+ 	}
+-	return sk_psock_skb_ingress(psock, skb, off, len);
++	skb_get(skb);
++	err = sk_psock_skb_ingress(psock, skb, off, len);
++	if (err < 0)
++		kfree_skb(skb);
++	return err;
+ }
+ 
+ static void sk_psock_skb_state(struct sk_psock *psock,
+@@ -685,9 +691,7 @@ static void sk_psock_backlog(struct work_struct *work)
+ 		} while (len);
+ 
+ 		skb = skb_dequeue(&psock->ingress_skb);
+-		if (!ingress) {
+-			kfree_skb(skb);
+-		}
++		kfree_skb(skb);
+ 	}
+ end:
+ 	mutex_unlock(&psock->work_mutex);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1c5c01b116e6f..29c6cb030818b 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -765,7 +765,8 @@ bool sk_mc_loop(struct sock *sk)
+ 		return false;
+ 	if (!sk)
+ 		return true;
+-	switch (sk->sk_family) {
++	/* IPV6_ADDRFORM can change sk->sk_family under us. */
++	switch (READ_ONCE(sk->sk_family)) {
+ 	case AF_INET:
+ 		return inet_sk(sk)->mc_loop;
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -893,7 +894,7 @@ static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
+ 	if (!match)
+ 		return -EINVAL;
+ 
+-	sk->sk_bind_phc = phc_index;
++	WRITE_ONCE(sk->sk_bind_phc, phc_index);
+ 
+ 	return 0;
+ }
+@@ -936,7 +937,7 @@ int sock_set_timestamping(struct sock *sk, int optname,
+ 			return ret;
+ 	}
+ 
+-	sk->sk_tsflags = val;
++	WRITE_ONCE(sk->sk_tsflags, val);
+ 	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
+ 
+ 	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+@@ -1044,7 +1045,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
+ 		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+ 		return -ENOMEM;
+ 	}
+-	sk->sk_forward_alloc += pages << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
+ 
+ 	WRITE_ONCE(sk->sk_reserved_mem,
+ 		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
+@@ -1717,8 +1718,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case SO_TIMESTAMPING_OLD:
+ 		lv = sizeof(v.timestamping);
+-		v.timestamping.flags = sk->sk_tsflags;
+-		v.timestamping.bind_phc = sk->sk_bind_phc;
++		v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
++		v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
+ 		break;
+ 
+ 	case SO_RCVTIMEO_OLD:
+@@ -2745,9 +2746,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
+ 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
+ 			break;
+-		if (sk->sk_shutdown & SEND_SHUTDOWN)
++		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 			break;
+-		if (sk->sk_err)
++		if (READ_ONCE(sk->sk_err))
+ 			break;
+ 		timeo = schedule_timeout(timeo);
+ 	}
+@@ -2775,7 +2776,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ 			goto failure;
+ 
+ 		err = -EPIPE;
+-		if (sk->sk_shutdown & SEND_SHUTDOWN)
++		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 			goto failure;
+ 
+ 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
+@@ -3137,10 +3138,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
+ {
+ 	int ret, amt = sk_mem_pages(size);
+ 
+-	sk->sk_forward_alloc += amt << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
+ 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
+ 	if (!ret)
+-		sk->sk_forward_alloc -= amt << PAGE_SHIFT;
++		sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
+ 	return ret;
+ }
+ EXPORT_SYMBOL(__sk_mem_schedule);
+@@ -3172,7 +3173,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ void __sk_mem_reclaim(struct sock *sk, int amount)
+ {
+ 	amount >>= PAGE_SHIFT;
+-	sk->sk_forward_alloc -= amount << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
+ 	__sk_mem_reduce_allocated(sk, amount);
+ }
+ EXPORT_SYMBOL(__sk_mem_reclaim);
+@@ -3741,7 +3742,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
+ 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
+ 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+ 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
+-	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
++	mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
+ 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
+ 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
+diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
+index 1086653e1fada..d0bc1dd8e65a8 100644
+--- a/net/handshake/netlink.c
++++ b/net/handshake/netlink.c
+@@ -157,26 +157,24 @@ out_status:
+ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct net *net = sock_net(skb->sk);
+-	struct handshake_req *req = NULL;
+-	struct socket *sock = NULL;
++	struct handshake_req *req;
++	struct socket *sock;
+ 	int fd, status, err;
+ 
+ 	if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
+ 		return -EINVAL;
+ 	fd = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]);
+ 
+-	err = 0;
+ 	sock = sockfd_lookup(fd, &err);
+-	if (err) {
+-		err = -EBADF;
+-		goto out_status;
+-	}
++	if (!sock)
++		return err;
+ 
+ 	req = handshake_req_hash_lookup(sock->sk);
+ 	if (!req) {
+ 		err = -EBUSY;
++		trace_handshake_cmd_done_err(net, req, sock->sk, err);
+ 		fput(sock->file);
+-		goto out_status;
++		return err;
+ 	}
+ 
+ 	trace_handshake_cmd_done(net, req, sock->sk, fd);
+@@ -188,10 +186,6 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
+ 	handshake_complete(req, status, info);
+ 	fput(sock->file);
+ 	return 0;
+-
+-out_status:
+-	trace_handshake_cmd_done_err(net, req, sock->sk, err);
+-	return err;
+ }
+ 
+ static unsigned int handshake_net_id;
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 629daacc96071..b71dab630a873 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ 		/* FIXME: */
+ 		netdev_warn_once(skb->dev, "VLAN not yet supported");
++		return -EINVAL;
+ 	}
+ 
+ 	frame->is_from_san = false;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 5deac0517ef70..37be82496322d 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -355,14 +355,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
+ {
+ 	struct in_ifaddr *promote = NULL;
+ 	struct in_ifaddr *ifa, *ifa1;
+-	struct in_ifaddr *last_prim;
++	struct in_ifaddr __rcu **last_prim;
+ 	struct in_ifaddr *prev_prom = NULL;
+ 	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
+ 
+ 	ASSERT_RTNL();
+ 
+ 	ifa1 = rtnl_dereference(*ifap);
+-	last_prim = rtnl_dereference(in_dev->ifa_list);
++	last_prim = ifap;
+ 	if (in_dev->dead)
+ 		goto no_promotions;
+ 
+@@ -376,7 +376,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
+ 		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
+ 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
+ 			    ifa1->ifa_scope <= ifa->ifa_scope)
+-				last_prim = ifa;
++				last_prim = &ifa->ifa_next;
+ 
+ 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
+ 			    ifa1->ifa_mask != ifa->ifa_mask ||
+@@ -440,9 +440,9 @@ no_promotions:
+ 
+ 			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
+ 
+-			last_sec = rtnl_dereference(last_prim->ifa_next);
++			last_sec = rtnl_dereference(*last_prim);
+ 			rcu_assign_pointer(promote->ifa_next, last_sec);
+-			rcu_assign_pointer(last_prim->ifa_next, promote);
++			rcu_assign_pointer(*last_prim, promote);
+ 		}
+ 
+ 		promote->ifa_flags &= ~IFA_F_SECONDARY;
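
The devinet.c fix turns last_prim from a pointer to the first address into a pointer at the link *after* the last primary address, the standard pointer-to-pointer idiom: by remembering the location of the link rather than the node itself, the promotion code can splice `promote` in with a single rcu_assign_pointer() even when the address being deleted was the list head. The idiom in isolation (illustrative singly linked list, no RCU):

struct node {
	struct node *next;
	int primary;
};

/* Remember the link after the last primary entry, then splice there. */
static void insert_after_primaries(struct node **head, struct node *new_node)
{
	struct node **slot = head;
	struct node *n;

	for (n = *head; n; n = n->next)
		if (n->primary)
			slot = &n->next;	/* link to rewrite later */

	new_node->next = *slot;
	*slot = new_node;
}
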
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 65ba18a91865a..eafa4a0335157 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -278,7 +278,8 @@ void fib_release_info(struct fib_info *fi)
+ 				hlist_del(&nexthop_nh->nh_hash);
+ 			} endfor_nexthops(fi)
+ 		}
+-		fi->fib_dead = 1;
++		/* Paired with READ_ONCE() from fib_table_lookup() */
++		WRITE_ONCE(fi->fib_dead, 1);
+ 		fib_info_put(fi);
+ 	}
+ 	spin_unlock_bh(&fib_info_lock);
+@@ -1581,6 +1582,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
+ link_it:
+ 	ofi = fib_find_info(fi);
+ 	if (ofi) {
++		/* fib_table_lookup() should not see @fi yet. */
+ 		fi->fib_dead = 1;
+ 		free_fib_info(fi);
+ 		refcount_inc(&ofi->fib_treeref);
+@@ -1619,6 +1621,7 @@ err_inval:
+ 
+ failure:
+ 	if (fi) {
++		/* fib_table_lookup() should not see @fi yet. */
+ 		fi->fib_dead = 1;
+ 		free_fib_info(fi);
+ 	}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 74d403dbd2b4e..d13fb9e76b971 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1582,7 +1582,8 @@ found:
+ 		if (fa->fa_dscp &&
+ 		    inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
+ 			continue;
+-		if (fi->fib_dead)
++		/* Paired with WRITE_ONCE() in fib_release_info() */
++		if (READ_ONCE(fi->fib_dead))
+ 			continue;
+ 		if (fa->fa_info->fib_scope < flp->flowi4_scope)
+ 			continue;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 0819d6001b9ab..ae5e786a0598d 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -795,41 +795,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
+ 				    const struct net *net, unsigned short port,
+ 				    int l3mdev, const struct sock *sk)
+ {
++	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
++	    tb->l3mdev != l3mdev)
++		return false;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb->family)
++	if (sk->sk_family != tb->family) {
++		if (sk->sk_family == AF_INET)
++			return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
++				tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
++
+ 		return false;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev &&
+-			ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+-	else
++		return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+ #endif
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
++	return tb->rcv_saddr == sk->sk_rcv_saddr;
+ }
+ 
+ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
+ 				      unsigned short port, int l3mdev, const struct sock *sk)
+ {
++	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
++	    tb->l3mdev != l3mdev)
++		return false;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (sk->sk_family != tb->family) {
+ 		if (sk->sk_family == AF_INET)
+-			return net_eq(ib2_net(tb), net) && tb->port == port &&
+-				tb->l3mdev == l3mdev &&
+-				ipv6_addr_any(&tb->v6_rcv_saddr);
++			return ipv6_addr_any(&tb->v6_rcv_saddr) ||
++				ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
+ 
+ 		return false;
+ 	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev &&
+-			ipv6_addr_any(&tb->v6_rcv_saddr);
+-	else
++		return ipv6_addr_any(&tb->v6_rcv_saddr);
+ #endif
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
++	return tb->rcv_saddr == 0;
+ }
+ 
+ /* The socket's bhash2 hashbucket spinlock must be held when this is called */
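
The bind-bucket matchers above gain cross-family cases: an AF_INET socket must now match an AF_INET6 bucket whose saved address is the v4-mapped form (::ffff:a.b.c.d) of its address, and the any-address matcher accepts both :: and ::ffff:0.0.0.0 via the ipv6_addr_v4mapped_any() helper added in ipv6.h earlier in this patch. The mapped-address comparison in isolation (sketch with a hypothetical helper name; types come from <net/ipv6.h>):

#include <net/ipv6.h>

/* ::ffff:a.b.c.d keeps the IPv4 address in the last 32 bits, which is
 * what the cross-family branch above compares against sk_rcv_saddr.
 */
static bool bucket_matches_v4_sock(const struct in6_addr *bucket_addr,
				   __be32 sk_rcv_saddr)
{
	return ipv6_addr_v4mapped(bucket_addr) &&
	       bucket_addr->s6_addr32[3] == sk_rcv_saddr;
}
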
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index fe9ead9ee863d..5e9c8156656a7 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -584,7 +584,8 @@ static void ip_sublist_rcv_finish(struct list_head *head)
+ static struct sk_buff *ip_extract_route_hint(const struct net *net,
+ 					     struct sk_buff *skb, int rt_type)
+ {
+-	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
++	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
++	    IPCB(skb)->flags & IPSKB_MULTIPATH)
+ 		return NULL;
+ 
+ 	return skb;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index a6e4c82615d7e..6935d07a60c35 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -982,7 +982,7 @@ static int __ip_append_data(struct sock *sk,
+ 	paged = !!cork->gso_size;
+ 
+ 	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
+-	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
+ 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ 
+ 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index d41bce8927b2c..d7006942fc2f9 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -510,7 +510,7 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+ 	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+ 	 */
+ 	info = PKTINFO_SKB_CB(skb);
+-	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
++	if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
+ 	    !info->ipi_ifindex)
+ 		return false;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 92fede388d520..33626619aee79 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2144,6 +2144,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
+ 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
+ 
+ 		fib_select_multipath(res, h);
++		IPCB(skb)->flags |= IPSKB_MULTIPATH;
+ 	}
+ #endif
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 8ed52e1e3c99a..75f24b931a185 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2256,14 +2256,14 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ 			}
+ 		}
+ 
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
++		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ 			has_timestamping = true;
+ 		else
+ 			tss->ts[0] = (struct timespec64) {0};
+ 	}
+ 
+ 	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
++		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ 			has_timestamping = true;
+ 		else
+ 			tss->ts[2] = (struct timespec64) {0};
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 51d8638d4b4c6..9f9ca68c47026 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3459,7 +3459,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
+ 	if (delta <= 0)
+ 		return;
+ 	amt = sk_mem_pages(delta);
+-	sk->sk_forward_alloc += amt << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
+ 	sk_memory_allocated_add(sk, amt);
+ 
+ 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b3aa68ea29de2..4c847baf52d1c 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1443,9 +1443,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
+ 		spin_lock(&sk_queue->lock);
+ 
+ 
+-	sk->sk_forward_alloc += size;
++	sk_forward_alloc_add(sk, size);
+ 	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+-	sk->sk_forward_alloc -= amt;
++	sk_forward_alloc_add(sk, -amt);
+ 
+ 	if (amt)
+ 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
+@@ -1556,7 +1556,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ 		goto uncharge_drop;
+ 	}
+ 
+-	sk->sk_forward_alloc -= size;
++	sk_forward_alloc_add(sk, -size);
+ 
+ 	/* no need to setup a destructor, we will explicitly release the
+ 	 * forward allocated memory on dequeue
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 94cec2075eee8..c93a2b9a91723 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1368,7 +1368,7 @@ retry:
+ 	 * idev->desync_factor if it's larger
+ 	 */
+ 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+-	max_desync_factor = min_t(__u32,
++	max_desync_factor = min_t(long,
+ 				  idev->cnf.max_desync_factor,
+ 				  cnf_temp_preferred_lft - regen_advance);
+ 
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index d94041bb42872..b8378814532ce 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -99,7 +99,8 @@ static bool ip6_can_use_hint(const struct sk_buff *skb,
+ static struct sk_buff *ip6_extract_route_hint(const struct net *net,
+ 					      struct sk_buff *skb)
+ {
+-	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
++	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
++	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
+ 		return NULL;
+ 
+ 	return skb;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 016b0a513259f..9270ef7f8e98b 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1502,7 +1502,7 @@ static int __ip6_append_data(struct sock *sk,
+ 	orig_mtu = mtu;
+ 
+ 	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
+-	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
+ 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ 
+ 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 1b27728349725..5831aaa53d75e 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -119,7 +119,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		return -EINVAL;
+ 
+ 	ipcm6_init_sk(&ipc6, np);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	fl6.flowi6_oif = oif;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index ea16734f5e1f7..d52d5e34c12ae 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -778,7 +778,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+ 	ipcm6_init(&ipc6);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = fl6.flowi6_mark;
+ 
+ 	if (sin6) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 56a55585eb798..a02328c93a537 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -424,6 +424,9 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 	if (match->nh && have_oif_match && res->nh)
+ 		return;
+ 
++	if (skb)
++		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
++
+ 	/* We might have already computed the hash for ICMPv6 errors. In such
+ 	 * case it will always be non-zero. Otherwise now is the time to do it.
+ 	 */
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 3ffca158d3e11..24d3c5c791218 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1368,7 +1368,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	ipcm6_init(&ipc6);
+ 	ipc6.gso_size = READ_ONCE(up->gso_size);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	/* destination address check */
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 393f01b2a7e6d..dd1d8ffd5f594 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -930,15 +930,18 @@ partial_message:
+ out_error:
+ 	kcm_push(kcm);
+ 
+-	if (copied && sock->type == SOCK_SEQPACKET) {
++	if (sock->type == SOCK_SEQPACKET) {
+ 		/* Wrote some bytes before encountering an
+ 		 * error, return partial success.
+ 		 */
+-		goto partial_message;
+-	}
+-
+-	if (head != kcm->seq_skb)
++		if (copied)
++			goto partial_message;
++		if (head != kcm->seq_skb)
++			kfree_skb(head);
++	} else {
+ 		kfree_skb(head);
++		kcm->seq_skb = NULL;
++	}
+ 
+ 	err = sk_stream_error(sk, msg->msg_flags, err);
+ 
+@@ -1859,6 +1862,8 @@ static __net_exit void kcm_exit_net(struct net *net)
+ 	 * that all multiplexors and psocks have been destroyed.
+ 	 */
+ 	WARN_ON(!list_empty(&knet->mux_list));
++
++	mutex_destroy(&knet->mutex);
+ }
+ 
+ static struct pernet_operations kcm_net_ops = {
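The reworked kcm error path separates the socket types: SOCK_SEQPACKET reports partial success when bytes were written, otherwise frees the assembled skb unless it is the queued seq_skb, while stream sockets always free head and clear kcm->seq_skb so a later sendmsg() cannot touch the freed buffer. A hedged sketch of the free-and-clear rule, with illustrative names only:

	#include <stdlib.h>

	struct conn { void *pending; };

	/* After freeing a buffer that is also reachable through a
	 * longer-lived pointer, clear that pointer in the same step.
	 */
	static void fail_message(struct conn *c, void *head)
	{
		free(head);
		if (c->pending == head)
			c->pending = NULL;	/* no dangling reference */
	}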
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0efc52c640b59..40258d9f8c799 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -136,9 +136,15 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
+ 	__kfree_skb(skb);
+ }
+ 
++static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
++{
++	WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
++		   mptcp_sk(sk)->rmem_fwd_alloc + size);
++}
++
+ static void mptcp_rmem_charge(struct sock *sk, int size)
+ {
+-	mptcp_sk(sk)->rmem_fwd_alloc -= size;
++	mptcp_rmem_fwd_alloc_add(sk, -size);
+ }
+ 
+ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+@@ -179,7 +185,7 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
+ static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
+ {
+ 	amount >>= PAGE_SHIFT;
+-	mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
++	mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
+ 	__sk_mem_reduce_allocated(sk, amount);
+ }
+ 
+@@ -188,7 +194,7 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	int reclaimable;
+ 
+-	msk->rmem_fwd_alloc += size;
++	mptcp_rmem_fwd_alloc_add(sk, size);
+ 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
+ 
+ 	/* see sk_mem_uncharge() for the rationale behind the following schema */
+@@ -343,7 +349,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
+ 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
+ 		return false;
+ 
+-	msk->rmem_fwd_alloc += amount;
++	mptcp_rmem_fwd_alloc_add(sk, amount);
+ 	return true;
+ }
+ 
+@@ -1773,7 +1779,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		}
+ 
+ 		/* data successfully copied into the write queue */
+-		sk->sk_forward_alloc -= total_ts;
++		sk_forward_alloc_add(sk, -total_ts);
+ 		copied += psize;
+ 		dfrag->data_len += psize;
+ 		frag_truesize += psize;
+@@ -3242,8 +3248,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
+ 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
+ 	 * inet_sock_destruct() will dispose it
+ 	 */
+-	sk->sk_forward_alloc += msk->rmem_fwd_alloc;
+-	msk->rmem_fwd_alloc = 0;
++	sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
++	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
+ 	mptcp_token_destroy(msk);
+ 	mptcp_pm_free_anno_list(msk);
+ 	mptcp_free_local_addr_list(msk);
+@@ -3513,7 +3519,8 @@ static void mptcp_shutdown(struct sock *sk, int how)
+ 
+ static int mptcp_forward_alloc_get(const struct sock *sk)
+ {
+-	return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
++	return READ_ONCE(sk->sk_forward_alloc) +
++	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
+ }
+ 
+ static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
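Every writer of msk->rmem_fwd_alloc now funnels through mptcp_rmem_fwd_alloc_add(), whose WRITE_ONCE() pairs with the new READ_ONCE() in mptcp_forward_alloc_get(): the counter has a single mutating context while diagnostics read it locklessly. A minimal sketch of that split, again with stand-in macros:

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	struct acct { int fwd_alloc; };

	static void acct_add(struct acct *a, int delta)	/* writer side */
	{
		WRITE_ONCE(a->fwd_alloc, a->fwd_alloc + delta);
	}

	static int acct_peek(const struct acct *a)	/* lockless reader */
	{
		return READ_ONCE(a->fwd_alloc);
	}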
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index eb8b1167dced2..a72934f00804e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -102,6 +102,7 @@ static const u8 nft2audit_op[NFT_MSG_MAX] = { // enum nf_tables_msg_types
+ 	[NFT_MSG_NEWFLOWTABLE]	= AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ 	[NFT_MSG_GETFLOWTABLE]	= AUDIT_NFT_OP_INVALID,
+ 	[NFT_MSG_DELFLOWTABLE]	= AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
++	[NFT_MSG_GETSETELEM_RESET] = AUDIT_NFT_OP_SETELEM_RESET,
+ };
+ 
+ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state)
+@@ -3421,6 +3422,18 @@ err:
+ 	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
+ }
+ 
++static void audit_log_rule_reset(const struct nft_table *table,
++				 unsigned int base_seq,
++				 unsigned int nentries)
++{
++	char *buf = kasprintf(GFP_ATOMIC, "%s:%u",
++			      table->name, base_seq);
++
++	audit_log_nfcfg(buf, table->family, nentries,
++			AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
++	kfree(buf);
++}
++
+ struct nft_rule_dump_ctx {
+ 	char *table;
+ 	char *chain;
+@@ -3467,6 +3480,10 @@ cont:
+ cont_skip:
+ 		(*idx)++;
+ 	}
++
++	if (reset && *idx)
++		audit_log_rule_reset(table, cb->seq, *idx);
++
+ 	return 0;
+ }
+ 
+@@ -3634,6 +3651,9 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (err < 0)
+ 		goto err_fill_rule_info;
+ 
++	if (reset)
++		audit_log_rule_reset(table, nft_pernet(net)->base_seq, 1);
++
+ 	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+ 
+ err_fill_rule_info:
+@@ -5621,13 +5641,25 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ 	return nf_tables_fill_setelem(args->skb, set, elem, args->reset);
+ }
+ 
++static void audit_log_nft_set_reset(const struct nft_table *table,
++				    unsigned int base_seq,
++				    unsigned int nentries)
++{
++	char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
++
++	audit_log_nfcfg(buf, table->family, nentries,
++			AUDIT_NFT_OP_SETELEM_RESET, GFP_ATOMIC);
++	kfree(buf);
++}
++
+ struct nft_set_dump_ctx {
+ 	const struct nft_set	*set;
+ 	struct nft_ctx		ctx;
+ };
+ 
+ static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb,
+-				 const struct nft_set *set, bool reset)
++				 const struct nft_set *set, bool reset,
++				 unsigned int base_seq)
+ {
+ 	struct nft_set_elem_catchall *catchall;
+ 	u8 genmask = nft_genmask_cur(net);
+@@ -5643,6 +5675,8 @@ static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb,
+ 
+ 		elem.priv = catchall->elem;
+ 		ret = nf_tables_fill_setelem(skb, set, &elem, reset);
++		if (reset && !ret)
++			audit_log_nft_set_reset(set->table, base_seq, 1);
+ 		break;
+ 	}
+ 
+@@ -5722,12 +5756,17 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ 	set->ops->walk(&dump_ctx->ctx, set, &args.iter);
+ 
+ 	if (!args.iter.err && args.iter.count == cb->args[0])
+-		args.iter.err = nft_set_catchall_dump(net, skb, set, reset);
+-	rcu_read_unlock();
+-
++		args.iter.err = nft_set_catchall_dump(net, skb, set,
++						      reset, cb->seq);
+ 	nla_nest_end(skb, nest);
+ 	nlmsg_end(skb, nlh);
+ 
++	if (reset && args.iter.count > args.iter.skip)
++		audit_log_nft_set_reset(table, cb->seq,
++					args.iter.count - args.iter.skip);
++
++	rcu_read_unlock();
++
+ 	if (args.iter.err && args.iter.err != -EMSGSIZE)
+ 		return args.iter.err;
+ 	if (args.iter.count == cb->args[0])
+@@ -5952,13 +5991,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
+ 	struct netlink_ext_ack *extack = info->extack;
+ 	u8 genmask = nft_genmask_cur(info->net);
+ 	u8 family = info->nfmsg->nfgen_family;
++	int rem, err = 0, nelems = 0;
+ 	struct net *net = info->net;
+ 	struct nft_table *table;
+ 	struct nft_set *set;
+ 	struct nlattr *attr;
+ 	struct nft_ctx ctx;
+ 	bool reset = false;
+-	int rem, err = 0;
+ 
+ 	table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+ 				 genmask, 0);
+@@ -6001,8 +6040,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
+ 			NL_SET_BAD_ATTR(extack, attr);
+ 			break;
+ 		}
++		nelems++;
+ 	}
+ 
++	if (reset)
++		audit_log_nft_set_reset(table, nft_pernet(net)->base_seq,
++					nelems);
++
+ 	return err;
+ }
+ 
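Rather than emitting one audit record per element, the dump paths above count what they reset and log a single AUDIT_NFT_OP_*_RESET record per table, keyed by "name:base_seq", while the single-object GET handlers log exactly one entry. A toy sketch of the batching shape, with printf() standing in for audit_log_nfcfg():

	#include <stdio.h>

	static void audit_reset(const char *table, unsigned int gen,
				unsigned int nentries)
	{
		if (!nentries)	/* nothing was reset, log nothing */
			return;
		printf("audit: %s:%u reset=%u\n", table, gen, nentries);
	}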
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 8f1bfa6ccc2d9..50723ba082890 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -315,6 +315,14 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
+ 
+ 	f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
+ 
++	if (f->opt_num > ARRAY_SIZE(f->opt))
++		return -EINVAL;
++
++	if (!memchr(f->genre, 0, MAXGENRELEN) ||
++	    !memchr(f->subtype, 0, MAXGENRELEN) ||
++	    !memchr(f->version, 0, MAXGENRELEN))
++		return -EINVAL;
++
+ 	kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
+ 	if (!kf)
+ 		return -ENOMEM;
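The fingerprint arrives in a netlink message, so the new checks bound opt_num against the opt[] array and insist that the fixed-size genre, subtype and version buffers contain a terminating NUL before anything treats them as C strings. The memchr() trick generalizes to any untrusted fixed-size buffer:

	#include <stdbool.h>
	#include <string.h>

	#define MAXGENRELEN 32

	/* memchr() over the whole buffer proves a NUL exists somewhere in
	 * the first MAXGENRELEN bytes, so string functions cannot overrun.
	 */
	static bool valid_cstr(const char buf[MAXGENRELEN])
	{
		return memchr(buf, '\0', MAXGENRELEN) != NULL;
	}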
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index a9844eefedebc..3fbaa7bf41f9c 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -35,6 +35,14 @@ static unsigned int optlen(const u8 *opt, unsigned int offset)
+ 		return opt[offset + 1];
+ }
+ 
++static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
++{
++	if (len % NFT_REG32_SIZE)
++		dest[len / NFT_REG32_SIZE] = 0;
++
++	return skb_copy_bits(skb, offset, dest, len);
++}
++
+ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+ 				 struct nft_regs *regs,
+ 				 const struct nft_pktinfo *pkt)
+@@ -56,8 +64,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+ 	}
+ 	offset += priv->offset;
+ 
+-	dest[priv->len / NFT_REG32_SIZE] = 0;
+-	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
++	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
+ 		goto err;
+ 	return;
+ err:
+@@ -153,8 +160,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
+ 	}
+ 	offset += priv->offset;
+ 
+-	dest[priv->len / NFT_REG32_SIZE] = 0;
+-	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
++	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
+ 		goto err;
+ 	return;
+ err:
+@@ -210,7 +216,8 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+ 		if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+ 			*dest = 1;
+ 		} else {
+-			dest[priv->len / NFT_REG32_SIZE] = 0;
++			if (priv->len % NFT_REG32_SIZE)
++				dest[priv->len / NFT_REG32_SIZE] = 0;
+ 			memcpy(dest, opt + offset, priv->len);
+ 		}
+ 
+@@ -388,9 +395,8 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
+ 			    offset + ntohs(sch->length) > pkt->skb->len)
+ 				break;
+ 
+-			dest[priv->len / NFT_REG32_SIZE] = 0;
+-			if (skb_copy_bits(pkt->skb, offset + priv->offset,
+-					  dest, priv->len) < 0)
++			if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
++						dest, priv->len) < 0)
+ 				break;
+ 			return;
+ 		}
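The old code zeroed dest[priv->len / NFT_REG32_SIZE] unconditionally; when len is an exact multiple of the 4-byte register size, that index is one word past the copied data, so the store clobbered the adjacent register. nft_skb_copy_to_reg() pads only when a partial tail word really exists. The same idea in plain C:

	#include <stdint.h>
	#include <string.h>

	#define REG32_SIZE 4

	/* Zero-pad only the tail word the copy leaves partially written;
	 * when len is a multiple, nothing beyond the data is touched.
	 */
	static void copy_to_regs(uint32_t *dest, const void *src,
				 unsigned int len)
	{
		if (len % REG32_SIZE)
			dest[len / REG32_SIZE] = 0;
		memcpy(dest, src, len);
	}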
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index c6435e7092319..f250b5399344a 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -312,6 +312,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+ 	struct rb_node *node, *next, *parent, **p, *first = NULL;
+ 	struct nft_rbtree *priv = nft_set_priv(set);
++	u8 cur_genmask = nft_genmask_cur(net);
+ 	u8 genmask = nft_genmask_next(net);
+ 	int d, err;
+ 
+@@ -357,8 +358,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		if (!nft_set_elem_active(&rbe->ext, genmask))
+ 			continue;
+ 
+-		/* perform garbage collection to avoid bogus overlap reports. */
+-		if (nft_set_elem_expired(&rbe->ext)) {
++		/* perform garbage collection to avoid bogus overlap reports
++		 * but skip new elements in this transaction.
++		 */
++		if (nft_set_elem_expired(&rbe->ext) &&
++		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ 			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ 			if (err < 0)
+ 				return err;
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 591d87d5e5c0f..68e6acd0f130d 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -61,6 +61,7 @@ struct fq_pie_sched_data {
+ 	struct pie_params p_params;
+ 	u32 ecn_prob;
+ 	u32 flows_cnt;
++	u32 flows_cursor;
+ 	u32 quantum;
+ 	u32 memory_limit;
+ 	u32 new_flow_count;
+@@ -375,22 +376,32 @@ flow_error:
+ static void fq_pie_timer(struct timer_list *t)
+ {
+ 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
++	unsigned long next, tupdate;
+ 	struct Qdisc *sch = q->sch;
+ 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+-	u32 idx;
++	int max_cnt, i;
+ 
+ 	rcu_read_lock();
+ 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 
+-	for (idx = 0; idx < q->flows_cnt; idx++)
+-		pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
+-					  q->flows[idx].backlog);
+-
+-	/* reset the timer to fire after 'tupdate' jiffies. */
+-	if (q->p_params.tupdate)
+-		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
++	/* Limit this expensive loop to 2048 flows per round. */
++	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
++	for (i = 0; i < max_cnt; i++) {
++		pie_calculate_probability(&q->p_params,
++					  &q->flows[q->flows_cursor].vars,
++					  q->flows[q->flows_cursor].backlog);
++		q->flows_cursor++;
++	}
+ 
++	tupdate = q->p_params.tupdate;
++	next = 0;
++	if (q->flows_cursor >= q->flows_cnt) {
++		q->flows_cursor = 0;
++		next = tupdate;
++	}
++	if (tupdate)
++		mod_timer(&q->adapt_timer, jiffies + next);
+ 	spin_unlock(root_lock);
+ 	rcu_read_unlock();
+ }
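With a large flows_cnt the old loop could hold the qdisc root lock long enough to stall the CPU, so the timer now visits at most 2048 flows per firing, remembers its position in flows_cursor, and only waits the full tupdate once a sweep completes. A self-contained sketch of the cursor pattern, names illustrative:

	#define BATCH 2048

	struct sweeper {
		unsigned int count, cursor;
	};

	/* Returns the delay before the next tick: 0 while mid-sweep, the
	 * normal interval once every item has been visited.
	 */
	static unsigned long tick(struct sweeper *s, unsigned long interval,
				  void (*visit)(unsigned int idx))
	{
		unsigned int n = s->count - s->cursor;

		if (n > BATCH)
			n = BATCH;
		while (n--)
			visit(s->cursor++);

		if (s->cursor >= s->count) {
			s->cursor = 0;
			return interval;
		}
		return 0;
	}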
+diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
+index ea8c4a7174bba..35f49edf63dbf 100644
+--- a/net/sched/sch_plug.c
++++ b/net/sched/sch_plug.c
+@@ -207,7 +207,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
+ 	.priv_size   =       sizeof(struct plug_sched_data),
+ 	.enqueue     =       plug_enqueue,
+ 	.dequeue     =       plug_dequeue,
+-	.peek        =       qdisc_peek_head,
++	.peek        =       qdisc_peek_dequeued,
+ 	.init        =       plug_init,
+ 	.change      =       plug_change,
+ 	.reset       =	     qdisc_reset_queue,
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index befaf74b33caa..09d2955baab10 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -974,10 +974,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
+ }
+ 
+ /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
+-static void agg_dequeue(struct qfq_aggregate *agg,
+-			struct qfq_class *cl, unsigned int len)
++static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
++				   struct qfq_class *cl, unsigned int len)
+ {
+-	qdisc_dequeue_peeked(cl->qdisc);
++	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
++
++	if (!skb)
++		return NULL;
+ 
+ 	cl->deficit -= (int) len;
+ 
+@@ -987,6 +990,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
+ 		cl->deficit += agg->lmax;
+ 		list_move_tail(&cl->alist, &agg->active);
+ 	}
++
++	return skb;
+ }
+ 
+ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
+@@ -1132,11 +1137,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
+ 	if (!skb)
+ 		return NULL;
+ 
+-	qdisc_qstats_backlog_dec(sch, skb);
+ 	sch->q.qlen--;
++
++	skb = agg_dequeue(in_serv_agg, cl, len);
++
++	if (!skb) {
++		sch->q.qlen++;
++		return NULL;
++	}
++
++	qdisc_qstats_backlog_dec(sch, skb);
+ 	qdisc_bstats_update(sch, skb);
+ 
+-	agg_dequeue(in_serv_agg, cl, len);
+ 	/* If lmax is lowered, through qfq_change_class, for a class
+ 	 * owning pending packets with larger size than the new value
+ 	 * of lmax, then the following condition may hold.
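qfq_dequeue() used to adjust queue stats against the peeked skb and only then call agg_dequeue(); if the child qdisc's dequeue came back empty (peek and dequeue can disagree once the class configuration changes underneath), the stats were already wrong and a stale pointer was in play. agg_dequeue() now returns the skb it actually removed, and the caller rolls the qlen adjustment back on NULL. Schematically:

	struct qd { unsigned int qlen; };

	/* Trust the packet dequeue actually returned, not the one peek
	 * promised, and undo the accounting when they disagree.
	 */
	static void *robust_dequeue(struct qd *q, void *(*dequeue)(void))
	{
		void *pkt;

		q->qlen--;
		pkt = dequeue();
		if (!pkt)
			q->qlen++;
		return pkt;
	}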
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index f13d6a34f32f2..ec00ee75d59a6 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -282,7 +282,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+ 		assoc->init_retries, assoc->shutdown_retries,
+ 		assoc->rtx_data_chunks,
+ 		refcount_read(&sk->sk_wmem_alloc),
+-		sk->sk_wmem_queued,
++		READ_ONCE(sk->sk_wmem_queued),
+ 		sk->sk_sndbuf,
+ 		sk->sk_rcvbuf);
+ 	seq_printf(seq, "\n");
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 423dc400992ba..7cf207706eb66 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -69,7 +69,7 @@
+ #include <net/sctp/stream_sched.h>
+ 
+ /* Forward declarations for internal helper functions. */
+-static bool sctp_writeable(struct sock *sk);
++static bool sctp_writeable(const struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 				size_t msg_len);
+@@ -140,7 +140,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+ 
+ 	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+ 	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
+-	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
++	sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
+ 	sk_mem_charge(sk, chunk->skb->truesize);
+ }
+ 
+@@ -9144,7 +9144,7 @@ static void sctp_wfree(struct sk_buff *skb)
+ 	struct sock *sk = asoc->base.sk;
+ 
+ 	sk_mem_uncharge(sk, skb->truesize);
+-	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
++	sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
+ 	asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
+ 	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
+ 				      &sk->sk_wmem_alloc));
+@@ -9299,9 +9299,9 @@ void sctp_write_space(struct sock *sk)
+  * UDP-style sockets or TCP-style sockets, this code should work.
+  *  - Daisy
+  */
+-static bool sctp_writeable(struct sock *sk)
++static bool sctp_writeable(const struct sock *sk)
+ {
+-	return sk->sk_sndbuf > sk->sk_wmem_queued;
++	return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
+ }
+ 
+ /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 6b78075404d7d..3e89bb9b7c56c 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1654,6 +1654,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+ {
+ 	struct smc_link_group *lgr, *n;
+ 
++	spin_lock_bh(&smc_lgr_list.lock);
+ 	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+ 		struct smc_link *link;
+ 
+@@ -1669,6 +1670,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+ 		if (link)
+ 			smc_llc_add_link_local(link);
+ 	}
++	spin_unlock_bh(&smc_lgr_list.lock);
+ }
+ 
+ /* link is down - switch connections to alternate link,
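smcr_port_add() walks the global link-group list while other contexts add and remove entries; list_for_each_entry_safe() only protects against removals made by the walker itself, so the traversal must hold the same lock the writers take (the _bh variant, because the list is also touched from softirq context). The general rule in a userspace sketch with pthreads:

	#include <pthread.h>

	struct node { struct node *next; };

	static struct node *head;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* A shared list must not be walked without the lock its writers
	 * take, even when the walker itself never modifies it.
	 */
	static void for_each_node(void (*fn)(struct node *))
	{
		pthread_mutex_lock(&list_lock);
		for (struct node *n = head; n; n = n->next)
			fn(n);
		pthread_mutex_unlock(&list_lock);
	}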
+diff --git a/net/socket.c b/net/socket.c
+index f49edb9b49185..b5639a6500158 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -821,7 +821,7 @@ static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
+ 
+ static ktime_t get_timestamp(struct sock *sk, struct sk_buff *skb, int *if_index)
+ {
+-	bool cycles = sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC;
++	bool cycles = READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC;
+ 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ 	struct net_device *orig_dev;
+ 	ktime_t hwtstamp;
+@@ -873,12 +873,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
+ 	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ 	struct scm_timestamping_internal tss;
+-
+ 	int empty = 1, false_tstamp = 0;
+ 	struct skb_shared_hwtstamps *shhwtstamps =
+ 		skb_hwtstamps(skb);
+ 	int if_index;
+ 	ktime_t hwtstamp;
++	u32 tsflags;
+ 
+ 	/* Race occurred between timestamp enabling and packet
+ 	   receiving.  Fill in the current time for now. */
+@@ -920,11 +920,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 	}
+ 
+ 	memset(&tss, 0, sizeof(tss));
+-	if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
++	tsflags = READ_ONCE(sk->sk_tsflags);
++	if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ 	    ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
+ 		empty = 0;
+ 	if (shhwtstamps &&
+-	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
++	    (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ 	    !skb_is_swtx_tstamp(skb, false_tstamp)) {
+ 		if_index = 0;
+ 		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
+@@ -932,14 +933,14 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 		else
+ 			hwtstamp = shhwtstamps->hwtstamp;
+ 
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
++		if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ 			hwtstamp = ptp_convert_timestamp(&hwtstamp,
+-							 sk->sk_bind_phc);
++							 READ_ONCE(sk->sk_bind_phc));
+ 
+ 		if (ktime_to_timespec64_cond(hwtstamp, tss.ts + 2)) {
+ 			empty = 0;
+ 
+-			if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
++			if ((tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ 			    !skb_is_err_queue(skb))
+ 				put_ts_pktinfo(msg, skb, if_index);
+ 		}
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 53f944e6d8ef2..e047abc600893 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -817,7 +817,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ 	psock = sk_psock_get(sk);
+ 	if (!psock || !policy) {
+ 		err = tls_push_record(sk, flags, record_type);
+-		if (err && sk->sk_err == EBADMSG) {
++		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ 			*copied -= sk_msg_free(sk, msg);
+ 			tls_free_open_rec(sk);
+ 			err = -sk->sk_err;
+@@ -846,7 +846,7 @@ more_data:
+ 	switch (psock->eval) {
+ 	case __SK_PASS:
+ 		err = tls_push_record(sk, flags, record_type);
+-		if (err && sk->sk_err == EBADMSG) {
++		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ 			*copied -= sk_msg_free(sk, msg);
+ 			tls_free_open_rec(sk);
+ 			err = -sk->sk_err;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 86930a8ed012b..3e8a04a136688 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -680,7 +680,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	 *	  What the above comment does talk about? --ANK(980817)
+ 	 */
+ 
+-	if (unix_tot_inflight)
++	if (READ_ONCE(unix_tot_inflight))
+ 		unix_gc();		/* Garbage collect fds */
+ }
+ 
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index f9152881d77f6..033e21e5c4df6 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -63,7 +63,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ 	}
+-	user->unix_inflight++;
++	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+@@ -84,7 +84,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+ 	}
+-	user->unix_inflight--;
++	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+@@ -98,7 +98,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
+ {
+ 	struct user_struct *user = current_user();
+ 
+-	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ 	return false;
+ }
+diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
+index c014217f5fa7d..22b36c8143cfd 100644
+--- a/net/xdp/xsk_diag.c
++++ b/net/xdp/xsk_diag.c
+@@ -111,6 +111,9 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
+ 	sock_diag_save_cookie(sk, msg->xdiag_cookie);
+ 
+ 	mutex_lock(&xs->mutex);
++	if (READ_ONCE(xs->state) == XSK_UNBOUND)
++		goto out_nlmsg_trim;
++
+ 	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
+ 		goto out_nlmsg_trim;
+ 
+diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
+index 1db1889f6d81e..07f6dc4c5cf69 100755
+--- a/scripts/dummy-tools/gcc
++++ b/scripts/dummy-tools/gcc
+@@ -85,8 +85,7 @@ if arg_contain -S "$@"; then
+ 	fi
+ 
+ 	# For arch/powerpc/tools/gcc-check-mprofile-kernel.sh
+-	if arg_contain -m64 "$@" && arg_contain -mlittle-endian "$@" &&
+-		arg_contain -mprofile-kernel "$@"; then
++	if arg_contain -m64 "$@" && arg_contain -mprofile-kernel "$@"; then
+ 		if ! test -t 0 && ! grep -q notrace; then
+ 			echo "_mcount"
+ 		fi
+diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
+index 748da578b418c..d1f5bcff4b62d 100644
+--- a/scripts/kconfig/preprocess.c
++++ b/scripts/kconfig/preprocess.c
+@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])
+ 
+ 		p++;
+ 	}
++
++	if (new_argc >= FUNCTION_MAX_ARGS)
++		pperror("too many function arguments");
+ 	new_argv[new_argc++] = prev;
+ 
+ 	/*
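eval_clause() collects comma-separated arguments into new_argv[FUNCTION_MAX_ARGS]; without the added check, a $(...) call with too many arguments made the final new_argv[new_argc++] = prev store write one slot past the array. The guard-before-append shape, in a small standalone form:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_ARGS 16

	/* The append below is only safe while *argc < MAX_ARGS, so an
	 * overflow must be rejected before the store, not after.
	 */
	static void push_arg(char *argv[MAX_ARGS], int *argc, char *arg)
	{
		if (*argc >= MAX_ARGS) {
			fprintf(stderr, "too many function arguments\n");
			exit(1);
		}
		argv[(*argc)++] = arg;
	}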
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index b29b29707f104..ba981f22908ad 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1226,6 +1226,15 @@ static void check_export_symbol(struct module *mod, struct elf_info *elf,
+ 	 */
+ 	s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
+ 
++	/*
++	 * For parisc64, symbols prefixed with $$ from the library have the symbol type
++	 * STT_LOPROC. They should be handled as functions too.
++	 */
++	if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 &&
++	    elf->hdr->e_machine == EM_PARISC &&
++	    ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
++		s->is_func = true;
++
+ 	if (match(secname, PATTERNS(INIT_SECTIONS)))
+ 		warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
+ 		     mod->name, name);
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 8049f0e2c110f..c9299f9c1f3e4 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -57,7 +57,7 @@ $S	BuildRequires: gcc make openssl openssl-devel perl python3 rsync
+ 
+ 	# $UTS_MACHINE as a fallback of _arch in case
+ 	# /usr/lib/rpm/platform/*/macros was not included.
+-	%define _arch %{?_arch:$UTS_MACHINE}
++	%{!?_arch: %define _arch $UTS_MACHINE}
+ 	%define __spec_install_post /usr/lib/rpm/brp-compress || :
+ 	%define debug_package %{nil}
+ 
+diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
+index e9df1ffc8a584..c2240babd6017 100644
+--- a/sound/soc/tegra/tegra210_sfc.c
++++ b/sound/soc/tegra/tegra210_sfc.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_sfc.c - Tegra210 SFC driver
+ //
+-// Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++// Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -42,6 +42,7 @@ static const int tegra210_sfc_rates[TEGRA210_SFC_NUM_RATES] = {
+ 	32000,
+ 	44100,
+ 	48000,
++	64000,
+ 	88200,
+ 	96000,
+ 	176400,
+@@ -2857,6 +2858,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_8to32,
+ 		coef_8to44,
+ 		coef_8to48,
++		UNSUPP_CONV,
+ 		coef_8to88,
+ 		coef_8to96,
+ 		UNSUPP_CONV,
+@@ -2872,6 +2874,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_11to32,
+ 		coef_11to44,
+ 		coef_11to48,
++		UNSUPP_CONV,
+ 		coef_11to88,
+ 		coef_11to96,
+ 		UNSUPP_CONV,
+@@ -2887,6 +2890,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_16to32,
+ 		coef_16to44,
+ 		coef_16to48,
++		UNSUPP_CONV,
+ 		coef_16to88,
+ 		coef_16to96,
+ 		coef_16to176,
+@@ -2902,6 +2906,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_22to32,
+ 		coef_22to44,
+ 		coef_22to48,
++		UNSUPP_CONV,
+ 		coef_22to88,
+ 		coef_22to96,
+ 		coef_22to176,
+@@ -2917,6 +2922,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_24to32,
+ 		coef_24to44,
+ 		coef_24to48,
++		UNSUPP_CONV,
+ 		coef_24to88,
+ 		coef_24to96,
+ 		coef_24to176,
+@@ -2932,6 +2938,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		BYPASS_CONV,
+ 		coef_32to44,
+ 		coef_32to48,
++		UNSUPP_CONV,
+ 		coef_32to88,
+ 		coef_32to96,
+ 		coef_32to176,
+@@ -2947,6 +2954,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_44to32,
+ 		BYPASS_CONV,
+ 		coef_44to48,
++		UNSUPP_CONV,
+ 		coef_44to88,
+ 		coef_44to96,
+ 		coef_44to176,
+@@ -2962,11 +2970,28 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_48to32,
+ 		coef_48to44,
+ 		BYPASS_CONV,
++		UNSUPP_CONV,
+ 		coef_48to88,
+ 		coef_48to96,
+ 		coef_48to176,
+ 		coef_48to192,
+ 	},
++	/* Conversions from 64 kHz */
++	{
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++	},
+ 	/* Convertions from 88.2 kHz */
+ 	{
+ 		coef_88to8,
+@@ -2977,6 +3002,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_88to32,
+ 		coef_88to44,
+ 		coef_88to48,
++		UNSUPP_CONV,
+ 		BYPASS_CONV,
+ 		coef_88to96,
+ 		coef_88to176,
+@@ -2991,6 +3017,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_96to32,
+ 		coef_96to44,
+ 		coef_96to48,
++		UNSUPP_CONV,
+ 		coef_96to88,
+ 		BYPASS_CONV,
+ 		coef_96to176,
+@@ -3006,6 +3033,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_176to32,
+ 		coef_176to44,
+ 		coef_176to48,
++		UNSUPP_CONV,
+ 		coef_176to88,
+ 		coef_176to96,
+ 		BYPASS_CONV,
+@@ -3021,6 +3049,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_192to32,
+ 		coef_192to44,
+ 		coef_192to48,
++		UNSUPP_CONV,
+ 		coef_192to88,
+ 		coef_192to96,
+ 		coef_192to176,
+diff --git a/sound/soc/tegra/tegra210_sfc.h b/sound/soc/tegra/tegra210_sfc.h
+index 5a6b66e297d8f..a4c993d79403a 100644
+--- a/sound/soc/tegra/tegra210_sfc.h
++++ b/sound/soc/tegra/tegra210_sfc.h
+@@ -2,7 +2,7 @@
+ /*
+  * tegra210_sfc.h - Definitions for Tegra210 SFC driver
+  *
+- * Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++ * Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+  *
+  */
+ 
+@@ -47,7 +47,7 @@
+ #define TEGRA210_SFC_EN_SHIFT			0
+ #define TEGRA210_SFC_EN				(1 << TEGRA210_SFC_EN_SHIFT)
+ 
+-#define TEGRA210_SFC_NUM_RATES 12
++#define TEGRA210_SFC_NUM_RATES 13
+ 
+ /* Fields in TEGRA210_SFC_COEF_RAM */
+ #define TEGRA210_SFC_COEF_RAM_EN		BIT(0)
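Adding the 64 kHz rate touches four places that must stay in sync: TEGRA210_SFC_NUM_RATES goes from 12 to 13, the rate list gains an entry, every existing row of coef_addr_table gains an UNSUPP_CONV column, and a new all-UNSUPP_CONV row covers conversions from 64 kHz, for which no coefficients exist. A hypothetical compile-time guard, not part of this patch, could keep the list and the count from drifting apart next time (the lower rates below are assumed; the hunk only shows the list from 32 kHz up):

	#define NUM_RATES 13	/* mirrors TEGRA210_SFC_NUM_RATES */

	static const int rates[] = {
		8000, 11025, 16000, 22050, 24000, 32000, 44100,
		48000, 64000, 88200, 96000, 176400, 192000,
	};

	_Static_assert(sizeof(rates) / sizeof(rates[0]) == NUM_RATES,
		       "rate list out of sync with NUM_RATES");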
+diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
+index 89430338a3d92..fac42486a8cf0 100644
+--- a/tools/build/Makefile.build
++++ b/tools/build/Makefile.build
+@@ -117,6 +117,16 @@ $(OUTPUT)%.s: %.c FORCE
+ 	$(call rule_mkdir)
+ 	$(call if_changed_dep,cc_s_c)
+ 
++# bison and flex files are generated in the OUTPUT directory,
++# so they need a separate rule to depend on them properly
++$(OUTPUT)%-bison.o: $(OUTPUT)%-bison.c FORCE
++	$(call rule_mkdir)
++	$(call if_changed_dep,$(host)cc_o_c)
++
++$(OUTPUT)%-flex.o: $(OUTPUT)%-flex.c FORCE
++	$(call rule_mkdir)
++	$(call if_changed_dep,$(host)cc_o_c)
++
+ # Gather build data:
+ #   obj-y        - list of build objects
+ #   subdir-y     - list of directories to nest
+diff --git a/tools/mm/Makefile b/tools/mm/Makefile
+index 6c1da51f4177c..1c5606cc33346 100644
+--- a/tools/mm/Makefile
++++ b/tools/mm/Makefile
+@@ -8,8 +8,8 @@ TARGETS=page-types slabinfo page_owner_sort
+ LIB_DIR = ../lib/api
+ LIBS = $(LIB_DIR)/libapi.a
+ 
+-CFLAGS += -Wall -Wextra -I../lib/
+-LDFLAGS += $(LIBS)
++CFLAGS += -Wall -Wextra -I../lib/ -pthread
++LDFLAGS += $(LIBS) -pthread
+ 
+ all: $(TARGETS)
+ 
+diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
+index fb22e3b31dc5c..8887cc20a809e 100644
+--- a/tools/perf/Documentation/perf-dlfilter.txt
++++ b/tools/perf/Documentation/perf-dlfilter.txt
+@@ -64,6 +64,12 @@ internal filtering.
+ If implemented, 'filter_description' should return a one-line description
+ of the filter, and optionally a longer description.
+ 
++Do not assume the 'sample' argument is valid (dereferenceable)
++after 'filter_event' and 'filter_event_early' return.
++
++Do not assume data referenced by pointers in struct perf_dlfilter_sample
++is valid (dereferenceable) after 'filter_event' and 'filter_event_early' return.
++
+ The perf_dlfilter_sample structure
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+@@ -150,7 +156,8 @@ struct perf_dlfilter_fns {
+ 	const char *(*srcline)(void *ctx, __u32 *line_number);
+ 	struct perf_event_attr *(*attr)(void *ctx);
+ 	__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+-	void *(*reserved[120])(void *);
++	void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
++	void *(*reserved[119])(void *);
+ };
+ ----
+ 
+@@ -161,7 +168,8 @@ struct perf_dlfilter_fns {
+ 'args' returns arguments from --dlarg options.
+ 
+ 'resolve_address' provides information about 'address'. al->size must be set
+-before calling. Returns 0 on success, -1 otherwise.
++before calling. Returns 0 on success, -1 otherwise. Call al_cleanup() (if present,
++see below) when 'al' data is no longer needed.
+ 
+ 'insn' returns instruction bytes and length.
+ 
+@@ -171,6 +179,12 @@ before calling. Returns 0 on success, -1 otherwise.
+ 
+ 'object_code' reads object code and returns the number of bytes read.
+ 
++'al_cleanup' must be called (if present, so check perf_dlfilter_fns.al_cleanup != NULL)
++after resolve_address() to free any associated resources.
++
++Do not assume pointers obtained via perf_dlfilter_fns are valid (dereferenceable)
++after 'filter_event' and 'filter_event_early' return.
++
+ The perf_dlfilter_al structure
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+@@ -197,9 +211,13 @@ struct perf_dlfilter_al {
+ 	/* Below members are only populated by resolve_ip() */
+ 	__u8 filtered; /* true if this sample event will be filtered out */
+ 	const char *comm;
++	void *priv; /* Private data. Do not change */
+ };
+ ----
+ 
++Do not assume data referenced by pointers in struct perf_dlfilter_al
++is valid (dereferenceable) after 'filter_event' and 'filter_event_early' return.
++
+ perf_dlfilter_sample flags
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
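Putting the new lifetime rules together, a v2-aware use of resolve_address() inside a filter looks roughly like the sketch below; ctx and address are whatever filter_event() received, and perf_dlfilter_fns is the table the filter defines and perf fills in:

	#include <perf/perf_dlfilter.h>
	#include <string.h>

	static int inspect_address(void *ctx, __u64 address)
	{
		struct perf_dlfilter_al al;

		memset(&al, 0, sizeof(al));
		al.size = sizeof(al);
		if (perf_dlfilter_fns.resolve_address(ctx, address, &al))
			return -1;

		/* ... look at al.sym / al.dso here; do not cache them ... */

		if (perf_dlfilter_fns.al_cleanup)	/* v2 API and later */
			perf_dlfilter_fns.al_cleanup(ctx, &al);
		return 0;
	}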
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 097316ef38e6a..f178b36c69402 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -381,7 +381,7 @@ ifndef NO_JVMTI
+ PROGRAMS += $(OUTPUT)$(LIBJVMTI)
+ endif
+ 
+-DLFILTERS := dlfilter-test-api-v0.so dlfilter-show-cycles.so
++DLFILTERS := dlfilter-test-api-v0.so dlfilter-test-api-v2.so dlfilter-show-cycles.so
+ DLFILTERS := $(patsubst %,$(OUTPUT)dlfilters/%,$(DLFILTERS))
+ 
+ # what 'all' will build and 'install' will install, in perfexecdir
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index c15386cb10331..0cf1c5a2e0323 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -2052,6 +2052,7 @@ static int __cmd_contention(int argc, const char **argv)
+ 	if (IS_ERR(session)) {
+ 		pr_err("Initializing perf session failed\n");
+ 		err = PTR_ERR(session);
++		session = NULL;
+ 		goto out_delete;
+ 	}
+ 
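perf_session__new() returns an ERR_PTR()-encoded error rather than NULL, and the shared out_delete label hands session to perf_session__delete(); without resetting the pointer, the error cookie would be freed as if it were a real session. builtin-top.c below receives the identical fix. A runnable userspace model of the convention:

	#include <errno.h>
	#include <stdlib.h>

	#define ERR_PTR(err)	((void *)(long)(err))
	#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095)
	#define PTR_ERR(p)	((long)(p))

	static void session_delete(void *s)
	{
		free(s);	/* like perf_session__delete(), NULL is a no-op */
	}

	static int open_session(void **out)
	{
		void *s = ERR_PTR(-ENOMEM);	/* pretend creation failed */

		if (IS_ERR(s)) {
			long err = PTR_ERR(s);

			s = NULL;	/* the fix: never free an error cookie */
			session_delete(s);
			return (int)err;
		}
		*out = s;
		return 0;
	}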
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 200b3e7ea8dad..517bf25750c8b 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -2199,6 +2199,17 @@ static void process_event(struct perf_script *script,
+ 	if (PRINT_FIELD(RETIRE_LAT))
+ 		fprintf(fp, "%16" PRIu16, sample->retire_lat);
+ 
++	if (PRINT_FIELD(CGROUP)) {
++		const char *cgrp_name;
++		struct cgroup *cgrp = cgroup__find(machine->env,
++						   sample->cgroup);
++		if (cgrp != NULL)
++			cgrp_name = cgrp->name;
++		else
++			cgrp_name = "unknown";
++		fprintf(fp, " %s", cgrp_name);
++	}
++
+ 	if (PRINT_FIELD(IP)) {
+ 		struct callchain_cursor *cursor = NULL;
+ 
+@@ -2243,17 +2254,6 @@ static void process_event(struct perf_script *script,
+ 	if (PRINT_FIELD(CODE_PAGE_SIZE))
+ 		fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
+ 
+-	if (PRINT_FIELD(CGROUP)) {
+-		const char *cgrp_name;
+-		struct cgroup *cgrp = cgroup__find(machine->env,
+-						   sample->cgroup);
+-		if (cgrp != NULL)
+-			cgrp_name = cgrp->name;
+-		else
+-			cgrp_name = "unknown";
+-		fprintf(fp, " %s", cgrp_name);
+-	}
+-
+ 	perf_sample__fprintf_ipc(sample, attr, fp);
+ 
+ 	fprintf(fp, "\n");
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 1baa2acb3cedd..ea8c7eca5eeed 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1805,6 +1805,7 @@ int cmd_top(int argc, const char **argv)
+ 	top.session = perf_session__new(NULL, NULL);
+ 	if (IS_ERR(top.session)) {
+ 		status = PTR_ERR(top.session);
++		top.session = NULL;
+ 		goto out_delete_evlist;
+ 	}
+ 
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 6e73d0e957152..4aba576512a15 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -3136,13 +3136,8 @@ static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+ 	struct evsel *evsel;
+ 
+ 	evlist__for_each_entry(evlist, evsel) {
+-		struct evsel_trace *et = evsel->priv;
+-
+-		if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+-			continue;
+-
+-		zfree(&et->fmt);
+-		free(et);
++		evsel_trace__delete(evsel->priv);
++		evsel->priv = NULL;
+ 	}
+ }
+ 
+diff --git a/tools/perf/dlfilters/dlfilter-test-api-v2.c b/tools/perf/dlfilters/dlfilter-test-api-v2.c
+new file mode 100644
+index 0000000000000..38e593d92920c
+--- /dev/null
++++ b/tools/perf/dlfilters/dlfilter-test-api-v2.c
+@@ -0,0 +1,377 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Test v2 API for perf --dlfilter shared object
++ * Copyright (c) 2023, Intel Corporation.
++ */
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <stdbool.h>
++
++/*
++ * Copy v2 API instead of including current API
++ */
++#include <linux/perf_event.h>
++#include <linux/types.h>
++
++/*
++ * The following macro can be used to determine if this header defines
++ * perf_dlfilter_sample machine_pid and vcpu.
++ */
++#define PERF_DLFILTER_HAS_MACHINE_PID
++
++/* Definitions for perf_dlfilter_sample flags */
++enum {
++	PERF_DLFILTER_FLAG_BRANCH	= 1ULL << 0,
++	PERF_DLFILTER_FLAG_CALL		= 1ULL << 1,
++	PERF_DLFILTER_FLAG_RETURN	= 1ULL << 2,
++	PERF_DLFILTER_FLAG_CONDITIONAL	= 1ULL << 3,
++	PERF_DLFILTER_FLAG_SYSCALLRET	= 1ULL << 4,
++	PERF_DLFILTER_FLAG_ASYNC	= 1ULL << 5,
++	PERF_DLFILTER_FLAG_INTERRUPT	= 1ULL << 6,
++	PERF_DLFILTER_FLAG_TX_ABORT	= 1ULL << 7,
++	PERF_DLFILTER_FLAG_TRACE_BEGIN	= 1ULL << 8,
++	PERF_DLFILTER_FLAG_TRACE_END	= 1ULL << 9,
++	PERF_DLFILTER_FLAG_IN_TX	= 1ULL << 10,
++	PERF_DLFILTER_FLAG_VMENTRY	= 1ULL << 11,
++	PERF_DLFILTER_FLAG_VMEXIT	= 1ULL << 12,
++};
++
++/*
++ * perf sample event information (as per perf script and <linux/perf_event.h>)
++ */
++struct perf_dlfilter_sample {
++	__u32 size; /* Size of this structure (for compatibility checking) */
++	__u16 ins_lat;		/* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
++	__u16 p_stage_cyc;	/* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
++	__u64 ip;
++	__s32 pid;
++	__s32 tid;
++	__u64 time;
++	__u64 addr;
++	__u64 id;
++	__u64 stream_id;
++	__u64 period;
++	__u64 weight;		/* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
++	__u64 transaction;	/* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
++	__u64 insn_cnt;	/* For instructions-per-cycle (IPC) */
++	__u64 cyc_cnt;		/* For instructions-per-cycle (IPC) */
++	__s32 cpu;
++	__u32 flags;		/* Refer PERF_DLFILTER_FLAG_* above */
++	__u64 data_src;		/* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
++	__u64 phys_addr;	/* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
++	__u64 data_page_size;	/* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
++	__u64 code_page_size;	/* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
++	__u64 cgroup;		/* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
++	__u8  cpumode;		/* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
++	__u8  addr_correlates_sym; /* True => resolve_addr() can be called */
++	__u16 misc;		/* Refer perf_event_header in <linux/perf_event.h> */
++	__u32 raw_size;		/* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
++	const void *raw_data;	/* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
++	__u64 brstack_nr;	/* Number of brstack entries */
++	const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
++	__u64 raw_callchain_nr;	/* Number of raw_callchain entries */
++	const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
++	const char *event;
++	__s32 machine_pid;
++	__s32 vcpu;
++};
++
++/*
++ * Address location (as per perf script)
++ */
++struct perf_dlfilter_al {
++	__u32 size; /* Size of this structure (for compatibility checking) */
++	__u32 symoff;
++	const char *sym;
++	__u64 addr; /* Mapped address (from dso) */
++	__u64 sym_start;
++	__u64 sym_end;
++	const char *dso;
++	__u8  sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
++	__u8  is_64_bit; /* Only valid if dso is not NULL */
++	__u8  is_kernel_ip; /* True if in kernel space */
++	__u32 buildid_size;
++	__u8 *buildid;
++	/* Below members are only populated by resolve_ip() */
++	__u8 filtered; /* True if this sample event will be filtered out */
++	const char *comm;
++	void *priv; /* Private data (v2 API) */
++};
++
++struct perf_dlfilter_fns {
++	/* Return information about ip */
++	const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
++	/* Return information about addr (if addr_correlates_sym) */
++	const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
++	/* Return arguments from --dlarg option */
++	char **(*args)(void *ctx, int *dlargc);
++	/*
++	 * Return information about address (al->size must be set before
++	 * calling). Returns 0 on success, -1 otherwise. Call al_cleanup()
++	 * when 'al' data is no longer needed.
++	 */
++	__s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
++	/* Return instruction bytes and length */
++	const __u8 *(*insn)(void *ctx, __u32 *length);
++	/* Return source file name and line number */
++	const char *(*srcline)(void *ctx, __u32 *line_number);
++	/* Return perf_event_attr, refer <linux/perf_event.h> */
++	struct perf_event_attr *(*attr)(void *ctx);
++	/* Read object code, return number of bytes read */
++	__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
++	/*
++	 * If present (i.e. must check al_cleanup != NULL), call after
++	 * resolve_address() to free any associated resources. (v2 API)
++	 */
++	void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
++	/* Reserved */
++	void *(*reserved[119])(void *);
++};
++
++struct perf_dlfilter_fns perf_dlfilter_fns;
++
++static int verbose;
++
++#define pr_debug(fmt, ...) do { \
++		if (verbose > 0) \
++			fprintf(stderr, fmt, ##__VA_ARGS__); \
++	} while (0)
++
++static int test_fail(const char *msg)
++{
++	pr_debug("%s\n", msg);
++	return -1;
++}
++
++#define CHECK(x) do { \
++		if (!(x)) \
++			return test_fail("Check '" #x "' failed\n"); \
++	} while (0)
++
++struct filter_data {
++	__u64 ip;
++	__u64 addr;
++	int do_early;
++	int early_filter_cnt;
++	int filter_cnt;
++};
++
++static struct filter_data *filt_dat;
++
++int start(void **data, void *ctx)
++{
++	int dlargc;
++	char **dlargv;
++	struct filter_data *d;
++	static bool called;
++
++	verbose = 1;
++
++	CHECK(!filt_dat && !called);
++	called = true;
++
++	d = calloc(1, sizeof(*d));
++	if (!d)
++		return test_fail("Failed to allocate memory");
++	filt_dat = d;
++	*data = d;
++
++	dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
++
++	CHECK(dlargc == 6);
++	CHECK(!strcmp(dlargv[0], "first"));
++	verbose = strtol(dlargv[1], NULL, 0);
++	d->ip = strtoull(dlargv[2], NULL, 0);
++	d->addr = strtoull(dlargv[3], NULL, 0);
++	d->do_early = strtol(dlargv[4], NULL, 0);
++	CHECK(!strcmp(dlargv[5], "last"));
++
++	pr_debug("%s API\n", __func__);
++
++	return 0;
++}
++
++#define CHECK_SAMPLE(x) do { \
++		if (sample->x != expected.x) \
++			return test_fail("'" #x "' not expected value\n"); \
++	} while (0)
++
++static int check_sample(struct filter_data *d, const struct perf_dlfilter_sample *sample)
++{
++	struct perf_dlfilter_sample expected = {
++		.ip		= d->ip,
++		.pid		= 12345,
++		.tid		= 12346,
++		.time		= 1234567890,
++		.addr		= d->addr,
++		.id		= 99,
++		.stream_id	= 101,
++		.period		= 543212345,
++		.cpu		= 31,
++		.cpumode	= PERF_RECORD_MISC_USER,
++		.addr_correlates_sym = 1,
++		.misc		= PERF_RECORD_MISC_USER,
++	};
++
++	CHECK(sample->size >= sizeof(struct perf_dlfilter_sample));
++
++	CHECK_SAMPLE(ip);
++	CHECK_SAMPLE(pid);
++	CHECK_SAMPLE(tid);
++	CHECK_SAMPLE(time);
++	CHECK_SAMPLE(addr);
++	CHECK_SAMPLE(id);
++	CHECK_SAMPLE(stream_id);
++	CHECK_SAMPLE(period);
++	CHECK_SAMPLE(cpu);
++	CHECK_SAMPLE(cpumode);
++	CHECK_SAMPLE(addr_correlates_sym);
++	CHECK_SAMPLE(misc);
++
++	CHECK(!sample->raw_data);
++	CHECK_SAMPLE(brstack_nr);
++	CHECK(!sample->brstack);
++	CHECK_SAMPLE(raw_callchain_nr);
++	CHECK(!sample->raw_callchain);
++
++#define EVENT_NAME "branches:"
++	CHECK(!strncmp(sample->event, EVENT_NAME, strlen(EVENT_NAME)));
++
++	return 0;
++}
++
++static int check_al(void *ctx)
++{
++	const struct perf_dlfilter_al *al;
++
++	al = perf_dlfilter_fns.resolve_ip(ctx);
++	if (!al)
++		return test_fail("resolve_ip() failed");
++
++	CHECK(al->sym && !strcmp("foo", al->sym));
++	CHECK(!al->symoff);
++
++	return 0;
++}
++
++static int check_addr_al(void *ctx)
++{
++	const struct perf_dlfilter_al *addr_al;
++
++	addr_al = perf_dlfilter_fns.resolve_addr(ctx);
++	if (!addr_al)
++		return test_fail("resolve_addr() failed");
++
++	CHECK(addr_al->sym && !strcmp("bar", addr_al->sym));
++	CHECK(!addr_al->symoff);
++
++	return 0;
++}
++
++static int check_address_al(void *ctx, const struct perf_dlfilter_sample *sample)
++{
++	struct perf_dlfilter_al address_al;
++	const struct perf_dlfilter_al *al;
++
++	al = perf_dlfilter_fns.resolve_ip(ctx);
++	if (!al)
++		return test_fail("resolve_ip() failed");
++
++	address_al.size = sizeof(address_al);
++	if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &address_al))
++		return test_fail("resolve_address() failed");
++
++	CHECK(address_al.sym && al->sym);
++	CHECK(!strcmp(address_al.sym, al->sym));
++	CHECK(address_al.addr == al->addr);
++	CHECK(address_al.sym_start == al->sym_start);
++	CHECK(address_al.sym_end == al->sym_end);
++	CHECK(address_al.dso && al->dso);
++	CHECK(!strcmp(address_al.dso, al->dso));
++
++	/* al_cleanup() is v2 API so may not be present */
++	if (perf_dlfilter_fns.al_cleanup)
++		perf_dlfilter_fns.al_cleanup(ctx, &address_al);
++
++	return 0;
++}
++
++static int check_attr(void *ctx)
++{
++	struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
++
++	CHECK(attr);
++	CHECK(attr->type == PERF_TYPE_HARDWARE);
++	CHECK(attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
++
++	return 0;
++}
++
++static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
++{
++	struct filter_data *d = data;
++
++	CHECK(data && filt_dat == data);
++
++	if (early) {
++		CHECK(!d->early_filter_cnt);
++		d->early_filter_cnt += 1;
++	} else {
++		CHECK(!d->filter_cnt);
++		CHECK(d->early_filter_cnt);
++		CHECK(d->do_early != 2);
++		d->filter_cnt += 1;
++	}
++
++	if (check_sample(data, sample))
++		return -1;
++
++	if (check_attr(ctx))
++		return -1;
++
++	if (early && !d->do_early)
++		return 0;
++
++	if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
++		return -1;
++
++	if (early)
++		return d->do_early == 2;
++
++	return 1;
++}
++
++int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
++{
++	pr_debug("%s API\n", __func__);
++
++	return do_checks(data, sample, ctx, true);
++}
++
++int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
++{
++	pr_debug("%s API\n", __func__);
++
++	return do_checks(data, sample, ctx, false);
++}
++
++int stop(void *data, void *ctx)
++{
++	static bool called;
++
++	pr_debug("%s API\n", __func__);
++
++	CHECK(data && filt_dat == data && !called);
++	called = true;
++
++	free(data);
++	filt_dat = NULL;
++	return 0;
++}
++
++const char *filter_description(const char **long_description)
++{
++	*long_description = "Filter used by the 'dlfilter C API' perf test";
++	return "dlfilter to test v2 C API";
++}
+diff --git a/tools/perf/include/perf/perf_dlfilter.h b/tools/perf/include/perf/perf_dlfilter.h
+index a26e2f129f83e..16fc4568ac53b 100644
+--- a/tools/perf/include/perf/perf_dlfilter.h
++++ b/tools/perf/include/perf/perf_dlfilter.h
+@@ -91,6 +91,7 @@ struct perf_dlfilter_al {
+ 	/* Below members are only populated by resolve_ip() */
+ 	__u8 filtered; /* True if this sample event will be filtered out */
+ 	const char *comm;
++	void *priv; /* Private data. Do not change */
+ };
+ 
+ struct perf_dlfilter_fns {
+@@ -102,7 +103,8 @@ struct perf_dlfilter_fns {
+ 	char **(*args)(void *ctx, int *dlargc);
+ 	/*
+ 	 * Return information about address (al->size must be set before
+-	 * calling). Returns 0 on success, -1 otherwise.
++	 * calling). Returns 0 on success, -1 otherwise. Call al_cleanup()
++	 * when 'al' data is no longer needed.
+ 	 */
+ 	__s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ 	/* Return instruction bytes and length */
+@@ -113,8 +115,13 @@ struct perf_dlfilter_fns {
+ 	struct perf_event_attr *(*attr)(void *ctx);
+ 	/* Read object code, return numbers of bytes read */
+ 	__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
++	/*
++	 * If present (i.e. must check al_cleanup != NULL), call after
++	 * resolve_address() to free any associated resources.
++	 */
++	void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
+ 	/* Reserved */
+-	void *(*reserved[120])(void *);
++	void *(*reserved[119])(void *);
+ };
+ 
+ /*
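Carving al_cleanup out of the reserved tail (120 down to 119 slots) keeps the function table at the same 128 pointer-sized members, eight v0 entries plus the new one plus the remaining reserve, so the struct size and the offsets of existing members are unchanged and previously built filters stay compatible. A hypothetical guard, not in the header, that would pin this down:

	#include <perf/perf_dlfilter.h>

	/* 128 = 8 original entries + al_cleanup + 119 reserved slots;
	 * assumes function pointers and void * have equal size, as on
	 * the platforms perf targets.
	 */
	_Static_assert(sizeof(struct perf_dlfilter_fns) == 128 * sizeof(void *),
		       "perf_dlfilter_fns must keep its original size");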
+diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
+index 150765f2baeea..1d18bb89402e6 100644
+--- a/tools/perf/pmu-events/Build
++++ b/tools/perf/pmu-events/Build
+@@ -35,3 +35,9 @@ $(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_L
+ 	$(call rule_mkdir)
+ 	$(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) pmu-events/arch $@
+ endif
++
++# The pmu-events.c file is generated in the OUTPUT directory, so it needs a
++# separate rule to depend on it properly
++$(OUTPUT)pmu-events/pmu-events.o: $(PMU_EVENTS_C)
++	$(call rule_mkdir)
++	$(call if_changed_dep,cc_o_c)
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
+index fc06330542116..7a2b7b200f144 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
+@@ -92,9 +92,6 @@
+     {
+         "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+     },
+-    {
+-        "ArchStdEvent": "L1D_CACHE_LMISS"
+-    },
+     {
+         "ArchStdEvent": "L1I_CACHE_LMISS"
+     },
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+index 605be14f441c8..839ae26945fb2 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+@@ -1,53 +1,8 @@
+ [
+-  {
+-    "EventCode": "0x1003C",
+-    "EventName": "PM_EXEC_STALL_DMISS_L2L3",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
+-  },
+-  {
+-    "EventCode": "0x1E054",
+-    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+-  },
+-  {
+-    "EventCode": "0x34054",
+-    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+-  },
+-  {
+-    "EventCode": "0x34056",
+-    "EventName": "PM_EXEC_STALL_LOAD_FINISH",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
+-  },
+-  {
+-    "EventCode": "0x3006C",
+-    "EventName": "PM_RUN_CYC_SMT2_MODE",
+-    "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
+-  },
+   {
+     "EventCode": "0x300F4",
+     "EventName": "PM_RUN_INST_CMPL_CONC",
+-    "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
+-  },
+-  {
+-    "EventCode": "0x4C016",
+-    "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
+-  },
+-  {
+-    "EventCode": "0x4D014",
+-    "EventName": "PM_EXEC_STALL_LOAD",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
+-  },
+-  {
+-    "EventCode": "0x4D016",
+-    "EventName": "PM_EXEC_STALL_PTESYNC",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
+-  },
+-  {
+-    "EventCode": "0x401EA",
+-    "EventName": "PM_THRESH_EXC_128",
+-    "BriefDescription": "Threshold counter exceeded a value of 128."
++    "BriefDescription": "PowerPC instruction completed by this thread when all threads in the core had the run-latch set."
+   },
+   {
+     "EventCode": "0x400F6",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+index 54acb55e2c8c6..e816cd10c1293 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+@@ -1,7 +1,67 @@
+ [
+   {
+-    "EventCode": "0x4016E",
+-    "EventName": "PM_THRESH_NOT_MET",
+-    "BriefDescription": "Threshold counter did not meet threshold."
++    "EventCode": "0x100F4",
++    "EventName": "PM_FLOP_CMPL",
++    "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
++  },
++  {
++    "EventCode": "0x45050",
++    "EventName": "PM_1FLOP_CMPL",
++    "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
++  },
++  {
++    "EventCode": "0x45052",
++    "EventName": "PM_4FLOP_CMPL",
++    "BriefDescription": "Four floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
++  },
++  {
++    "EventCode": "0x45054",
++    "EventName": "PM_FMA_CMPL",
++    "BriefDescription": "Two floating point instruction completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
++  },
++  {
++    "EventCode": "0x45056",
++    "EventName": "PM_SCALAR_FLOP_CMPL",
++    "BriefDescription": "Scalar floating point instruction completed."
++  },
++  {
++    "EventCode": "0x4505A",
++    "EventName": "PM_SP_FLOP_CMPL",
++    "BriefDescription": "Single Precision floating point instruction completed."
++  },
++  {
++    "EventCode": "0x4505C",
++    "EventName": "PM_MATH_FLOP_CMPL",
++    "BriefDescription": "Math floating point instruction completed."
++  },
++  {
++    "EventCode": "0x4D052",
++    "EventName": "PM_2FLOP_CMPL",
++    "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
++  },
++  {
++    "EventCode": "0x4D054",
++    "EventName": "PM_8FLOP_CMPL",
++    "BriefDescription": "Four Double Precision vector instruction completed."
++  },
++  {
++    "EventCode": "0x4D056",
++    "EventName": "PM_NON_FMA_FLOP_CMPL",
++    "BriefDescription": "Non FMA instruction completed."
++  },
++  {
++    "EventCode": "0x4D058",
++    "EventName": "PM_VECTOR_FLOP_CMPL",
++    "BriefDescription": "Vector floating point instruction completed."
++  },
++  {
++    "EventCode": "0x4D05A",
++    "EventName": "PM_NON_MATH_FLOP_CMPL",
++    "BriefDescription": "Non Math instruction completed."
++  },
++  {
++    "EventCode": "0x4D05C",
++    "EventName": "PM_DPP_FLOP_CMPL",
++    "BriefDescription": "Double-Precision or Quad-Precision instruction completed."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+index 558f9530f54ec..dc0bb6c6338bf 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+@@ -1,64 +1,9 @@
+ [
+-  {
+-    "EventCode": "0x10004",
+-    "EventName": "PM_EXEC_STALL_TRANSLATION",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
+-  },
+-  {
+-    "EventCode": "0x10006",
+-    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+-  },
+-  {
+-    "EventCode": "0x10010",
+-    "EventName": "PM_PMC4_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
+-  },
+-  {
+-    "EventCode": "0x10020",
+-    "EventName": "PM_PMC4_REWIND",
+-    "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
+-  },
+-  {
+-    "EventCode": "0x10038",
+-    "EventName": "PM_DISP_STALL_TRANSLATION",
+-    "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
+-  },
+-  {
+-    "EventCode": "0x1003A",
+-    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
+-  },
+-  {
+-    "EventCode": "0x1D05E",
+-    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+-  },
+-  {
+-    "EventCode": "0x1E050",
+-    "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+-  },
+   {
+     "EventCode": "0x1F054",
+     "EventName": "PM_DTLB_HIT",
+     "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
+   },
+-  {
+-    "EventCode": "0x10064",
+-    "EventName": "PM_DISP_STALL_IC_L2",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+-  },
+-  {
+-    "EventCode": "0x101E8",
+-    "EventName": "PM_THRESH_EXC_256",
+-    "BriefDescription": "Threshold counter exceeded a count of 256."
+-  },
+-  {
+-    "EventCode": "0x101EC",
+-    "EventName": "PM_THRESH_MET",
+-    "BriefDescription": "Threshold exceeded."
+-  },
+   {
+     "EventCode": "0x100F2",
+     "EventName": "PM_1PLUS_PPC_CMPL",
+@@ -67,57 +12,7 @@
+   {
+     "EventCode": "0x100F6",
+     "EventName": "PM_IERAT_MISS",
+-    "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
+-  },
+-  {
+-    "EventCode": "0x100F8",
+-    "EventName": "PM_DISP_STALL_CYC",
+-    "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
+-  },
+-  {
+-    "EventCode": "0x20006",
+-    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+-  },
+-  {
+-    "EventCode": "0x20114",
+-    "EventName": "PM_MRK_L2_RC_DISP",
+-    "BriefDescription": "Marked instruction RC dispatched in L2."
+-  },
+-  {
+-    "EventCode": "0x2C010",
+-    "EventName": "PM_EXEC_STALL_LSU",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
+-  },
+-  {
+-    "EventCode": "0x2C016",
+-    "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
+-    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
+-  },
+-  {
+-    "EventCode": "0x2C01E",
+-    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
+-  },
+-  {
+-    "EventCode": "0x2D01A",
+-    "EventName": "PM_DISP_STALL_IC_MISS",
+-    "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
+-  },
+-  {
+-    "EventCode": "0x2E018",
+-    "EventName": "PM_DISP_STALL_FETCH",
+-    "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
+-  },
+-  {
+-    "EventCode": "0x2E01A",
+-    "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
+-  },
+-  {
+-    "EventCode": "0x2C142",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
+-    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
++    "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event. This event only counts instruction demand access."
+   },
+   {
+     "EventCode": "0x24050",
+@@ -134,11 +29,6 @@
+     "EventName": "PM_BR_TAKEN_CMPL",
+     "BriefDescription": "Branch Taken instruction completed."
+   },
+-  {
+-    "EventCode": "0x30004",
+-    "EventName": "PM_DISP_STALL_FLUSH",
+-    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+-  },
+   {
+     "EventCode": "0x3000A",
+     "EventName": "PM_DISP_STALL_ITLB_MISS",
+@@ -149,99 +39,29 @@
+     "EventName": "PM_FLUSH_COMPLETION",
+     "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
+   },
+-  {
+-    "EventCode": "0x30014",
+-    "EventName": "PM_EXEC_STALL_STORE",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
+-  },
+-  {
+-    "EventCode": "0x30018",
+-    "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
+-  },
+-  {
+-    "EventCode": "0x30026",
+-    "EventName": "PM_EXEC_STALL_STORE_MISS",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
+-  },
+-  {
+-    "EventCode": "0x3012A",
+-    "EventName": "PM_MRK_L2_RC_DONE",
+-    "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
+-  },
+   {
+     "EventCode": "0x3F046",
+     "EventName": "PM_ITLB_HIT_1G",
+     "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
+   },
+-  {
+-    "EventCode": "0x34058",
+-    "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
+-    "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
+-  },
+-  {
+-    "EventCode": "0x3D05C",
+-    "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+-  },
+-  {
+-    "EventCode": "0x3E052",
+-    "EventName": "PM_DISP_STALL_IC_L3",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+-  },
+   {
+     "EventCode": "0x3E054",
+     "EventName": "PM_LD_MISS_L1",
+-    "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+-  },
+-  {
+-    "EventCode": "0x301EA",
+-    "EventName": "PM_THRESH_EXC_1024",
+-    "BriefDescription": "Threshold counter exceeded a value of 1024."
++    "BriefDescription": "Load missed L1, counted at finish time. LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+   },
+   {
+     "EventCode": "0x300FA",
+     "EventName": "PM_INST_FROM_L3MISS",
+-    "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
++    "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
+   },
+   {
+     "EventCode": "0x40006",
+     "EventName": "PM_ISSUE_KILL",
+     "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
+   },
+-  {
+-    "EventCode": "0x40116",
+-    "EventName": "PM_MRK_LARX_FIN",
+-    "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+-  },
+-  {
+-    "EventCode": "0x4C010",
+-    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
+-  },
+-  {
+-    "EventCode": "0x4D01E",
+-    "EventName": "PM_DISP_STALL_BR_MPRED",
+-    "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
+-  },
+-  {
+-    "EventCode": "0x4E010",
+-    "EventName": "PM_DISP_STALL_IC_L3MISS",
+-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
+-  },
+-  {
+-    "EventCode": "0x4E01A",
+-    "EventName": "PM_DISP_STALL_HELD_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
+-  },
+-  {
+-    "EventCode": "0x4003C",
+-    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+-  },
+   {
+     "EventCode": "0x44056",
+     "EventName": "PM_VECTOR_ST_CMPL",
+-    "BriefDescription": "Vector store instructions completed."
++    "BriefDescription": "Vector store instruction completed."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+index 58b5dfe3a2731..913b6515b8701 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+@@ -1,14 +1,29 @@
+ [
+-  {
+-    "EventCode": "0x1002C",
+-    "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
+-    "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
+-  },
+   {
+     "EventCode": "0x10132",
+     "EventName": "PM_MRK_INST_ISSUED",
+     "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
+   },
++  {
++    "EventCode": "0x10134",
++    "EventName": "PM_MRK_ST_DONE_L2",
++    "BriefDescription": "Marked store completed in L2."
++  },
++  {
++    "EventCode": "0x1C142",
++    "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
++    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
++  },
++  {
++    "EventCode": "0x1C144",
++    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
++    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
++  },
++  {
++    "EventCode": "0x1F150",
++    "EventName": "PM_MRK_ST_L2_CYC",
++    "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
++  },
+   {
+     "EventCode": "0x101E0",
+     "EventName": "PM_MRK_INST_DISP",
+@@ -20,14 +35,39 @@
+     "BriefDescription": "Marked Branch Taken instruction completed."
+   },
+   {
+-    "EventCode": "0x20112",
+-    "EventName": "PM_MRK_NTF_FIN",
+-    "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
++    "EventCode": "0x101E4",
++    "EventName": "PM_MRK_L1_ICACHE_MISS",
++    "BriefDescription": "Marked instruction suffered an instruction cache miss."
++  },
++  {
++    "EventCode": "0x101EA",
++    "EventName": "PM_MRK_L1_RELOAD_VALID",
++    "BriefDescription": "Marked demand reload."
++  },
++  {
++    "EventCode": "0x20114",
++    "EventName": "PM_MRK_L2_RC_DISP",
++    "BriefDescription": "Marked instruction RC dispatched in L2."
+   },
+   {
+-    "EventCode": "0x2C01C",
+-    "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
++    "EventCode": "0x2011C",
++    "EventName": "PM_MRK_NTF_CYC",
++    "BriefDescription": "Cycles in which the marked instruction is the oldest in the pipeline (next-to-finish or next-to-complete)."
++  },
++  {
++    "EventCode": "0x20130",
++    "EventName": "PM_MRK_INST_DECODED",
++    "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
++  },
++  {
++    "EventCode": "0x20132",
++    "EventName": "PM_MRK_DFU_ISSUE",
++    "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
++  },
++  {
++    "EventCode": "0x20134",
++    "EventName": "PM_MRK_FXU_ISSUE",
++    "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
+   },
+   {
+     "EventCode": "0x20138",
+@@ -39,6 +79,16 @@
+     "EventName": "PM_MRK_BRU_FIN",
+     "BriefDescription": "Marked Branch instruction finished."
+   },
++  {
++    "EventCode": "0x2013C",
++    "EventName": "PM_MRK_FX_LSU_FIN",
++    "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
++  },
++  {
++    "EventCode": "0x2C142",
++    "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
++    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
++  },
+   {
+     "EventCode": "0x2C144",
+     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
+@@ -60,19 +110,49 @@
+     "BriefDescription": "A marked branch completed. All branches are included."
+   },
+   {
+-    "EventCode": "0x200FD",
+-    "EventName": "PM_L1_ICACHE_MISS",
+-    "BriefDescription": "Demand iCache Miss."
++    "EventCode": "0x2D154",
++    "EventName": "PM_MRK_DERAT_MISS_64K",
++    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
++  },
++  {
++    "EventCode": "0x201E0",
++    "EventName": "PM_MRK_DATA_FROM_MEMORY",
++    "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
++  },
++  {
++    "EventCode": "0x201E2",
++    "EventName": "PM_MRK_LD_MISS_L1",
++    "BriefDescription": "Marked demand data load miss counted at finish time."
+   },
+   {
+-    "EventCode": "0x30130",
+-    "EventName": "PM_MRK_INST_FIN",
+-    "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
++    "EventCode": "0x201E4",
++    "EventName": "PM_MRK_DATA_FROM_L3MISS",
++    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
++  },
++  {
++    "EventCode": "0x3012A",
++    "EventName": "PM_MRK_L2_RC_DONE",
++    "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
++  },
++  {
++    "EventCode": "0x30132",
++    "EventName": "PM_MRK_VSU_FIN",
++    "BriefDescription": "VSU marked instruction finished. Excludes simple FX instructions issued to the Store Unit."
+   },
+   {
+     "EventCode": "0x34146",
+     "EventName": "PM_MRK_LD_CMPL",
+-    "BriefDescription": "Marked loads completed."
++    "BriefDescription": "Marked load instruction completed."
++  },
++  {
++    "EventCode": "0x3C142",
++    "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
++    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
++  },
++  {
++    "EventCode": "0x3C144",
++    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
++    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
+   },
+   {
+     "EventCode": "0x3E158",
+@@ -82,12 +162,22 @@
+   {
+     "EventCode": "0x3E15A",
+     "EventName": "PM_MRK_ST_FIN",
+-    "BriefDescription": "The marked instruction was a store of any kind."
++    "BriefDescription": "Marked store instruction finished."
+   },
+   {
+-    "EventCode": "0x30068",
+-    "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+-    "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
++    "EventCode": "0x3F150",
++    "EventName": "PM_MRK_ST_DRAIN_CYC",
++    "BriefDescription": "Cycles in which the marked store drained from the core to the L2."
++  },
++  {
++    "EventCode": "0x30162",
++    "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
++    "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
++  },
++  {
++    "EventCode": "0x301E2",
++    "EventName": "PM_MRK_ST_CMPL",
++    "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
+   },
+   {
+     "EventCode": "0x301E4",
+@@ -95,48 +185,58 @@
+     "BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
+   },
+   {
+-    "EventCode": "0x300F6",
+-    "EventName": "PM_LD_DEMAND_MISS_L1",
+-    "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
++    "EventCode": "0x40116",
++    "EventName": "PM_MRK_LARX_FIN",
++    "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
++  },
++  {
++    "EventCode": "0x40132",
++    "EventName": "PM_MRK_LSU_FIN",
++    "BriefDescription": "LSU marked instruction finish."
+   },
+   {
+-    "EventCode": "0x300FE",
+-    "EventName": "PM_DATA_FROM_L3MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
++    "EventCode": "0x44146",
++    "EventName": "PM_MRK_STCX_CORE_CYC",
++    "BriefDescription": "Cycles spent in the core portion of a marked STCX instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+   },
+   {
+-    "EventCode": "0x40012",
+-    "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+-    "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
++    "EventCode": "0x4C142",
++    "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
++    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+   },
+   {
+-    "EventCode": "0x40134",
+-    "EventName": "PM_MRK_INST_TIMEO",
+-    "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
++    "EventCode": "0x4C144",
++    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
++    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
+   },
+   {
+-    "EventCode": "0x4505A",
+-    "EventName": "PM_SP_FLOP_CMPL",
+-    "BriefDescription": "Single Precision floating point instructions completed."
++    "EventCode": "0x4C15E",
++    "EventName": "PM_MRK_DTLB_MISS_64K",
++    "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
+   {
+-    "EventCode": "0x4D058",
+-    "EventName": "PM_VECTOR_FLOP_CMPL",
+-    "BriefDescription": "Vector floating point instructions completed."
++    "EventCode": "0x4E15E",
++    "EventName": "PM_MRK_INST_FLUSHED",
++    "BriefDescription": "The marked instruction was flushed."
+   },
+   {
+-    "EventCode": "0x4D05A",
+-    "EventName": "PM_NON_MATH_FLOP_CMPL",
+-    "BriefDescription": "Non Math instructions completed."
++    "EventCode": "0x40164",
++    "EventName": "PM_MRK_DERAT_MISS_2M",
++    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
+   {
+     "EventCode": "0x401E0",
+     "EventName": "PM_MRK_INST_CMPL",
+-    "BriefDescription": "marked instruction completed."
++    "BriefDescription": "Marked instruction completed."
++  },
++  {
++    "EventCode": "0x401E6",
++    "EventName": "PM_MRK_INST_FROM_L3MISS",
++    "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
+   },
+   {
+-    "EventCode": "0x400FE",
+-    "EventName": "PM_DATA_FROM_MEMORY",
+-    "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
++    "EventCode": "0x401E8",
++    "EventName": "PM_MRK_DATA_FROM_L2MISS",
++    "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
+   }
+ ]
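
One way to read the paired PM_MRK_XFER_FROM_SRC_PMCn / PM_MRK_XFER_FROM_SRC_CYC_PMCn events above: the first counts marked reloads from the MMCR3-selected source and the second the cycles those reloads took, so their ratio approximates the average reload latency for that source. A rough sketch, assuming both counters were already collected (values illustrative):

    # Average demand-miss reload latency from the MMCR3-selected source,
    # taken as the ratio of the CYC event to the matching count event.
    mrk_xfer_from_src_pmc1 = 2_500        # marked reloads from the source (illustrative)
    mrk_xfer_from_src_cyc_pmc1 = 750_000  # cycles spent on those reloads (illustrative)
    avg_latency_cyc = mrk_xfer_from_src_cyc_pmc1 / mrk_xfer_from_src_pmc1
    print(f"~{avg_latency_cyc:.0f} cycles per reload")  # ~300
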
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+index 843b51f531e95..b95a547a704b3 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+@@ -1,24 +1,9 @@
+ [
+-  {
+-    "EventCode": "0x1000A",
+-    "EventName": "PM_PMC3_REWIND",
+-    "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
+-  },
+   {
+     "EventCode": "0x1C040",
+     "EventName": "PM_XFER_FROM_SRC_PMC1",
+     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+   },
+-  {
+-    "EventCode": "0x1C142",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
+-    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+-  },
+-  {
+-    "EventCode": "0x1C144",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
+-    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
+-  },
+   {
+     "EventCode": "0x1C056",
+     "EventName": "PM_DERAT_MISS_4K",
+@@ -34,25 +19,10 @@
+     "EventName": "PM_DTLB_MISS_2M",
+     "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
+-  {
+-    "EventCode": "0x1E056",
+-    "EventName": "PM_EXEC_STALL_STORE_PIPE",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
+-  },
+-  {
+-    "EventCode": "0x1F150",
+-    "EventName": "PM_MRK_ST_L2_CYC",
+-    "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
+-  },
+   {
+     "EventCode": "0x10062",
+     "EventName": "PM_LD_L3MISS_PEND_CYC",
+-    "BriefDescription": "Cycles L3 miss was pending for this thread."
+-  },
+-  {
+-    "EventCode": "0x20010",
+-    "EventName": "PM_PMC1_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
++    "BriefDescription": "Cycles in which an L3 miss was pending for this thread."
+   },
+   {
+     "EventCode": "0x2001A",
+@@ -79,36 +49,16 @@
+     "EventName": "PM_DTLB_MISS_4K",
+     "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
+-  {
+-    "EventCode": "0x2D154",
+-    "EventName": "PM_MRK_DERAT_MISS_64K",
+-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+-  },
+   {
+     "EventCode": "0x200F6",
+     "EventName": "PM_DERAT_MISS",
+     "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
+-  {
+-    "EventCode": "0x30016",
+-    "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
+-  },
+   {
+     "EventCode": "0x3C040",
+     "EventName": "PM_XFER_FROM_SRC_PMC3",
+     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+   },
+-  {
+-    "EventCode": "0x3C142",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
+-    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+-  },
+-  {
+-    "EventCode": "0x3C144",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
+-    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
+-  },
+   {
+     "EventCode": "0x3C054",
+     "EventName": "PM_DERAT_MISS_16M",
+@@ -124,41 +74,21 @@
+     "EventName": "PM_LARX_FIN",
+     "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+   },
+-  {
+-    "EventCode": "0x301E2",
+-    "EventName": "PM_MRK_ST_CMPL",
+-    "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
+-  },
+   {
+     "EventCode": "0x300FC",
+     "EventName": "PM_DTLB_MISS",
+-    "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
+-  },
+-  {
+-    "EventCode": "0x4D02C",
+-    "EventName": "PM_PMC1_REWIND",
+-    "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
++    "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. This event only counts for demand misses."
+   },
+   {
+     "EventCode": "0x4003E",
+     "EventName": "PM_LD_CMPL",
+-    "BriefDescription": "Loads completed."
++    "BriefDescription": "Load instruction completed."
+   },
+   {
+     "EventCode": "0x4C040",
+     "EventName": "PM_XFER_FROM_SRC_PMC4",
+     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+   },
+-  {
+-    "EventCode": "0x4C142",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
+-    "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+-  },
+-  {
+-    "EventCode": "0x4C144",
+-    "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
+-    "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
+-  },
+   {
+     "EventCode": "0x4C056",
+     "EventName": "PM_DTLB_MISS_16M",
+@@ -168,20 +98,5 @@
+     "EventCode": "0x4C05A",
+     "EventName": "PM_DTLB_MISS_1G",
+     "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+-  },
+-  {
+-    "EventCode": "0x4C15E",
+-    "EventName": "PM_MRK_DTLB_MISS_64K",
+-    "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+-  },
+-  {
+-    "EventCode": "0x4D056",
+-    "EventName": "PM_NON_FMA_FLOP_CMPL",
+-    "BriefDescription": "Non FMA instruction completed."
+-  },
+-  {
+-    "EventCode": "0x40164",
+-    "EventName": "PM_MRK_DERAT_MISS_2M",
+-    "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+index 6f53583a0c62c..182369076d956 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+@@ -16,133 +16,133 @@
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled for any reason",
+         "MetricExpr": "PM_DISP_STALL_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI;CPI_STALL_RATIO",
+-        "MetricName": "DISPATCHED_CPI"
++        "MetricName": "DISPATCH_STALL_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because there was a flush",
+         "MetricExpr": "PM_DISP_STALL_FLUSH / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_FLUSH_CPI"
++        "MetricName": "DISPATCH_STALL_FLUSH_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because the MMU was handling a translation miss",
+         "MetricExpr": "PM_DISP_STALL_TRANSLATION / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_TRANSLATION_CPI"
++        "MetricName": "DISPATCH_STALL_TRANSLATION_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction ERAT miss",
+         "MetricExpr": "PM_DISP_STALL_IERAT_ONLY_MISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_IERAT_ONLY_MISS_CPI"
++        "MetricName": "DISPATCH_STALL_IERAT_ONLY_MISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction TLB miss",
+         "MetricExpr": "PM_DISP_STALL_ITLB_MISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_ITLB_MISS_CPI"
++        "MetricName": "DISPATCH_STALL_ITLB_MISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss",
+         "MetricExpr": "PM_DISP_STALL_IC_MISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_IC_MISS_CPI"
++        "MetricName": "DISPATCH_STALL_IC_MISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L2",
+         "MetricExpr": "PM_DISP_STALL_IC_L2 / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_IC_L2_CPI"
++        "MetricName": "DISPATCH_STALL_IC_L2_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L3",
+         "MetricExpr": "PM_DISP_STALL_IC_L3 / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_IC_L3_CPI"
++        "MetricName": "DISPATCH_STALL_IC_L3_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from any source beyond the local L3",
+         "MetricExpr": "PM_DISP_STALL_IC_L3MISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_IC_L3MISS_CPI"
++        "MetricName": "DISPATCH_STALL_IC_L3MISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss after a branch mispredict",
+         "MetricExpr": "PM_DISP_STALL_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_BR_MPRED_ICMISS_CPI"
++        "MetricName": "DISPATCH_STALL_BR_MPRED_ICMISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L2 after suffering a branch mispredict",
+         "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L2 / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_BR_MPRED_IC_L2_CPI"
++        "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L2_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L3 after suffering a branch mispredict",
+         "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3 / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_BR_MPRED_IC_L3_CPI"
++        "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L3_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from any source beyond the local L3 after suffering a branch mispredict",
+         "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3MISS / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_BR_MPRED_IC_L3MISS_CPI"
++        "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L3MISS_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to a branch mispredict",
+         "MetricExpr": "PM_DISP_STALL_BR_MPRED / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_BR_MPRED_CPI"
++        "MetricName": "DISPATCH_STALL_BR_MPRED_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any reason",
+         "MetricExpr": "PM_DISP_STALL_HELD_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch",
+         "MetricExpr": "PM_DISP_STALL_HELD_SYNC_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISP_HELD_STALL_SYNC_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_SYNC_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch while waiting on the scoreboard",
+         "MetricExpr": "PM_DISP_STALL_HELD_SCOREBOARD_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISP_HELD_STALL_SCOREBOARD_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_SCOREBOARD_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch due to issue queue full",
+         "MetricExpr": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISP_HELD_STALL_ISSQ_FULL_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_ISSQ_FULL_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the mapper/SRB was full",
+         "MetricExpr": "PM_DISP_STALL_HELD_RENAME_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_RENAME_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_RENAME_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the STF mapper/SRB was full",
+         "MetricExpr": "PM_DISP_STALL_HELD_STF_MAPPER_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_STF_MAPPER_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_STF_MAPPER_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the XVFC mapper/SRB was full",
+         "MetricExpr": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_XVFC_MAPPER_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_XVFC_MAPPER_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any other reason",
+         "MetricExpr": "PM_DISP_STALL_HELD_OTHER_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_OTHER_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_OTHER_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction has been dispatched but not issued for any reason",
+@@ -352,13 +352,13 @@
+         "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because fetch was being held, so there was nothing in the pipeline for this thread",
+         "MetricExpr": "PM_DISP_STALL_FETCH / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_FETCH_CPI"
++        "MetricName": "DISPATCH_STALL_FETCH_CPI"
+     },
+     {
+         "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of power management",
+         "MetricExpr": "PM_DISP_STALL_HELD_HALT_CYC / PM_RUN_INST_CMPL",
+         "MetricGroup": "CPI",
+-        "MetricName": "DISPATCHED_HELD_HALT_CPI"
++        "MetricName": "DISPATCH_STALL_HELD_HALT_CPI"
+     },
+     {
+         "BriefDescription": "Percentage of flushes per completed instruction",
+@@ -453,12 +453,6 @@
+         "MetricGroup": "General",
+         "MetricName": "LOADS_PER_INST"
+     },
+-    {
+-        "BriefDescription": "Average number of finished stores per completed instruction",
+-        "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+-        "MetricGroup": "General",
+-        "MetricName": "STORES_PER_INST"
+-    },
+     {
+         "BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
+         "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+@@ -566,7 +560,7 @@
+         "BriefDescription": "Average number of STCX instructions finshed per completed instruction",
+         "MetricExpr": "PM_STCX_FIN / PM_RUN_INST_CMPL",
+         "MetricGroup": "General",
+-        "MetricName": "STXC_PER_INST"
++        "MetricName": "STCX_PER_INST"
+     },
+     {
+         "BriefDescription": "Average number of LARX instructions finshed per completed instruction",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
+index a771e4b6bec58..f09c00c89322e 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
+@@ -1,104 +1,39 @@
+ [
+   {
+-    "EventCode": "0x10016",
+-    "EventName": "PM_VSU0_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 0."
+-  },
+-  {
+-    "EventCode": "0x1001C",
+-    "EventName": "PM_ULTRAVISOR_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
+-  },
+-  {
+-    "EventCode": "0x100F0",
+-    "EventName": "PM_CYC",
+-    "BriefDescription": "Processor cycles."
+-  },
+-  {
+-    "EventCode": "0x10134",
+-    "EventName": "PM_MRK_ST_DONE_L2",
+-    "BriefDescription": "Marked stores completed in L2 (RC machine done)."
++    "EventCode": "0x1002C",
++    "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
++    "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
+   },
+   {
+     "EventCode": "0x1505E",
+     "EventName": "PM_LD_HIT_L1",
+-    "BriefDescription": "Loads that finished without experiencing an L1 miss."
++    "BriefDescription": "Load finished without experiencing an L1 miss."
+   },
+   {
+     "EventCode": "0x1F056",
+     "EventName": "PM_DISP_SS0_2_INSTR_CYC",
+     "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
+   },
+-  {
+-    "EventCode": "0x1F15C",
+-    "EventName": "PM_MRK_STCX_L2_CYC",
+-    "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
+-  },
+   {
+     "EventCode": "0x10066",
+     "EventName": "PM_ADJUNCT_CYC",
+     "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
+   },
+-  {
+-    "EventCode": "0x101E4",
+-    "EventName": "PM_MRK_L1_ICACHE_MISS",
+-    "BriefDescription": "Marked Instruction suffered an icache Miss."
+-  },
+-  {
+-    "EventCode": "0x101EA",
+-    "EventName": "PM_MRK_L1_RELOAD_VALID",
+-    "BriefDescription": "Marked demand reload."
+-  },
+-  {
+-    "EventCode": "0x100F4",
+-    "EventName": "PM_FLOP_CMPL",
+-    "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
+-  },
+-  {
+-    "EventCode": "0x100FA",
+-    "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
+-    "BriefDescription": "Cycles when at least one thread has the run latch set."
+-  },
+   {
+     "EventCode": "0x100FC",
+     "EventName": "PM_LD_REF_L1",
+     "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
+   },
+-  {
+-    "EventCode": "0x2000C",
+-    "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
+-    "BriefDescription": "Cycles when the run latch is set for all threads."
+-  },
+   {
+     "EventCode": "0x2E010",
+     "EventName": "PM_ADJUNCT_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
++    "BriefDescription": "PowerPC instruction completed while the thread was in Adjunct state."
+   },
+   {
+     "EventCode": "0x2E014",
+     "EventName": "PM_STCX_FIN",
+     "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+   },
+-  {
+-    "EventCode": "0x20130",
+-    "EventName": "PM_MRK_INST_DECODED",
+-    "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
+-  },
+-  {
+-    "EventCode": "0x20132",
+-    "EventName": "PM_MRK_DFU_ISSUE",
+-    "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
+-  },
+-  {
+-    "EventCode": "0x20134",
+-    "EventName": "PM_MRK_FXU_ISSUE",
+-    "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
+-  },
+-  {
+-    "EventCode": "0x2505C",
+-    "EventName": "PM_VSU_ISSUE",
+-    "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
+-  },
+   {
+     "EventCode": "0x2F054",
+     "EventName": "PM_DISP_SS1_2_INSTR_CYC",
+@@ -109,40 +44,15 @@
+     "EventName": "PM_DISP_SS1_4_INSTR_CYC",
+     "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
+   },
+-  {
+-    "EventCode": "0x2006C",
+-    "EventName": "PM_RUN_CYC_SMT4_MODE",
+-    "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
+-  },
+-  {
+-    "EventCode": "0x201E0",
+-    "EventName": "PM_MRK_DATA_FROM_MEMORY",
+-    "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
+-  },
+-  {
+-    "EventCode": "0x201E4",
+-    "EventName": "PM_MRK_DATA_FROM_L3MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
+-  },
+-  {
+-    "EventCode": "0x201E8",
+-    "EventName": "PM_THRESH_EXC_512",
+-    "BriefDescription": "Threshold counter exceeded a value of 512."
+-  },
+   {
+     "EventCode": "0x200F2",
+     "EventName": "PM_INST_DISP",
+-    "BriefDescription": "PowerPC instructions dispatched."
+-  },
+-  {
+-    "EventCode": "0x30132",
+-    "EventName": "PM_MRK_VSU_FIN",
+-    "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
++    "BriefDescription": "PowerPC instruction dispatched."
+   },
+   {
+-    "EventCode": "0x30038",
+-    "EventName": "PM_EXEC_STALL_DMISS_LMEM",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
++    "EventCode": "0x200FD",
++    "EventName": "PM_L1_ICACHE_MISS",
++    "BriefDescription": "Demand instruction cache miss."
+   },
+   {
+     "EventCode": "0x3F04A",
+@@ -152,12 +62,7 @@
+   {
+     "EventCode": "0x3405A",
+     "EventName": "PM_PRIVILEGED_INST_CMPL",
+-    "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
+-  },
+-  {
+-    "EventCode": "0x3F150",
+-    "EventName": "PM_MRK_ST_DRAIN_CYC",
+-    "BriefDescription": "cycles to drain st from core to L2."
++    "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
+   },
+   {
+     "EventCode": "0x3F054",
+@@ -170,103 +75,43 @@
+     "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
+   },
+   {
+-    "EventCode": "0x30162",
+-    "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
+-    "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
+-  },
+-  {
+-    "EventCode": "0x40114",
+-    "EventName": "PM_MRK_START_PROBE_NOP_DISP",
+-    "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
+-  },
+-  {
+-    "EventCode": "0x4001C",
+-    "EventName": "PM_VSU_FIN",
+-    "BriefDescription": "VSU instructions finished."
+-  },
+-  {
+-    "EventCode": "0x4C01A",
+-    "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
+-  },
+-  {
+-    "EventCode": "0x4D012",
+-    "EventName": "PM_PMC3_SAVED",
+-    "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
++    "EventCode": "0x30068",
++    "EventName": "PM_L1_ICACHE_RELOADED_PREF",
++    "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
+   },
+   {
+-    "EventCode": "0x4D022",
+-    "EventName": "PM_HYPERVISOR_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
++    "EventCode": "0x300F6",
++    "EventName": "PM_LD_DEMAND_MISS_L1",
++    "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
+   },
+   {
+-    "EventCode": "0x4D026",
+-    "EventName": "PM_ULTRAVISOR_CYC",
+-    "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
++    "EventCode": "0x300FE",
++    "EventName": "PM_DATA_FROM_L3MISS",
++    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+   },
+   {
+-    "EventCode": "0x4D028",
+-    "EventName": "PM_PRIVILEGED_CYC",
+-    "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
+-  },
+-  {
+-    "EventCode": "0x40030",
+-    "EventName": "PM_INST_FIN",
+-    "BriefDescription": "Instructions finished."
+-  },
+-  {
+-    "EventCode": "0x44146",
+-    "EventName": "PM_MRK_STCX_CORE_CYC",
+-    "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
++    "EventCode": "0x40012",
++    "EventName": "PM_L1_ICACHE_RELOADED_ALL",
++    "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+   },
+   {
+     "EventCode": "0x44054",
+     "EventName": "PM_VECTOR_LD_CMPL",
+-    "BriefDescription": "Vector load instructions completed."
+-  },
+-  {
+-    "EventCode": "0x45054",
+-    "EventName": "PM_FMA_CMPL",
+-    "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+-  },
+-  {
+-    "EventCode": "0x45056",
+-    "EventName": "PM_SCALAR_FLOP_CMPL",
+-    "BriefDescription": "Scalar floating point instructions completed."
+-  },
+-  {
+-    "EventCode": "0x4505C",
+-    "EventName": "PM_MATH_FLOP_CMPL",
+-    "BriefDescription": "Math floating point instructions completed."
++    "BriefDescription": "Vector load instruction completed."
+   },
+   {
+     "EventCode": "0x4D05E",
+     "EventName": "PM_BR_CMPL",
+     "BriefDescription": "A branch completed. All branches are included."
+   },
+-  {
+-    "EventCode": "0x4E15E",
+-    "EventName": "PM_MRK_INST_FLUSHED",
+-    "BriefDescription": "The marked instruction was flushed."
+-  },
+-  {
+-    "EventCode": "0x401E6",
+-    "EventName": "PM_MRK_INST_FROM_L3MISS",
+-    "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
+-  },
+-  {
+-    "EventCode": "0x401E8",
+-    "EventName": "PM_MRK_DATA_FROM_L2MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
+-  },
+   {
+     "EventCode": "0x400F0",
+     "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
+-    "BriefDescription": "Load Missed L1, counted at finish time."
++    "BriefDescription": "Load missed L1, counted at finish time."
+   },
+   {
+-    "EventCode": "0x500FA",
+-    "EventName": "PM_RUN_INST_CMPL",
+-    "BriefDescription": "Completed PowerPC instructions gated by the run latch."
++    "EventCode": "0x400FE",
++    "EventName": "PM_DATA_FROM_MEMORY",
++    "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
+   }
+ ]
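
Since PM_L1_ICACHE_RELOADED_ALL above counts every instruction cache reload while PM_L1_ICACHE_RELOADED_PREF counts the prefetch-side ones, their difference approximates the demand-side reloads. A tiny sketch with illustrative numbers:

    # Approximate demand-side icache reloads from the two counters above.
    # Per the descriptions the difference still includes prefetches that
    # were converted to demand, so treat it as an estimate.
    l1_icache_reloaded_all = 500_000   # illustrative
    l1_icache_reloaded_pref = 180_000  # illustrative
    demand_side_reloads = l1_icache_reloaded_all - l1_icache_reloaded_pref
    print(demand_side_reloads)  # 320000
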
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+index b8aded6045faa..a8272a2f05174 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+@@ -1,8 +1,13 @@
+ [
+   {
+-    "EventCode": "0x100FE",
+-    "EventName": "PM_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions completed."
++    "EventCode": "0x10004",
++    "EventName": "PM_EXEC_STALL_TRANSLATION",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
++  },
++  {
++    "EventCode": "0x10006",
++    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any other reason."
+   },
+   {
+     "EventCode": "0x1000C",
+@@ -12,7 +17,7 @@
+   {
+     "EventCode": "0x1000E",
+     "EventName": "PM_MMA_ISSUED",
+-    "BriefDescription": "MMA instructions issued."
++    "BriefDescription": "MMA instruction issued."
+   },
+   {
+     "EventCode": "0x10012",
+@@ -30,14 +35,19 @@
+     "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
+   },
+   {
+-    "EventCode": "0x10022",
+-    "EventName": "PM_PMC2_SAVED",
+-    "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
++    "EventCode": "0x10038",
++    "EventName": "PM_DISP_STALL_TRANSLATION",
++    "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
+   },
+   {
+-    "EventCode": "0x10024",
+-    "EventName": "PM_PMC5_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
++    "EventCode": "0x1003A",
++    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
++  },
++  {
++    "EventCode": "0x1003C",
++    "EventName": "PM_EXEC_STALL_DMISS_L2L3",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
+   },
+   {
+     "EventCode": "0x10058",
+@@ -54,11 +64,36 @@
+     "EventName": "PM_DERAT_MISS_2M",
+     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+   },
++  {
++    "EventCode": "0x1D05E",
++    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of power management."
++  },
++  {
++    "EventCode": "0x1E050",
++    "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
++  },
++  {
++    "EventCode": "0x1E054",
++    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
++  },
++  {
++    "EventCode": "0x1E056",
++    "EventName": "PM_EXEC_STALL_STORE_PIPE",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
++  },
+   {
+     "EventCode": "0x1E05A",
+     "EventName": "PM_CMPL_STALL_LWSYNC",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
+   },
++  {
++    "EventCode": "0x10064",
++    "EventName": "PM_DISP_STALL_IC_L2",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
++  },
+   {
+     "EventCode": "0x10068",
+     "EventName": "PM_BR_FIN",
+@@ -70,9 +105,9 @@
+     "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
+   },
+   {
+-    "EventCode": "0x1006C",
+-    "EventName": "PM_RUN_CYC_ST_MODE",
+-    "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
++    "EventCode": "0x100F8",
++    "EventName": "PM_DISP_STALL_CYC",
++    "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
+   },
+   {
+     "EventCode": "0x20004",
+@@ -80,54 +115,104 @@
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
+   },
+   {
+-    "EventCode": "0x2000A",
+-    "EventName": "PM_HYPERVISOR_CYC",
+-    "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
++    "EventCode": "0x20006",
++    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+   },
+   {
+     "EventCode": "0x2000E",
+     "EventName": "PM_LSU_LD1_FIN",
+     "BriefDescription": "LSU Finished an internal operation in LD1 port."
+   },
++  {
++    "EventCode": "0x2C010",
++    "EventName": "PM_EXEC_STALL_LSU",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
++  },
+   {
+     "EventCode": "0x2C014",
+     "EventName": "PM_CMPL_STALL_SPECIAL",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
+   },
++  {
++    "EventCode": "0x2C016",
++    "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
++    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
++  },
+   {
+     "EventCode": "0x2C018",
+     "EventName": "PM_EXEC_STALL_DMISS_L3MISS",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
+   },
++  {
++    "EventCode": "0x2C01C",
++    "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
++  },
++  {
++    "EventCode": "0x2C01E",
++    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
++  },
+   {
+     "EventCode": "0x2D010",
+     "EventName": "PM_LSU_ST1_FIN",
+     "BriefDescription": "LSU Finished an internal operation in ST1 port."
+   },
++  {
++    "EventCode": "0x10016",
++    "EventName": "PM_VSU0_ISSUE",
++    "BriefDescription": "VSU instruction issued to VSU pipe 0."
++  },
+   {
+     "EventCode": "0x2D012",
+     "EventName": "PM_VSU1_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 1."
++    "BriefDescription": "VSU instruction issued to VSU pipe 1."
++  },
++  {
++    "EventCode": "0x2505C",
++    "EventName": "PM_VSU_ISSUE",
++    "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
++  },
++  {
++    "EventCode": "0x4001C",
++    "EventName": "PM_VSU_FIN",
++    "BriefDescription": "VSU instruction finished."
+   },
+   {
+     "EventCode": "0x2D018",
+     "EventName": "PM_EXEC_STALL_VSU",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
+   },
++  {
++    "EventCode": "0x2D01A",
++    "EventName": "PM_DISP_STALL_IC_MISS",
++    "BriefDescription": "Cycles when dispatch was stalled for this thread due to an instruction cache miss."
++  },
+   {
+     "EventCode": "0x2D01C",
+     "EventName": "PM_CMPL_STALL_STCX",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+   },
+   {
+-    "EventCode": "0x2E01E",
+-    "EventName": "PM_EXEC_STALL_NTC_FLUSH",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
++    "EventCode": "0x2E018",
++    "EventName": "PM_DISP_STALL_FETCH",
++    "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
++  },
++  {
++    "EventCode": "0x2E01A",
++    "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the XVFC mapper/SRB was full."
+   },
+   {
+-    "EventCode": "0x2013C",
+-    "EventName": "PM_MRK_FX_LSU_FIN",
+-    "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
++    "EventCode": "0x2E01C",
++    "EventName": "PM_EXEC_STALL_TLBIE",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
++  },
++  {
++    "EventCode": "0x2E01E",
++    "EventName": "PM_EXEC_STALL_NTC_FLUSH",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous next-to-finish (NTF) instruction is still completing and the new NTF instruction is stalled at dispatch."
+   },
+   {
+     "EventCode": "0x2405A",
+@@ -135,14 +220,9 @@
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
+   },
+   {
+-    "EventCode": "0x201E2",
+-    "EventName": "PM_MRK_LD_MISS_L1",
+-    "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
+-  },
+-  {
+-    "EventCode": "0x200F4",
+-    "EventName": "PM_RUN_CYC",
+-    "BriefDescription": "Processor cycles gated by the run latch."
++    "EventCode": "0x30004",
++    "EventName": "PM_DISP_STALL_FLUSH",
++    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet next-to-complete (NTC). PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+   },
+   {
+     "EventCode": "0x30008",
+@@ -150,35 +230,45 @@
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
+   },
+   {
+-    "EventCode": "0x3001A",
+-    "EventName": "PM_LSU_ST2_FIN",
+-    "BriefDescription": "LSU Finished an internal operation in ST2 port."
++    "EventCode": "0x30014",
++    "EventName": "PM_EXEC_STALL_STORE",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
+   },
+   {
+-    "EventCode": "0x30020",
+-    "EventName": "PM_PMC2_REWIND",
+-    "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
++    "EventCode": "0x30016",
++    "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it to resolve."
+   },
+   {
+-    "EventCode": "0x30022",
+-    "EventName": "PM_PMC4_SAVED",
+-    "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
++    "EventCode": "0x30018",
++    "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
++  },
++  {
++    "EventCode": "0x3001A",
++    "EventName": "PM_LSU_ST2_FIN",
++    "BriefDescription": "LSU Finished an internal operation in ST2 port."
+   },
+   {
+-    "EventCode": "0x30024",
+-    "EventName": "PM_PMC6_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
++    "EventCode": "0x30026",
++    "EventName": "PM_EXEC_STALL_STORE_MISS",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
+   },
+   {
+     "EventCode": "0x30028",
+     "EventName": "PM_CMPL_STALL_MEM_ECC",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a STCX waiting for its result or a load waiting for non-critical sectors of data and ECC."
+   },
+   {
+     "EventCode": "0x30036",
+     "EventName": "PM_EXEC_STALL_SIMPLE_FX",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
+   },
++  {
++    "EventCode": "0x30038",
++    "EventName": "PM_EXEC_STALL_DMISS_LMEM",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCAPI cache, or local OpenCAPI memory."
++  },
+   {
+     "EventCode": "0x3003A",
+     "EventName": "PM_CMPL_STALL_EXCEPTION",
+@@ -187,17 +277,37 @@
+   {
+     "EventCode": "0x3F044",
+     "EventName": "PM_VSU2_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 2."
++    "BriefDescription": "VSU instruction issued to VSU pipe 2."
+   },
+   {
+     "EventCode": "0x30058",
+     "EventName": "PM_TLBIE_FIN",
+-    "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
++    "BriefDescription": "TLBIE instruction finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
++  },
++  {
++    "EventCode": "0x34054",
++    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
++  },
++  {
++    "EventCode": "0x34056",
++    "EventName": "PM_EXEC_STALL_LOAD_FINISH",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the next-to-finish (NTF) instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
++  },
++  {
++    "EventCode": "0x34058",
++    "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
++    "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
++  },
++  {
++    "EventCode": "0x3D05C",
++    "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+   },
+   {
+-    "EventCode": "0x3D058",
+-    "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
+-    "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
++    "EventCode": "0x3E052",
++    "EventName": "PM_DISP_STALL_IC_L3",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+   },
+   {
+     "EventCode": "0x30066",
+@@ -215,25 +325,45 @@
+     "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
+   },
+   {
+-    "EventCode": "0x40010",
+-    "EventName": "PM_PMC3_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
++    "EventCode": "0x4C010",
++    "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
+   },
+   {
+     "EventCode": "0x4C012",
+     "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it to resolve."
+   },
++  {
++    "EventCode": "0x4C016",
++    "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
++  },
+   {
+     "EventCode": "0x4C018",
+     "EventName": "PM_CMPL_STALL",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
+   },
++  {
++    "EventCode": "0x4C01A",
++    "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
++  },
+   {
+     "EventCode": "0x4C01E",
+     "EventName": "PM_LSU_ST3_FIN",
+     "BriefDescription": "LSU Finished an internal operation in ST3 port."
+   },
++  {
++    "EventCode": "0x4D014",
++    "EventName": "PM_EXEC_STALL_LOAD",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
++  },
++  {
++    "EventCode": "0x4D016",
++    "EventName": "PM_EXEC_STALL_PTESYNC",
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
++  },
+   {
+     "EventCode": "0x4D018",
+     "EventName": "PM_EXEC_STALL_BRU",
+@@ -249,10 +379,25 @@
+     "EventName": "PM_EXEC_STALL_TLBIEL",
+     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get sent to the nest."
+   },
++  {
++    "EventCode": "0x4D01E",
++    "EventName": "PM_DISP_STALL_BR_MPRED",
++    "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
++  },
++  {
++    "EventCode": "0x4E010",
++    "EventName": "PM_DISP_STALL_IC_L3MISS",
++    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
++  },
+   {
+     "EventCode": "0x4E012",
+     "EventName": "PM_EXEC_STALL_UNKNOWN",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the next-to-finish (NTF) instruction's finish and completion came too close together."
++  },
++  {
++    "EventCode": "0x4E01A",
++    "EventName": "PM_DISP_STALL_HELD_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any reason."
+   },
+   {
+     "EventCode": "0x4D020",
+@@ -260,24 +405,14 @@
+     "BriefDescription": "VSU instruction was issued to VSU pipe 3."
+   },
+   {
+-    "EventCode": "0x40132",
+-    "EventName": "PM_MRK_LSU_FIN",
+-    "BriefDescription": "LSU marked instruction finish."
++    "EventCode": "0x4003C",
++    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+   },
+   {
+     "EventCode": "0x45058",
+     "EventName": "PM_IC_MISS_CMPL",
+-    "BriefDescription": "Non-speculative icache miss, counted at completion."
+-  },
+-  {
+-    "EventCode": "0x4D050",
+-    "EventName": "PM_VSU_NON_FLOP_CMPL",
+-    "BriefDescription": "Non-floating point VSU instructions completed."
+-  },
+-  {
+-    "EventCode": "0x4D052",
+-    "EventName": "PM_2FLOP_CMPL",
+-    "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
++    "BriefDescription": "Non-speculative instruction cache miss, counted at completion."
+   },
+   {
+     "EventCode": "0x400F2",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+index b5d1bd39cfb22..0a2bf56ee7c10 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+@@ -1,22 +1,197 @@
+ [
++  {
++    "EventCode": "0x100FE",
++    "EventName": "PM_INST_CMPL",
++    "BriefDescription": "PowerPC instruction completed."
++  },
++  {
++    "EventCode": "0x1000A",
++    "EventName": "PM_PMC3_REWIND",
++    "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
++  },
++  {
++    "EventCode": "0x10010",
++    "EventName": "PM_PMC4_OVERFLOW",
++    "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x1001C",
++    "EventName": "PM_ULTRAVISOR_INST_CMPL",
++    "BriefDescription": "PowerPC instruction completed while the thread was in ultravisor state."
++  },
++  {
++    "EventCode": "0x100F0",
++    "EventName": "PM_CYC",
++    "BriefDescription": "Processor cycles."
++  },
++  {
++    "EventCode": "0x10020",
++    "EventName": "PM_PMC4_REWIND",
++    "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
++  },
++  {
++    "EventCode": "0x10022",
++    "EventName": "PM_PMC2_SAVED",
++    "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
++  },
++  {
++    "EventCode": "0x10024",
++    "EventName": "PM_PMC5_OVERFLOW",
++    "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x1F15E",
++    "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
++    "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
++  },
++  {
++    "EventCode": "0x1006C",
++    "EventName": "PM_RUN_CYC_ST_MODE",
++    "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
++  },
++  {
++    "EventCode": "0x101E8",
++    "EventName": "PM_THRESH_EXC_256",
++    "BriefDescription": "Threshold counter exceeded a count of 256."
++  },
++  {
++    "EventCode": "0x101EC",
++    "EventName": "PM_THRESH_MET",
++    "BriefDescription": "Threshold exceeded."
++  },
++  {
++    "EventCode": "0x100FA",
++    "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
++    "BriefDescription": "Cycles when at least one thread has the run latch set."
++  },
++  {
++    "EventCode": "0x2000A",
++    "EventName": "PM_HYPERVISOR_CYC",
++    "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
++  },
++  {
++    "EventCode": "0x2000C",
++    "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
++    "BriefDescription": "Cycles when the run latch is set for all threads."
++  },
++  {
++    "EventCode": "0x20010",
++    "EventName": "PM_PMC1_OVERFLOW",
++    "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x2006C",
++    "EventName": "PM_RUN_CYC_SMT4_MODE",
++    "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
++  },
++  {
++    "EventCode": "0x201E6",
++    "EventName": "PM_THRESH_EXC_32",
++    "BriefDescription": "Threshold counter exceeded a value of 32."
++  },
++  {
++    "EventCode": "0x201E8",
++    "EventName": "PM_THRESH_EXC_512",
++    "BriefDescription": "Threshold counter exceeded a value of 512."
++  },
++  {
++    "EventCode": "0x200F4",
++    "EventName": "PM_RUN_CYC",
++    "BriefDescription": "Processor cycles gated by the run latch."
++  },
++  {
++    "EventCode": "0x30010",
++    "EventName": "PM_PMC2_OVERFLOW",
++    "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x30020",
++    "EventName": "PM_PMC2_REWIND",
++    "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
++  },
++  {
++    "EventCode": "0x30022",
++    "EventName": "PM_PMC4_SAVED",
++    "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
++  },
++  {
++    "EventCode": "0x30024",
++    "EventName": "PM_PMC6_OVERFLOW",
++    "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x3006C",
++    "EventName": "PM_RUN_CYC_SMT2_MODE",
++    "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
++  },
+   {
+     "EventCode": "0x301E8",
+     "EventName": "PM_THRESH_EXC_64",
+     "BriefDescription": "Threshold counter exceeded a value of 64."
+   },
+   {
+-    "EventCode": "0x45050",
+-    "EventName": "PM_1FLOP_CMPL",
+-    "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
++    "EventCode": "0x301EA",
++    "EventName": "PM_THRESH_EXC_1024",
++    "BriefDescription": "Threshold counter exceeded a value of 1024."
++  },
++  {
++    "EventCode": "0x40010",
++    "EventName": "PM_PMC3_OVERFLOW",
++    "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
++  },
++  {
++    "EventCode": "0x40114",
++    "EventName": "PM_MRK_START_PROBE_NOP_DISP",
++    "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
++  },
++  {
++    "EventCode": "0x4D010",
++    "EventName": "PM_PMC1_SAVED",
++    "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
++  },
++  {
++    "EventCode": "0x4D012",
++    "EventName": "PM_PMC3_SAVED",
++    "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
++  },
++  {
++    "EventCode": "0x4D022",
++    "EventName": "PM_HYPERVISOR_INST_CMPL",
++    "BriefDescription": "PowerPC instruction completed while the thread was in hypervisor state."
++  },
++  {
++    "EventCode": "0x4D026",
++    "EventName": "PM_ULTRAVISOR_CYC",
++    "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
++  },
++  {
++    "EventCode": "0x4D028",
++    "EventName": "PM_PRIVILEGED_CYC",
++    "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
++  },
++  {
++    "EventCode": "0x4D02C",
++    "EventName": "PM_PMC1_REWIND",
++    "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
++  },
++  {
++    "EventCode": "0x40030",
++    "EventName": "PM_INST_FIN",
++    "BriefDescription": "Instruction finished."
++  },
++  {
++    "EventCode": "0x40134",
++    "EventName": "PM_MRK_INST_TIMEO",
++    "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
+   },
+   {
+-    "EventCode": "0x45052",
+-    "EventName": "PM_4FLOP_CMPL",
+-    "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
++    "EventCode": "0x401EA",
++    "EventName": "PM_THRESH_EXC_128",
++    "BriefDescription": "Threshold counter exceeded a value of 128."
+   },
+   {
+-    "EventCode": "0x4D054",
+-    "EventName": "PM_8FLOP_CMPL",
+-    "BriefDescription": "Four Double Precision vector instructions completed."
++    "EventCode": "0x400FA",
++    "EventName": "PM_RUN_INST_CMPL",
++    "BriefDescription": "PowerPC instruction completed while the run latch is set."
+   }
+ ]
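
Any of the pmc.json events can also be requested directly as a raw hardware event. A minimal sketch using perf_event_open(2) with PM_RUN_CYC (0x200F4 as listed above); error handling is trimmed and the busy loop is only there to have something to measure.

/*
 * Minimal sketch: count one raw PMU event, here PM_RUN_CYC (0x200F4),
 * over a busy loop via perf_event_open(2).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x200F4;		/* PM_RUN_CYC on Power10 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;			/* something to measure */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %lld\n", count);
	close(fd);
	return 0;
}
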
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+index db3766dca07c5..170c9aeb30d83 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+@@ -1,34 +1,9 @@
+ [
+-  {
+-    "EventCode": "0x1F15E",
+-    "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
+-    "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
+-  },
+-  {
+-    "EventCode": "0x20016",
+-    "EventName": "PM_ST_FIN",
+-    "BriefDescription": "Store finish count. Includes speculative activity."
+-  },
+   {
+     "EventCode": "0x20018",
+     "EventName": "PM_ST_FWD",
+     "BriefDescription": "Store forwards that finished."
+   },
+-  {
+-    "EventCode": "0x2011C",
+-    "EventName": "PM_MRK_NTF_CYC",
+-    "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
+-  },
+-  {
+-    "EventCode": "0x2E01C",
+-    "EventName": "PM_EXEC_STALL_TLBIE",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
+-  },
+-  {
+-    "EventCode": "0x201E6",
+-    "EventName": "PM_THRESH_EXC_32",
+-    "BriefDescription": "Threshold counter exceeded a value of 32."
+-  },
+   {
+     "EventCode": "0x200F0",
+     "EventName": "PM_ST_CMPL",
+@@ -37,21 +12,6 @@
+   {
+     "EventCode": "0x200FE",
+     "EventName": "PM_DATA_FROM_L2MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
+-  },
+-  {
+-    "EventCode": "0x30010",
+-    "EventName": "PM_PMC2_OVERFLOW",
+-    "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
+-  },
+-  {
+-    "EventCode": "0x4D010",
+-    "EventName": "PM_PMC1_SAVED",
+-    "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
+-  },
+-  {
+-    "EventCode": "0x4D05C",
+-    "EventName": "PM_DPP_FLOP_CMPL",
+-    "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
++    "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
+index 12e80bb7939be..82c42c46d8866 100755
+--- a/tools/perf/pmu-events/jevents.py
++++ b/tools/perf/pmu-events/jevents.py
+@@ -999,7 +999,7 @@ such as "arm/cortex-a34".''',
+   _args = ap.parse_args()
+ 
+   _args.output_file.write("""
+-#include "pmu-events/pmu-events.h"
++#include <pmu-events/pmu-events.h>
+ #include "util/header.h"
+ #include "util/pmu.h"
+ #include <string.h>
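
The quoted-to-angle-bracket switch in jevents.py is about include search order: "file.h" is looked up next to the including file first, while <file.h> consults only the -I and system paths, which is what a generated pmu-events.c living in a separate output directory needs. A toy illustration:

/*
 * Toy: both forms compile, but search differently. The quote form
 * tries the directory of the including file first and only then the
 * -I/system paths; the angle form skips the local-directory step,
 * so generated sources always resolve against the -I paths.
 */
#include <stdio.h>   /* angle form: -I and system dirs only */
#include "stdio.h"   /* quote form: local dir first, then the same search */

int main(void)
{
	puts("generated sources should use <...> so -I paths always win");
	return 0;
}
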
+diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
+index 086fd2179e41f..da3a9b50b1b1f 100644
+--- a/tools/perf/tests/dlfilter-test.c
++++ b/tools/perf/tests/dlfilter-test.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+  * Test dlfilter C API. A perf.data file is synthesized and then processed
+- * by perf script with a dlfilter named dlfilter-test-api-v0.so. Also a C file
++ * by perf script with dlfilters named dlfilter-test-api-v*.so. Also a C file
+  * is compiled to provide a dso to match the synthesized perf.data file.
+  */
+ 
+@@ -37,6 +37,8 @@
+ 
+ #define MAP_START 0x400000
+ 
++#define DLFILTER_TEST_NAME_MAX 128
++
+ struct test_data {
+ 	struct perf_tool tool;
+ 	struct machine *machine;
+@@ -45,6 +47,8 @@ struct test_data {
+ 	u64 bar;
+ 	u64 ip;
+ 	u64 addr;
++	char name[DLFILTER_TEST_NAME_MAX];
++	char desc[DLFILTER_TEST_NAME_MAX];
+ 	char perf[PATH_MAX];
+ 	char perf_data_file_name[PATH_MAX];
+ 	char c_file_name[PATH_MAX];
+@@ -215,7 +219,7 @@ static int write_prog(char *file_name)
+ 	return err ? -1 : 0;
+ }
+ 
+-static int get_dlfilters_path(char *buf, size_t sz)
++static int get_dlfilters_path(const char *name, char *buf, size_t sz)
+ {
+ 	char perf[PATH_MAX];
+ 	char path[PATH_MAX];
+@@ -224,12 +228,12 @@ static int get_dlfilters_path(char *buf, size_t sz)
+ 
+ 	perf_exe(perf, sizeof(perf));
+ 	perf_path = dirname(perf);
+-	snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", perf_path);
++	snprintf(path, sizeof(path), "%s/dlfilters/%s", perf_path, name);
+ 	if (access(path, R_OK)) {
+ 		exec_path = get_argv_exec_path();
+ 		if (!exec_path)
+ 			return -1;
+-		snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", exec_path);
++		snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, name);
+ 		free(exec_path);
+ 		if (access(path, R_OK))
+ 			return -1;
+@@ -244,9 +248,9 @@ static int check_filter_desc(struct test_data *td)
+ 	char *desc = NULL;
+ 	int ret;
+ 
+-	if (get_filter_desc(td->dlfilters, "dlfilter-test-api-v0.so", &desc, &long_desc) &&
++	if (get_filter_desc(td->dlfilters, td->name, &desc, &long_desc) &&
+ 	    long_desc && !strcmp(long_desc, "Filter used by the 'dlfilter C API' perf test") &&
+-	    desc && !strcmp(desc, "dlfilter to test v0 C API"))
++	    desc && !strcmp(desc, td->desc))
+ 		ret = 0;
+ 	else
+ 		ret = -1;
+@@ -284,7 +288,7 @@ static int get_ip_addr(struct test_data *td)
+ static int do_run_perf_script(struct test_data *td, int do_early)
+ {
+ 	return system_cmd("%s script -i %s "
+-			  "--dlfilter %s/dlfilter-test-api-v0.so "
++			  "--dlfilter %s/%s "
+ 			  "--dlarg first "
+ 			  "--dlarg %d "
+ 			  "--dlarg %" PRIu64 " "
+@@ -292,7 +296,7 @@ static int do_run_perf_script(struct test_data *td, int do_early)
+ 			  "--dlarg %d "
+ 			  "--dlarg last",
+ 			  td->perf, td->perf_data_file_name, td->dlfilters,
+-			  verbose, td->ip, td->addr, do_early);
++			  td->name, verbose, td->ip, td->addr, do_early);
+ }
+ 
+ static int run_perf_script(struct test_data *td)
+@@ -321,7 +325,7 @@ static int test__dlfilter_test(struct test_data *td)
+ 	u64 id = 99;
+ 	int err;
+ 
+-	if (get_dlfilters_path(td->dlfilters, PATH_MAX))
++	if (get_dlfilters_path(td->name, td->dlfilters, PATH_MAX))
+ 		return test_result("dlfilters not found", TEST_SKIP);
+ 
+ 	if (check_filter_desc(td))
+@@ -399,14 +403,18 @@ static void test_data__free(struct test_data *td)
+ 	}
+ }
+ 
+-static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
++static int test__dlfilter_ver(int ver)
+ {
+ 	struct test_data td = {.fd = -1};
+ 	int pid = getpid();
+ 	int err;
+ 
++	pr_debug("\n-- Testing version %d API --\n", ver);
++
+ 	perf_exe(td.perf, sizeof(td.perf));
+ 
++	snprintf(td.name, sizeof(td.name), "dlfilter-test-api-v%d.so", ver);
++	snprintf(td.desc, sizeof(td.desc), "dlfilter to test v%d C API", ver);
+ 	snprintf(td.perf_data_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-perf-data", pid);
+ 	snprintf(td.c_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog.c", pid);
+ 	snprintf(td.prog_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog", pid);
+@@ -416,4 +424,14 @@ static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __
+ 	return err;
+ }
+ 
++static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
++{
++	int err = test__dlfilter_ver(0);
++
++	if (err)
++		return err;
++	/* No test for version 1 */
++	return test__dlfilter_ver(2);
++}
++
+ DEFINE_SUITE("dlfilter C API", dlfilter);
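
The v0-then-v2 chaining above hard-codes the version list and skips the nonexistent v1 filter. The same pattern generalized over an array, with hypothetical stand-in names for the perf test helpers, looks like this:

/*
 * Sketch: run one test body per supported dlfilter API version,
 * stopping at the first failure. The version list and test_one() are
 * hypothetical stand-ins for test__dlfilter_ver().
 */
#include <stdio.h>

static int test_one(int ver)
{
	printf("-- Testing version %d API --\n", ver);
	return 0;	/* 0 = pass, matching the perf test convention */
}

int main(void)
{
	static const int vers[] = { 0, 2 };	/* no v1 filter exists */

	for (unsigned int i = 0; i < sizeof(vers) / sizeof(vers[0]); i++) {
		int err = test_one(vers[i]);

		if (err)
			return err;
	}
	return 0;
}
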
+diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
+index 13473aeba489c..6bf24b85294c7 100755
+--- a/tools/perf/tests/shell/stat_bpf_counters.sh
++++ b/tools/perf/tests/shell/stat_bpf_counters.sh
+@@ -22,10 +22,10 @@ compare_number()
+ }
+ 
+ # skip if --bpf-counters is not supported
+-if ! perf stat --bpf-counters true > /dev/null 2>&1; then
++if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+ 	if [ "$1" = "-v" ]; then
+ 		echo "Skipping: --bpf-counters not supported"
+-		perf --no-pager stat --bpf-counters true || true
++		perf --no-pager stat -e cycles --bpf-counters true || true
+ 	fi
+ 	exit 2
+ fi
+diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+index d724855d097c2..e75d0780dc788 100755
+--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
++++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+@@ -25,22 +25,22 @@ check_bpf_counter()
+ find_cgroups()
+ {
+ 	# try usual systemd slices first
+-	if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
++	if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
+ 		test_cgroups="system.slice,user.slice"
+ 		return
+ 	fi
+ 
+ 	# try root and self cgroups
+-	local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+-	if [ -z ${self_cgrp} ]; then
++	find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
++	if [ -z ${find_cgroups_self_cgrp} ]; then
+ 		# cgroup v2 doesn't specify perf_event
+-		self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
++		find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ 	fi
+ 
+-	if [ -z ${self_cgrp} ]; then
++	if [ -z ${find_cgroups_self_cgrp} ]; then
+ 		test_cgroups="/"
+ 	else
+-		test_cgroups="/,${self_cgrp}"
++		test_cgroups="/,${find_cgroups_self_cgrp}"
+ 	fi
+ }
+ 
+@@ -48,13 +48,11 @@ find_cgroups()
+ # Just check if it runs without failure and has non-zero results.
+ check_system_wide_counted()
+ {
+-	local output
+-
+-	output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
+-	if echo ${output} | grep -q -F "<not "; then
++	check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
++	if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
+ 		echo "Some system-wide events are not counted"
+ 		if [ "${verbose}" = "1" ]; then
+-			echo ${output}
++			echo ${check_system_wide_counted_output}
+ 		fi
+ 		exit 1
+ 	fi
+@@ -62,13 +60,11 @@ check_system_wide_counted()
+ 
+ check_cpu_list_counted()
+ {
+-	local output
+-
+-	output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
+-	if echo ${output} | grep -q -F "<not "; then
++	check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
++	if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
+ 		echo "Some CPU events are not counted"
+ 		if [ "${verbose}" = "1" ]; then
+-			echo ${output}
++			echo ${check_cpu_list_counted_output}
+ 		fi
+ 		exit 1
+ 	fi
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index c7ad9e0030800..70db5a7179056 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -407,11 +407,6 @@ static bool hist_browser__selection_has_children(struct hist_browser *browser)
+ 	return container_of(ms, struct callchain_list, ms)->has_children;
+ }
+ 
+-static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
+-{
+-	return browser->he_selection ? browser->he_selection->unfolded : false;
+-}
+-
+ static bool hist_browser__selection_unfolded(struct hist_browser *browser)
+ {
+ 	struct hist_entry *he = browser->he_selection;
+@@ -584,8 +579,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
+ 	return n;
+ }
+ 
+-static void __hist_entry__set_folding(struct hist_entry *he,
+-				      struct hist_browser *hb, bool unfold)
++static void hist_entry__set_folding(struct hist_entry *he,
++				    struct hist_browser *hb, bool unfold)
+ {
+ 	hist_entry__init_have_children(he);
+ 	he->unfolded = unfold ? he->has_children : false;
+@@ -603,34 +598,12 @@ static void __hist_entry__set_folding(struct hist_entry *he,
+ 		he->nr_rows = 0;
+ }
+ 
+-static void hist_entry__set_folding(struct hist_entry *he,
+-				    struct hist_browser *browser, bool unfold)
+-{
+-	double percent;
+-
+-	percent = hist_entry__get_percent_limit(he);
+-	if (he->filtered || percent < browser->min_pcnt)
+-		return;
+-
+-	__hist_entry__set_folding(he, browser, unfold);
+-
+-	if (!he->depth || unfold)
+-		browser->nr_hierarchy_entries++;
+-	if (he->leaf)
+-		browser->nr_callchain_rows += he->nr_rows;
+-	else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+-		browser->nr_hierarchy_entries++;
+-		he->has_no_entry = true;
+-		he->nr_rows = 1;
+-	} else
+-		he->has_no_entry = false;
+-}
+-
+ static void
+ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
+ {
+ 	struct rb_node *nd;
+ 	struct hist_entry *he;
++	double percent;
+ 
+ 	nd = rb_first_cached(&browser->hists->entries);
+ 	while (nd) {
+@@ -640,6 +613,21 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
+ 		nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
+ 
+ 		hist_entry__set_folding(he, browser, unfold);
++
++		percent = hist_entry__get_percent_limit(he);
++		if (he->filtered || percent < browser->min_pcnt)
++			continue;
++
++		if (!he->depth || unfold)
++			browser->nr_hierarchy_entries++;
++		if (he->leaf)
++			browser->nr_callchain_rows += he->nr_rows;
++		else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
++			browser->nr_hierarchy_entries++;
++			he->has_no_entry = true;
++			he->nr_rows = 1;
++		} else
++			he->has_no_entry = false;
+ 	}
+ }
+ 
+@@ -659,8 +647,10 @@ static void hist_browser__set_folding_selected(struct hist_browser *browser, boo
+ 	if (!browser->he_selection)
+ 		return;
+ 
+-	hist_entry__set_folding(browser->he_selection, browser, unfold);
+-	browser->b.nr_entries = hist_browser__nr_entries(browser);
++	if (unfold == browser->he_selection->unfolded)
++		return;
++
++	hist_browser__toggle_fold(browser);
+ }
+ 
+ static void ui_browser__warn_lost_events(struct ui_browser *browser)
+@@ -732,8 +722,8 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
+ 		hist_browser__set_folding(browser, true);
+ 		break;
+ 	case 'e':
+-		/* Expand the selected entry. */
+-		hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
++		/* Toggle expand/collapse the selected entry. */
++		hist_browser__toggle_fold(browser);
+ 		break;
+ 	case 'H':
+ 		browser->show_headers = !browser->show_headers;
+@@ -1779,7 +1769,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
+ 	hists_browser__scnprintf_hierarchy_headers(browser, headers,
+ 						   sizeof(headers));
+ 
+-	ui_browser__gotorc(&browser->b, 0, 0);
++	ui_browser__gotorc_title(&browser->b, 0, 0);
+ 	ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+ 	ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
+ }
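
The 'e' hotkey rework replaces one-way expansion with a toggle, and hist_browser__set_folding_selected() becomes a guarded toggle: compare the requested state with the current one and flip only on mismatch. Reduced to a toy entry with hypothetical names:

/* Sketch of set-vs-toggle: setting a fold state is a guarded toggle. */
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool unfolded;
};

static void toggle_fold(struct entry *e)
{
	e->unfolded = !e->unfolded;	/* stands in for hist_browser__toggle_fold() */
}

static void set_folding(struct entry *e, bool unfold)
{
	if (unfold == e->unfolded)
		return;			/* already in the requested state */
	toggle_fold(e);
}

int main(void)
{
	struct entry e = { .unfolded = false };

	set_folding(&e, true);
	printf("unfolded=%d\n", e.unfolded);	/* 1 */
	set_folding(&e, true);			/* no-op */
	printf("unfolded=%d\n", e.unfolded);	/* still 1 */
	return 0;
}
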
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index ba988a13dacb6..82956adf99632 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -1846,8 +1846,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ 	perf_exe(tpath, sizeof(tpath));
+ 
+ 	bfdf = bfd_openr(tpath, NULL);
+-	assert(bfdf);
+-	assert(bfd_check_format(bfdf, bfd_object));
++	if (bfdf == NULL)
++		abort();
++
++	if (!bfd_check_format(bfdf, bfd_object))
++		abort();
+ 
+ 	s = open_memstream(&buf, &buf_size);
+ 	if (!s) {
+@@ -1895,7 +1898,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ #else
+ 	disassemble = disassembler(bfdf);
+ #endif
+-	assert(disassemble);
++	if (disassemble == NULL)
++		abort();
+ 
+ 	fflush(s);
+ 	do {
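
The assert() removals in annotate.c matter because assert() compiles away entirely under -DNDEBUG, taking any side effects of its argument with it, and bfd_check_format() is exactly such a call. A self-contained demonstration of the pitfall (open_resource() is a hypothetical stand-in):

/*
 * With -DNDEBUG, assert() expands to nothing, so the expression inside
 * it is never evaluated and its side effects are lost.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int opened;

static int open_resource(void)
{
	opened = 1;		/* side effect the caller depends on */
	return 1;		/* success */
}

int main(void)
{
	assert(open_resource());	/* vanishes under -DNDEBUG */
	if (!opened) {
		/* Reached only when built with -DNDEBUG: the call was skipped. */
		fprintf(stderr, "resource never opened\n");
		abort();
	}
	puts("ok");
	return 0;
}
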
+diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
+index 0b30688d78a7f..b51544996046d 100644
+--- a/tools/perf/util/bpf-filter.c
++++ b/tools/perf/util/bpf-filter.c
+@@ -9,8 +9,8 @@
+ #include "util/evsel.h"
+ 
+ #include "util/bpf-filter.h"
+-#include "util/bpf-filter-flex.h"
+-#include "util/bpf-filter-bison.h"
++#include <util/bpf-filter-flex.h>
++#include <util/bpf-filter-bison.h>
+ 
+ #include "bpf_skel/sample-filter.h"
+ #include "bpf_skel/sample_filter.skel.h"
+@@ -62,6 +62,16 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
+ 	if (evsel->core.attr.sample_type & expr->sample_flags)
+ 		return 0;
+ 
++	if (expr->op == PBF_OP_GROUP_BEGIN) {
++		struct perf_bpf_filter_expr *group;
++
++		list_for_each_entry(group, &expr->groups, list) {
++			if (check_sample_flags(evsel, group) < 0)
++				return -1;
++		}
++		return 0;
++	}
++
+ 	info = get_sample_info(expr->sample_flags);
+ 	if (info == NULL) {
+ 		pr_err("Error: %s event does not have sample flags %lx\n",
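
check_sample_flags() now recurses into PBF_OP_GROUP_BEGIN expressions so every member of a grouped filter is validated rather than just the group node. The shape of that recursion, reduced to a toy tree with hypothetical types and array-based children:

/*
 * Toy version of the recursive validation above: a node either carries
 * a flag requirement or opens a group whose children must all pass.
 */
#include <stdio.h>

struct expr {
	int is_group;
	unsigned long flags;		/* leaf: required sample flags */
	const struct expr *kids;
	int nkids;
};

static int check(const struct expr *e, unsigned long have)
{
	if (e->is_group) {
		for (int i = 0; i < e->nkids; i++)
			if (check(&e->kids[i], have) < 0)
				return -1;
		return 0;
	}
	return (have & e->flags) ? 0 : -1;
}

int main(void)
{
	static const struct expr kids[] = {
		{ .flags = 0x1 }, { .flags = 0x4 },
	};
	const struct expr group = { .is_group = 1, .kids = kids, .nkids = 2 };

	printf("%d\n", check(&group, 0x5));	/* 0: both members satisfied */
	printf("%d\n", check(&group, 0x1));	/* -1: second member fails */
	return 0;
}
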
+diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
+index 46f74b2344dbb..e0f822ebb9b97 100644
+--- a/tools/perf/util/dlfilter.c
++++ b/tools/perf/util/dlfilter.c
+@@ -10,6 +10,8 @@
+ #include <subcmd/exec-cmd.h>
+ #include <linux/zalloc.h>
+ #include <linux/build_bug.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
+ 
+ #include "debug.h"
+ #include "event.h"
+@@ -63,6 +65,7 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
+ 	d_al->addr = al->addr;
+ 	d_al->comm = NULL;
+ 	d_al->filtered = 0;
++	d_al->priv = NULL;
+ }
+ 
+ static struct addr_location *get_al(struct dlfilter *d)
+@@ -151,6 +154,11 @@ static char **dlfilter__args(void *ctx, int *dlargc)
+ 	return d->dlargv;
+ }
+ 
++static bool has_priv(struct perf_dlfilter_al *d_al_p)
++{
++	return d_al_p->size >= offsetof(struct perf_dlfilter_al, priv) + sizeof(d_al_p->priv);
++}
++
+ static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlfilter_al *d_al_p)
+ {
+ 	struct dlfilter *d = (struct dlfilter *)ctx;
+@@ -166,6 +174,7 @@ static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlf
+ 	if (!thread)
+ 		return -1;
+ 
++	addr_location__init(&al);
+ 	thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
+ 
+ 	al_to_d_al(&al, &d_al);
+@@ -176,9 +185,29 @@ static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlf
+ 	memcpy(d_al_p, &d_al, min((size_t)sz, sizeof(d_al)));
+ 	d_al_p->size = sz;
+ 
++	if (has_priv(d_al_p))
++		d_al_p->priv = memdup(&al, sizeof(al));
++
+ 	return 0;
+ }
+ 
++static void dlfilter__al_cleanup(void *ctx __maybe_unused, struct perf_dlfilter_al *d_al_p)
++{
++	struct addr_location *al;
++
++	/* Ensure backward compatibility */
++	if (!has_priv(d_al_p) || !d_al_p->priv)
++		return;
++
++	al = d_al_p->priv;
++
++	d_al_p->priv = NULL;
++
++	addr_location__exit(al);
++
++	free(al);
++}
++
+ static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
+ {
+ 	struct dlfilter *d = (struct dlfilter *)ctx;
+@@ -296,6 +325,7 @@ static const struct perf_dlfilter_fns perf_dlfilter_fns = {
+ 	.resolve_addr    = dlfilter__resolve_addr,
+ 	.args            = dlfilter__args,
+ 	.resolve_address = dlfilter__resolve_address,
++	.al_cleanup      = dlfilter__al_cleanup,
+ 	.insn            = dlfilter__insn,
+ 	.srcline         = dlfilter__srcline,
+ 	.attr            = dlfilter__attr,
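
has_priv() gates on the size the filter reported for its view of perf_dlfilter_al, so dlfilters built before the priv field existed never have it read or written. The same append-only ABI idiom, sketched with a hypothetical struct:

/*
 * Sketch of the size-gated ABI check used by has_priv(): a field added
 * at the end of a versioned struct is only touched when the caller's
 * declared size covers it.
 */
#include <stddef.h>
#include <stdio.h>

struct api_obj {
	size_t size;	/* filled by the caller with sizeof(its view) */
	int old_field;
	void *priv;	/* appended in a later API version */
};

static int has_priv(const struct api_obj *o)
{
	return o->size >= offsetof(struct api_obj, priv) + sizeof(o->priv);
}

int main(void)
{
	struct api_obj new_caller = { .size = sizeof(struct api_obj) };
	struct api_obj old_caller = { .size = offsetof(struct api_obj, priv) };

	printf("new: %d, old: %d\n", has_priv(&new_caller), has_priv(&old_caller));
	return 0;	/* prints "new: 1, old: 0" */
}
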
+diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
+index 4814262e3805c..7410a165f68b7 100644
+--- a/tools/perf/util/expr.c
++++ b/tools/perf/util/expr.c
+@@ -10,8 +10,8 @@
+ #include "debug.h"
+ #include "evlist.h"
+ #include "expr.h"
+-#include "expr-bison.h"
+-#include "expr-flex.h"
++#include <util/expr-bison.h>
++#include <util/expr-flex.h>
+ #include "util/hashmap.h"
+ #include "smt.h"
+ #include "tsc.h"
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 52fbf526fe74a..7af85a479786b 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1605,8 +1605,15 @@ static int write_pmu_caps(struct feat_fd *ff,
+ 	int ret;
+ 
+ 	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (!pmu->name || !strcmp(pmu->name, "cpu") ||
+-		    perf_pmu__caps_parse(pmu) <= 0)
++		if (!strcmp(pmu->name, "cpu")) {
++			/*
++			 * The "cpu" PMU is special and covered by
++			 * HEADER_CPU_PMU_CAPS. Note, core PMUs are
++			 * counted/written here for ARM, s390 and Intel hybrid.
++			 */
++			continue;
++		}
++		if (perf_pmu__caps_parse(pmu) <= 0)
+ 			continue;
+ 		nr_pmu++;
+ 	}
+@@ -1619,23 +1626,17 @@ static int write_pmu_caps(struct feat_fd *ff,
+ 		return 0;
+ 
+ 	/*
+-	 * Write hybrid pmu caps first to maintain compatibility with
+-	 * older perf tool.
++	 * Note older perf tools assume core PMUs come first; this is a property
++	 * of perf_pmus__scan.
+ 	 */
+-	if (perf_pmus__num_core_pmus() > 1) {
+-		pmu = NULL;
+-		while ((pmu = perf_pmus__scan_core(pmu))) {
+-			ret = __write_pmu_caps(ff, pmu, true);
+-			if (ret < 0)
+-				return ret;
+-		}
+-	}
+-
+ 	pmu = NULL;
+ 	while ((pmu = perf_pmus__scan(pmu))) {
+-		if (pmu->is_core || !pmu->nr_caps)
++		if (!strcmp(pmu->name, "cpu")) {
++			/* Skip as above. */
++			continue;
++		}
++		if (perf_pmu__caps_parse(pmu) <= 0)
+ 			continue;
+-
+ 		ret = __write_pmu_caps(ff, pmu, true);
+ 		if (ret < 0)
+ 			return ret;
+@@ -4381,7 +4382,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 			     union perf_event *event,
+ 			     struct evlist **pevlist)
+ {
+-	u32 i, ids, n_ids;
++	u32 i, n_ids;
++	u64 *ids;
+ 	struct evsel *evsel;
+ 	struct evlist *evlist = *pevlist;
+ 
+@@ -4397,9 +4399,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 
+ 	evlist__add(evlist, evsel);
+ 
+-	ids = event->header.size;
+-	ids -= (void *)&event->attr.id - (void *)event;
+-	n_ids = ids / sizeof(u64);
++	n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
++	n_ids = n_ids / sizeof(u64);
+ 	/*
+ 	 * We don't have the cpu and thread maps on the header, so
+ 	 * for allocating the perf_sample_id table we fake 1 cpu and
+@@ -4408,8 +4409,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
+ 		return -ENOMEM;
+ 
++	ids = (void *)&event->attr.attr + event->attr.attr.size;
+ 	for (i = 0; i < n_ids; i++) {
+-		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
++		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
+ 	}
+ 
+ 	return 0;
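
The perf_event__process_attr() fix derives both the id count and the id array base from the sizes recorded in the event rather than from a fixed offsetof into event->attr. The arithmetic, reduced to a toy record whose layout is illustrative rather than the real perf wire format:

/*
 * Toy: a record is a fixed header, a variable-size body, then a
 * trailing u64 id array whose length is whatever space remains.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct header { uint16_t size; };

int main(void)
{
	enum { BODY = 14 };		/* variable-size part, like attr.size */
	unsigned char buf[sizeof(struct header) + BODY + 2 * sizeof(uint64_t)];
	struct header h = { .size = sizeof(buf) };
	uint64_t ids[2] = { 7, 9 }, first;
	uint32_t n_ids;

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &h, sizeof(h));
	memcpy(buf + sizeof(h) + BODY, ids, sizeof(ids));

	/* count = whatever trails the header and the body */
	n_ids = (h.size - sizeof(h) - BODY) / sizeof(uint64_t);
	memcpy(&first, buf + sizeof(h) + BODY, sizeof(first));
	printf("n_ids=%u first=%llu\n", n_ids, (unsigned long long)first);
	return 0;			/* prints n_ids=2 first=7 */
}
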
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index c9ec0cafb69d0..30311844eea7b 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -18,8 +18,8 @@
+ #include "debug.h"
+ #include <api/fs/tracing_path.h>
+ #include <perf/cpumap.h>
+-#include "parse-events-bison.h"
+-#include "parse-events-flex.h"
++#include <util/parse-events-bison.h>
++#include <util/parse-events-flex.h>
+ #include "pmu.h"
+ #include "pmus.h"
+ #include "asm/bug.h"
+diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
+index 9f28d4b5502f1..c590cf7f02a45 100644
+--- a/tools/perf/util/parse-events.y
++++ b/tools/perf/util/parse-events.y
+@@ -28,6 +28,13 @@ do { \
+ 		YYABORT; \
+ } while (0)
+ 
++#define PE_ABORT(val) \
++do { \
++	if (val == -ENOMEM) \
++		YYNOMEM; \
++	YYABORT; \
++} while (0)
++
+ static struct list_head* alloc_list(void)
+ {
+ 	struct list_head *list;
+@@ -285,37 +292,38 @@ event_pmu:
+ PE_NAME opt_pmu_config
+ {
+ 	struct parse_events_state *parse_state = _parse_state;
+-	struct parse_events_error *error = parse_state->error;
+ 	struct list_head *list = NULL, *orig_terms = NULL, *terms= NULL;
+ 	char *pattern = NULL;
+ 
+-#define CLEANUP_YYABORT					\
++#define CLEANUP						\
+ 	do {						\
+ 		parse_events_terms__delete($2);		\
+ 		parse_events_terms__delete(orig_terms);	\
+ 		free(list);				\
+ 		free($1);				\
+ 		free(pattern);				\
+-		YYABORT;				\
+ 	} while(0)
+ 
+-	if (parse_events_copy_term_list($2, &orig_terms))
+-		CLEANUP_YYABORT;
+-
+-	if (error)
+-		error->idx = @1.first_column;
++	if (parse_events_copy_term_list($2, &orig_terms)) {
++		CLEANUP;
++		YYNOMEM;
++	}
+ 
+ 	list = alloc_list();
+-	if (!list)
+-		CLEANUP_YYABORT;
++	if (!list) {
++		CLEANUP;
++		YYNOMEM;
++	}
+ 	/* Attempt to add to list assuming $1 is a PMU name. */
+ 	if (parse_events_add_pmu(parse_state, list, $1, $2, /*auto_merge_stats=*/false)) {
+ 		struct perf_pmu *pmu = NULL;
+ 		int ok = 0;
+ 
+ 		/* Failure to add, try wildcard expansion of $1 as a PMU name. */
+-		if (asprintf(&pattern, "%s*", $1) < 0)
+-			CLEANUP_YYABORT;
++		if (asprintf(&pattern, "%s*", $1) < 0) {
++			CLEANUP;
++			YYNOMEM;
++		}
+ 
+ 		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+ 			char *name = pmu->name;
+@@ -330,8 +338,10 @@ PE_NAME opt_pmu_config
+ 			    !perf_pmu__match(pattern, pmu->alias_name, $1)) {
+ 				bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
+ 
+-				if (parse_events_copy_term_list(orig_terms, &terms))
+-					CLEANUP_YYABORT;
++				if (parse_events_copy_term_list(orig_terms, &terms)) {
++					CLEANUP;
++					YYNOMEM;
++				}
+ 				if (!parse_events_add_pmu(parse_state, list, pmu->name, terms,
+ 							  auto_merge_stats)) {
+ 					ok++;
+@@ -347,15 +357,23 @@ PE_NAME opt_pmu_config
+ 			ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list);
+ 			$2 = NULL;
+ 		}
+-		if (!ok)
+-			CLEANUP_YYABORT;
++		if (!ok) {
++			struct parse_events_error *error = parse_state->error;
++			char *help;
++
++			if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", $1) < 0)
++				help = NULL;
++			parse_events_error__handle(error, @1.first_column,
++						   strdup("Bad event or PMU"),
++						   help);
++			CLEANUP;
++			YYABORT;
++		}
+ 	}
+-	parse_events_terms__delete($2);
+-	parse_events_terms__delete(orig_terms);
+-	free(pattern);
+-	free($1);
+ 	$$ = list;
+-#undef CLEANUP_YYABORT
++	list = NULL;
++	CLEANUP;
++#undef CLEANUP
+ }
+ |
+ PE_KERNEL_PMU_EVENT sep_dc
+@@ -376,9 +394,18 @@ PE_NAME sep_dc
+ 	int err;
+ 
+ 	err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list);
++	if (err < 0) {
++		struct parse_events_state *parse_state = _parse_state;
++		struct parse_events_error *error = parse_state->error;
++		char *help;
++
++		if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", $1) < 0)
++			help = NULL;
++		parse_events_error__handle(error, @1.first_column, strdup("Bad event name"), help);
++		free($1);
++		PE_ABORT(err);
++	}
+ 	free($1);
+-	if (err < 0)
+-		YYABORT;
+ 	$$ = list;
+ }
+ |
+@@ -448,12 +475,13 @@ value_sym '/' event_config '/'
+ 	bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	err = parse_events_add_numeric(_parse_state, list, type, config, $3, wildcard);
+ 	parse_events_terms__delete($3);
+ 	if (err) {
+ 		free_list_evsel(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -464,21 +492,28 @@ value_sym sep_slash_slash_dc
+ 	int type = $1 >> 16;
+ 	int config = $1 & 255;
+ 	bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
++	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
+-	ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config,
+-					  /*head_config=*/NULL, wildcard));
++	if (!list)
++		YYNOMEM;
++	err = parse_events_add_numeric(_parse_state, list, type, config, /*head_config=*/NULL, wildcard);
++	if (err)
++		PE_ABORT(err);
+ 	$$ = list;
+ }
+ |
+ PE_VALUE_SYM_TOOL sep_slash_slash_dc
+ {
+ 	struct list_head *list;
++	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
+-	ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
++	if (!list)
++		YYNOMEM;
++	err = parse_events_add_tool(_parse_state, list, $1);
++	if (err)
++		YYNOMEM;
+ 	$$ = list;
+ }
+ 
+@@ -490,14 +525,16 @@ PE_LEGACY_CACHE opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
++
+ 	err = parse_events_add_cache(list, &parse_state->idx, $1, parse_state, $2);
+ 
+ 	parse_events_terms__delete($2);
+ 	free($1);
+ 	if (err) {
+ 		free_list_evsel(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -509,14 +546,16 @@ PE_PREFIX_MEM PE_VALUE PE_BP_SLASH PE_VALUE PE_BP_COLON PE_MODIFIER_BP opt_event
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
++
+ 	err = parse_events_add_breakpoint(_parse_state, list,
+ 					  $2, $6, $4, $7);
+ 	parse_events_terms__delete($7);
+ 	free($6);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -527,13 +566,15 @@ PE_PREFIX_MEM PE_VALUE PE_BP_SLASH PE_VALUE opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
++
+ 	err = parse_events_add_breakpoint(_parse_state, list,
+ 					  $2, NULL, $4, $5);
+ 	parse_events_terms__delete($5);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -544,14 +585,16 @@ PE_PREFIX_MEM PE_VALUE PE_BP_COLON PE_MODIFIER_BP opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
++
+ 	err = parse_events_add_breakpoint(_parse_state, list,
+ 					  $2, $4, 0, $5);
+ 	parse_events_terms__delete($5);
+ 	free($4);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -562,13 +605,14 @@ PE_PREFIX_MEM PE_VALUE opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	err = parse_events_add_breakpoint(_parse_state, list,
+ 					  $2, NULL, 0, $3);
+ 	parse_events_terms__delete($3);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -582,7 +626,8 @@ tracepoint_name opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	if (error)
+ 		error->idx = @1.first_column;
+ 
+@@ -594,7 +639,7 @@ tracepoint_name opt_event_config
+ 	free($1.event);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -614,13 +659,14 @@ PE_VALUE ':' PE_VALUE opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	err = parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4,
+ 				       /*wildcard=*/false);
+ 	parse_events_terms__delete($4);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -633,7 +679,8 @@ PE_RAW opt_event_config
+ 	u64 num;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	errno = 0;
+ 	num = strtoull($1 + 1, NULL, 16);
+ 	ABORT_ON(errno);
+@@ -643,7 +690,7 @@ PE_RAW opt_event_config
+ 	parse_events_terms__delete($2);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -656,13 +703,14 @@ PE_BPF_OBJECT opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	err = parse_events_load_bpf(parse_state, list, $1, false, $2);
+ 	parse_events_terms__delete($2);
+ 	free($1);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -673,12 +721,13 @@ PE_BPF_SOURCE opt_event_config
+ 	int err;
+ 
+ 	list = alloc_list();
+-	ABORT_ON(!list);
++	if (!list)
++		YYNOMEM;
+ 	err = parse_events_load_bpf(_parse_state, list, $1, true, $2);
+ 	parse_events_terms__delete($2);
+ 	if (err) {
+ 		free(list);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = list;
+ }
+@@ -738,7 +787,8 @@ event_term
+ 	struct list_head *head = malloc(sizeof(*head));
+ 	struct parse_events_term *term = $1;
+ 
+-	ABORT_ON(!head);
++	if (!head)
++		YYNOMEM;
+ 	INIT_LIST_HEAD(head);
+ 	list_add_tail(&term->list, head);
+ 	$$ = head;
+@@ -752,11 +802,12 @@ event_term:
+ PE_RAW
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_RAW,
++					 strdup("raw"), $1, &@1, &@1);
+ 
+-	if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_RAW,
+-					strdup("raw"), $1, &@1, &@1)) {
++	if (err) {
+ 		free($1);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -764,12 +815,12 @@ PE_RAW
+ name_or_raw '=' name_or_legacy
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
+ 
+-	if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, $3, &@1, &@3)) {
++	if (err) {
+ 		free($1);
+ 		free($3);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -777,11 +828,12 @@ name_or_raw '=' name_or_legacy
+ name_or_raw '=' PE_VALUE
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
++					 $1, $3, false, &@1, &@3);
+ 
+-	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, $3, false, &@1, &@3)) {
++	if (err) {
+ 		free($1);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -789,12 +841,13 @@ name_or_raw '=' PE_VALUE
+ name_or_raw '=' PE_TERM_HW
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
++					 $1, $3.str, &@1, &@3);
+ 
+-	if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, $3.str, &@1, &@3)) {
++	if (err) {
+ 		free($1);
+ 		free($3.str);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -802,11 +855,12 @@ name_or_raw '=' PE_TERM_HW
+ PE_LEGACY_CACHE
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
++					 $1, 1, true, &@1, NULL);
+ 
+-	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
+-					$1, 1, true, &@1, NULL)) {
++	if (err) {
+ 		free($1);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -814,11 +868,12 @@ PE_LEGACY_CACHE
+ PE_NAME
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
++					 $1, 1, true, &@1, NULL);
+ 
+-	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, 1, true, &@1, NULL)) {
++	if (err) {
+ 		free($1);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -826,11 +881,12 @@ PE_NAME
+ PE_TERM_HW
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_HARDWARE,
++					 $1.str, $1.num & 255, false, &@1, NULL);
+ 
+-	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_HARDWARE,
+-				   $1.str, $1.num & 255, false, &@1, NULL)) {
++	if (err) {
+ 		free($1.str);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -838,10 +894,11 @@ PE_TERM_HW
+ PE_TERM '=' name_or_legacy
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3);
+ 
+-	if (parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3)) {
++	if (err) {
+ 		free($3);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -849,10 +906,11 @@ PE_TERM '=' name_or_legacy
+ PE_TERM '=' PE_TERM_HW
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, (int)$1, NULL, $3.str, &@1, &@3);
+ 
+-	if (parse_events_term__str(&term, (int)$1, NULL, $3.str, &@1, &@3)) {
++	if (err) {
+ 		free($3.str);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -860,37 +918,46 @@ PE_TERM '=' PE_TERM_HW
+ PE_TERM '=' PE_TERM
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__term(&term, (int)$1, (int)$3, &@1, &@3);
++
++	if (err)
++		PE_ABORT(err);
+ 
+-	ABORT_ON(parse_events_term__term(&term, (int)$1, (int)$3, &@1, &@3));
+ 	$$ = term;
+ }
+ |
+ PE_TERM '=' PE_VALUE
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, (int)$1, NULL, $3, false, &@1, &@3);
++
++	if (err)
++		PE_ABORT(err);
+ 
+-	ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, false, &@1, &@3));
+ 	$$ = term;
+ }
+ |
+ PE_TERM
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, (int)$1, NULL, 1, true, &@1, NULL);
++
++	if (err)
++		PE_ABORT(err);
+ 
+-	ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, true, &@1, NULL));
+ 	$$ = term;
+ }
+ |
+ name_or_raw array '=' name_or_legacy
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $4, &@1, &@4);
+ 
+-	if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, $4, &@1, &@4)) {
++	if (err) {
+ 		free($1);
+ 		free($4);
+ 		free($2.ranges);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	term->array = $2;
+ 	$$ = term;
+@@ -899,12 +966,12 @@ name_or_raw array '=' name_or_legacy
+ name_or_raw array '=' PE_VALUE
+ {
+ 	struct parse_events_term *term;
++	int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $4, false, &@1, &@4);
+ 
+-	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+-					$1, $4, false, &@1, &@4)) {
++	if (err) {
+ 		free($1);
+ 		free($2.ranges);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	term->array = $2;
+ 	$$ = term;
+@@ -914,13 +981,15 @@ PE_DRV_CFG_TERM
+ {
+ 	struct parse_events_term *term;
+ 	char *config = strdup($1);
++	int err;
+ 
+-	ABORT_ON(!config);
+-	if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
+-					config, $1, &@1, NULL)) {
++	if (!config)
++		YYNOMEM;
++	err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG, config, $1, &@1, NULL);
++	if (err) {
+ 		free($1);
+ 		free(config);
+-		YYABORT;
++		PE_ABORT(err);
+ 	}
+ 	$$ = term;
+ }
+@@ -946,7 +1015,8 @@ array_terms ',' array_term
+ 	new_array.ranges = realloc($1.ranges,
+ 				sizeof(new_array.ranges[0]) *
+ 				new_array.nr_ranges);
+-	ABORT_ON(!new_array.ranges);
++	if (!new_array.ranges)
++		YYNOMEM;
+ 	memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
+ 	       $3.nr_ranges * sizeof(new_array.ranges[0]));
+ 	free($3.ranges);
+@@ -962,7 +1032,8 @@ PE_VALUE
+ 
+ 	array.nr_ranges = 1;
+ 	array.ranges = malloc(sizeof(array.ranges[0]));
+-	ABORT_ON(!array.ranges);
++	if (!array.ranges)
++		YYNOMEM;
+ 	array.ranges[0].start = $1;
+ 	array.ranges[0].length = 1;
+ 	$$ = array;
+@@ -975,7 +1046,8 @@ PE_VALUE PE_ARRAY_RANGE PE_VALUE
+ 	ABORT_ON($3 < $1);
+ 	array.nr_ranges = 1;
+ 	array.ranges = malloc(sizeof(array.ranges[0]));
+-	ABORT_ON(!array.ranges);
++	if (!array.ranges)
++		YYNOMEM;
+ 	array.ranges[0].start = $1;
+ 	array.ranges[0].length = $3 - $1 + 1;
+ 	$$ = array;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 28380e7aa8d01..d5406effc1695 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -19,8 +19,8 @@
+ #include "evsel.h"
+ #include "pmu.h"
+ #include "pmus.h"
+-#include "pmu-bison.h"
+-#include "pmu-flex.h"
++#include <util/pmu-bison.h>
++#include <util/pmu-flex.h>
+ #include "parse-events.h"
+ #include "print-events.h"
+ #include "header.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
+index 31f1e815f6719..ee0458a5ce789 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
+@@ -8,6 +8,7 @@
+ #include <linux/unistd.h>
+ #include <linux/mount.h>
+ #include <sys/syscall.h>
++#include "bpf/libbpf_internal.h"
+ 
+ static inline int sys_fsopen(const char *fsname, unsigned flags)
+ {
+@@ -155,7 +156,7 @@ static void validate_pin(int map_fd, const char *map_name, int src_value,
+ 	ASSERT_OK(err, "obj_pin");
+ 
+ 	/* cleanup */
+-	if (pin_opts.path_fd >= 0)
++	if (path_kind == PATH_FD_REL && pin_opts.path_fd >= 0)
+ 		close(pin_opts.path_fd);
+ 	if (old_cwd[0])
+ 		ASSERT_OK(chdir(old_cwd), "restore_cwd");
+@@ -220,7 +221,7 @@ static void validate_get(int map_fd, const char *map_name, int src_value,
+ 		goto cleanup;
+ 
+ 	/* cleanup */
+-	if (get_opts.path_fd >= 0)
++	if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0)
+ 		close(get_opts.path_fd);
+ 	if (old_cwd[0])
+ 		ASSERT_OK(chdir(old_cwd), "restore_cwd");
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
+index d12665490a905..36d829a65aa44 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
+@@ -179,6 +179,32 @@
+ 		__ret;                                                         \
+ 	})
+ 
++static inline int poll_connect(int fd, unsigned int timeout_sec)
++{
++	struct timeval timeout = { .tv_sec = timeout_sec };
++	fd_set wfds;
++	int r, eval;
++	socklen_t esize = sizeof(eval);
++
++	FD_ZERO(&wfds);
++	FD_SET(fd, &wfds);
++
++	r = select(fd + 1, NULL, &wfds, NULL, &timeout);
++	if (r == 0)
++		errno = ETIME;
++	if (r != 1)
++		return -1;
++
++	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
++		return -1;
++	if (eval != 0) {
++		errno = eval;
++		return -1;
++	}
++
++	return 0;
++}
++
+ static inline int poll_read(int fd, unsigned int timeout_sec)
+ {
+ 	struct timeval timeout = { .tv_sec = timeout_sec };
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+index 5674a9d0cacf0..8df8cbb447f10 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+@@ -1452,11 +1452,18 @@ static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)
+ 	if (p < 0)
+ 		goto close_cli;
+ 
++	if (poll_connect(c, IO_TIMEOUT_SEC) < 0) {
++		FAIL_ERRNO("poll_connect");
++		goto close_acc;
++	}
++
+ 	*v0 = p;
+ 	*v1 = c;
+ 
+ 	return 0;
+ 
++close_acc:
++	close(p);
+ close_cli:
+ 	close(c);
+ close_srv:
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
+index 213d890ed1886..174376ddbc6c7 100644
+--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: event trigger - test inter-event histogram trigger trace action with dynamic string param
+-# requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program
++# requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program
+ 
+ fail() { #msg
+     echo $1
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+index 955e3ceea44b5..b927ee54c02da 100644
+--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: event trigger - test synthetic_events syntax parser errors
+-# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
++# requires: synthetic_events error_log "' >> synthetic_events":README
+ 
+ check_error() { # command-with-error-pos-by-^
+     ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
+diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
+index 1c952d1401d46..70e0a465e30da 100644
+--- a/tools/testing/selftests/kselftest/runner.sh
++++ b/tools/testing/selftests/kselftest/runner.sh
+@@ -36,7 +36,8 @@ tap_timeout()
+ {
+ 	# Make sure tests will time out if utility is available.
+ 	if [ -x /usr/bin/timeout ] ; then
+-		/usr/bin/timeout --foreground "$kselftest_timeout" $1
++		/usr/bin/timeout --foreground "$kselftest_timeout" \
++			/usr/bin/timeout "$kselftest_timeout" $1
+ 	else
+ 		$1
+ 	fi
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index d17854285f2b6..118e0964bda94 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -106,7 +106,7 @@ endef
+ run_tests: all
+ ifdef building_out_of_srctree
+ 	@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
+-		rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
++		rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ 	fi
+ 	@if [ "X$(TEST_PROGS)" != "X" ]; then \
+ 		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
+@@ -120,7 +120,7 @@ endif
+ 
+ define INSTALL_SINGLE_RULE
+ 	$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
+-	$(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
++	$(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
+ endef
+ 
+ define INSTALL_RULE
+diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
+index 58edfc15d28bd..e7ebe72e879d7 100644
+--- a/tools/testing/selftests/net/bind_wildcard.c
++++ b/tools/testing/selftests/net/bind_wildcard.c
+@@ -100,7 +100,7 @@ void bind_sockets(struct __test_metadata *_metadata,
+ TEST_F(bind_wildcard, v4_v6)
+ {
+ 	bind_sockets(_metadata, self,
+-		     (struct sockaddr *)&self->addr4, sizeof(self->addr6),
++		     (struct sockaddr *)&self->addr4, sizeof(self->addr4),
+ 		     (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+ }
+ 
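
A note on the recurring shape of the parse-events.y hunks above: the patch
replaces blanket ABORT_ON()/YYABORT error handling with two distinct paths,
YYNOMEM for allocation failures and PE_ABORT(err) to surface the specific
error code. As a rough sketch of the converted action pattern (the rule name
and parse_events_add_foo() are hypothetical stand-ins; PE_ABORT() is assumed
to be a helper introduced earlier in this patch, outside this excerpt, while
YYNOMEM is Bison's own out-of-memory bail-out):

	some_rule: TOKEN opt_event_config
	{
		struct list_head *list;
		int err;

		list = alloc_list();
		if (!list)
			YYNOMEM;	/* distinct out-of-memory path */
		err = parse_events_add_foo(_parse_state, list, $1, $2);
		parse_events_terms__delete($2);
		if (err) {
			free_list_evsel(list);
			PE_ABORT(err);	/* propagate the specific error */
		}
		$$ = list;
	}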


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-15 17:55 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-15 17:55 UTC (permalink / raw
  To: gentoo-commits

commit:     0dd1f2b7dd1a6ae2aa7ae62e410e833431f14e39
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 15 17:55:05 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 15 17:55:05 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0dd1f2b7

selinux: fix handling of empty opts in selinux_fs_context_submount()

Bug: https://bugs.gentoo.org/914204

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                   |  4 +++
 1515_selinux-fix-handling-of-empty-opts.patch | 51 +++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/0000_README b/0000_README
index 25625324..270cfbdf 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1515_selinux-fix-handling-of-empty-opts.patch
+From:   https://www.spinics.net/lists/linux-fsdevel/msg249428.html
+Desc:   selinux: fix handling of empty opts in selinux_fs_context_submount()
+
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1515_selinux-fix-handling-of-empty-opts.patch b/1515_selinux-fix-handling-of-empty-opts.patch
new file mode 100644
index 00000000..10336ec5
--- /dev/null
+++ b/1515_selinux-fix-handling-of-empty-opts.patch
@@ -0,0 +1,51 @@
+selinux: fix handling of empty opts in selinux_fs_context_submount()
+
+selinux_set_mnt_opts() relies on the fact that the mount options pointer
+is always NULL when all options are unset (specifically in its
+!selinux_initialized() branch). However, the new
+selinux_fs_context_submount() hook breaks this rule by allocating a new
+structure even if no options are set. That causes any submount created
+before a SELinux policy is loaded to be rejected in
+selinux_set_mnt_opts().
+
+Fix this by making selinux_fs_context_submount() leave fc->security
+set to NULL when there are no options to be copied from the reference
+superblock.
+
+Reported-by: Adam Williamson <awilliam@xxxxxxxxxx>
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2236345
+Fixes: d80a8f1b58c2 ("vfs, security: Fix automount superblock LSM init problem, preventing NFS sb sharing")
+Signed-off-by: Ondrej Mosnacek <omosnace@xxxxxxxxxx>
+---
+ security/selinux/hooks.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 10350534de6d6..2aa0e219d7217 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ static int selinux_fs_context_submount(struct fs_context *fc,
+ 				   struct super_block *reference)
+ {
+-	const struct superblock_security_struct *sbsec;
++	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
+ 	struct selinux_mnt_opts *opts;
+ 
++	/*
++	 * Ensure that fc->security remains NULL when no options are set
++	 * as expected by selinux_set_mnt_opts().
++	 */
++	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
++		return 0;
++
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+ 		return -ENOMEM;
+ 
+-	sbsec = selinux_superblock(reference);
+ 	if (sbsec->flags & FSCONTEXT_MNT)
+ 		opts->fscontext_sid = sbsec->sid;
+ 	if (sbsec->flags & CONTEXT_MNT)
+-- 
+2.41.0
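
Condensed, the patched hook takes roughly the following shape (a sketch
pieced together from the hunk above; the CONTEXT_MNT/DEFCONTEXT_MNT copying
and the final fc->security assignment sit below the hunk and are elided
here, unchanged):

	static int selinux_fs_context_submount(struct fs_context *fc,
					       struct super_block *reference)
	{
		const struct superblock_security_struct *sbsec =
					selinux_superblock(reference);
		struct selinux_mnt_opts *opts;

		/* No mount options on the reference superblock: leave
		 * fc->security NULL, as selinux_set_mnt_opts() expects
		 * before a policy is loaded. */
		if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
			return 0;

		opts = kzalloc(sizeof(*opts), GFP_KERNEL);
		if (!opts)
			return -ENOMEM;

		if (sbsec->flags & FSCONTEXT_MNT)
			opts->fscontext_sid = sbsec->sid;
		/* ... remaining option copying and the fc->security = opts
		 * assignment follow, unchanged ... */
		return 0;
	}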


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-13 12:07 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-13 12:07 UTC (permalink / raw
  To: gentoo-commits

commit:     8d3a7a27e859c54c74edab90803e9aedfc9681b0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 12:07:36 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 12:07:36 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8d3a7a27

Minor fix for BMQ Patch (Kconfig)

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |   2 +-
 5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch | 647 ++++++++++++++--------------
 2 files changed, 317 insertions(+), 332 deletions(-)

diff --git a/0000_README b/0000_README
index de8216ab..25625324 100644
--- a/0000_README
+++ b/0000_README
@@ -100,5 +100,5 @@ From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
 Patch:  5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
-From:   https://github.com/hhoffstaette/kernel-patches/
+From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
index f305f913..cb6b2d12 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
@@ -1,8 +1,5 @@
-
-Thanks to torvic9 in https://gitlab.com/alfredchen/linux-prjc/-/issues/85
-
 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 722b6ec..223e96f 100644
+index 23ebe34ff901..3fd78edff69c 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
 @@ -5553,6 +5553,12 @@
@@ -19,7 +16,7 @@ index 722b6ec..223e96f 100644
  
  	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 3800fab..12ea62d 100644
+index 3800fab1619b..12ea62da87e8 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
 +++ b/Documentation/admin-guide/sysctl/kernel.rst
 @@ -1616,3 +1616,13 @@ is 10 seconds.
@@ -36,9 +33,9 @@ index 3800fab..12ea62d 100644
 +  0 - No yield.
 +  1 - Deboost and requeue task. (default)
 +  2 - Set run queue skip task.
-diff --git a/b/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
 new file mode 100644
-index 0000000..05c84ee
+index 000000000000..05c84eec0f31
 --- /dev/null
 +++ b/Documentation/scheduler/sched-BMQ.txt
 @@ -0,0 +1,110 @@
@@ -153,7 +150,7 @@ index 0000000..05c84ee
 +priority boost from unblocking while background threads that do most of the
 +processing receive the priority penalty for using their entire timeslice.
 diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 9df3f48..8a0596f 100644
+index 9df3f4839662..8a0596fbd14e 100644
 --- a/fs/proc/base.c
 +++ b/fs/proc/base.c
 @@ -480,7 +480,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
@@ -166,7 +163,7 @@ index 9df3f48..8a0596f 100644
  		   task->sched_info.pcount);
  
 diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
-index 8874f68..59eb72b 100644
+index 8874f681b056..59eb72bf7d5f 100644
 --- a/include/asm-generic/resource.h
 +++ b/include/asm-generic/resource.h
 @@ -23,7 +23,7 @@
@@ -178,8 +175,81 @@ index 8874f68..59eb72b 100644
  	[RLIMIT_RTPRIO]		= { 0, 0 },				\
  	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
  }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 609bde814cb0..bfdf715804a1 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -763,8 +763,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -778,6 +784,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -786,6 +793,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -796,6 +817,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1548,6 +1570,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
 diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 7c83d4d..fa30f98 100644
+index 7c83d4d5a971..fa30f98cb2be 100644
 --- a/include/linux/sched/deadline.h
 +++ b/include/linux/sched/deadline.h
 @@ -1,5 +1,24 @@
@@ -216,7 +286,7 @@ index 7c83d4d..fa30f98 100644
  static inline bool dl_time_before(u64 a, u64 b)
  {
 diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85..6af9ae6 100644
+index ab83d85e1183..6af9ae681116 100644
 --- a/include/linux/sched/prio.h
 +++ b/include/linux/sched/prio.h
 @@ -18,6 +18,32 @@
@@ -253,7 +323,7 @@ index ab83d85..6af9ae6 100644
   * Convert user-nice values [ -20 ... 0 ... 19 ]
   * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 994c256..8c050a5 100644
+index 994c25640e15..8c050a59ece1 100644
 --- a/include/linux/sched/rt.h
 +++ b/include/linux/sched/rt.h
 @@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
@@ -268,7 +338,7 @@ index 994c256..8c050a5 100644
  }
  
 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
-index 67b573d..93f45c8 100644
+index 67b573d5bf28..93f45c8640ed 100644
 --- a/include/linux/sched/topology.h
 +++ b/include/linux/sched/topology.h
 @@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
@@ -281,136 +351,60 @@ index 67b573d..93f45c8 100644
  extern void rebuild_sched_domains_energy(void);
  #else
  static inline void rebuild_sched_domains_energy(void)
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 609bde8..5d4e8aa 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -763,8 +762,14 @@ struct task_struct {
- 	unsigned int			ptrace;
- 
- #ifdef CONFIG_SMP
--	int				on_cpu;
- 	struct __call_single_node	wake_entry;
-+#endif
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
-+	int				on_cpu;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int			wakee_flips;
- 	unsigned long			wakee_flip_decay_ts;
- 	struct task_struct		*last_wakee;
-@@ -778,6 +783,7 @@ struct task_struct {
- 	 */
- 	int				recent_used_cpu;
- 	int				wake_cpu;
-+#endif /* !CONFIG_SCHED_ALT */
- #endif
- 	int				on_rq;
- 
-@@ -786,6 +792,20 @@ struct task_struct {
- 	int				normal_prio;
- 	unsigned int			rt_priority;
- 
-+#ifdef CONFIG_SCHED_ALT
-+	u64				last_ran;
-+	s64				time_slice;
-+	int				sq_idx;
-+	struct list_head		sq_node;
-+#ifdef CONFIG_SCHED_BMQ
-+	int				boost_prio;
-+#endif /* CONFIG_SCHED_BMQ */
-+#ifdef CONFIG_SCHED_PDS
-+	u64				deadline;
-+#endif /* CONFIG_SCHED_PDS */
-+	/* sched_clock time spent running */
-+	u64				sched_time;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct sched_entity		se;
- 	struct sched_rt_entity		rt;
- 	struct sched_dl_entity		dl;
-@@ -796,6 +816,7 @@ struct task_struct {
- 	unsigned long			core_cookie;
- 	unsigned int			core_occupation;
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_CGROUP_SCHED
- 	struct task_group		*sched_task_group;
-@@ -1548,6 +1569,15 @@ struct task_struct {
- 	 */
- };
- 
-+#ifdef CONFIG_SCHED_ALT
-+#define tsk_seruntime(t)		((t)->sched_time)
-+/* replace the uncertian rt_timeout with 0UL */
-+#define tsk_rttimeout(t)		(0UL)
-+#else /* CFS */
-+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t)	((t)->rt.timeout)
-+#endif /* !CONFIG_SCHED_ALT */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- 	return task->thread_pid;
-diff --git a/init/Kconfig b/init/Kconfig
-index f7f65af..d57f100 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
+--- a/init/Kconfig	2023-09-13 07:57:19.044978203 -0400
++++ b/init/Kconfig	2023-09-13 08:04:23.196746027 -0400
 @@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
  
  config PSI
  	bool "Pressure stall information tracking"
 +	depends on !SCHED_ALT
+ 	select KERNFS
  	help
  	  Collect metrics that indicate how overcommitted the CPU, memory,
- 	  and IO capacity are in the system.
-@@ -793,6 +794,7 @@ menu "Scheduler features"
+@@ -794,6 +795,7 @@ menu "Scheduler features"
  config UCLAMP_TASK
  	bool "Enable utilization clamping for RT/FAIR tasks"
  	depends on CPU_FREQ_GOV_SCHEDUTIL
-+	depends on !SCHED_ALT
++  depends on !SCHED_ALT
  	help
  	  This feature enables the scheduler to track the clamped utilization
  	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -839,6 +841,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -840,6 +842,34 @@ config UCLAMP_BUCKETS_COUNT
  
  	  If in doubt, use the default value.
  
 +menuconfig SCHED_ALT
-+	bool "Alternative CPU Schedulers"
-+	default n
-+	help
-+	  This feature enables the ProjectC alternative CPU schedulers."
-+
-+if SCHED_ALT
-+
-+choice
-+	prompt "Alternative CPU schedulers"
-+	default SCHED_PDS
-+
-+config SCHED_BMQ
-+	bool "BMQ CPU scheduler"
-+	help
-+	  The BitMap Queue CPU scheduler for excellent interactivity and
-+	  responsiveness on the desktop and solid scalability on normal
-+	  hardware and commodity servers.
-+
-+config SCHED_PDS
-+	bool "PDS CPU scheduler"
-+	help
-+	  The Priority and Deadline based Skip list multiple queue CPU
-+	  Scheduler.
-+
-+endchoice
-+
++  bool "Alternative CPU Schedulers"
++  default n
++  help
++    This feature enables the alternative CPU schedulers.
++ 
++ if SCHED_ALT
++ 
++  choice
++    prompt "Alternative CPU Scheduler"
++    default SCHED_BMQ
++
++   config SCHED_BMQ
++    bool "BMQ CPU scheduler"
++    help
++     The BitMap Queue CPU scheduler for excellent interactivity and
++     responsiveness on the desktop and solid scalability on normal
++     hardware and commodity servers.
++
++   config SCHED_PDS
++    bool "PDS CPU scheduler"
++    help
++     The Priority and Deadline based Skip list multiple queue CPU
++     Scheduler.
++
++  endchoice
 +endif
 +
  endmenu
  
  #
-@@ -892,6 +923,7 @@ config NUMA_BALANCING
+@@ -893,6 +923,7 @@ config NUMA_BALANCING
  	depends on ARCH_SUPPORTS_NUMA_BALANCING
  	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
  	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -418,7 +412,7 @@ index f7f65af..d57f100 100644
  	help
  	  This option adds support for automatic NUMA aware memory/task placement.
  	  The mechanism is quite primitive and is based on migrating memory when
-@@ -989,6 +1021,7 @@ config FAIR_GROUP_SCHED
+@@ -990,6 +1021,7 @@ config FAIR_GROUP_SCHED
  	depends on CGROUP_SCHED
  	default CGROUP_SCHED
  
@@ -426,24 +420,25 @@ index f7f65af..d57f100 100644
  config CFS_BANDWIDTH
  	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
  	depends on FAIR_GROUP_SCHED
-@@ -1011,6 +1044,7 @@ config RT_GROUP_SCHED
+@@ -1011,7 +1043,7 @@ config RT_GROUP_SCHED
+ 	  schedule realtime tasks for non-root users until you allocate
  	  realtime bandwidth for them.
  	  See Documentation/scheduler/sched-rt-group.rst for more information.
- 
+-
 +endif #!SCHED_ALT
  endif #CGROUP_SCHED
  
  config SCHED_MM_CID
-@@ -1259,6 +1293,7 @@ config CHECKPOINT_RESTORE
+@@ -1260,6 +1292,7 @@ config CHECKPOINT_RESTORE
  
  config SCHED_AUTOGROUP
  	bool "Automatic process group scheduling"
-+	depends on !SCHED_ALT
++  depends on !SCHED_ALT
  	select CGROUPS
  	select CGROUP_SCHED
  	select FAIR_GROUP_SCHED
 diff --git a/init/init_task.c b/init/init_task.c
-index ff6c4b9..19e9c66 100644
+index ff6c4b9bfe6b..19e9c662d1a1 100644
 --- a/init/init_task.c
 +++ b/init/init_task.c
 @@ -75,9 +75,15 @@ struct task_struct init_task
@@ -489,7 +484,7 @@ index ff6c4b9..19e9c66 100644
  #ifdef CONFIG_SMP
  	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index c2f1fd9..4165467 100644
+index c2f1fd95a821..41654679b1b2 100644
 --- a/kernel/Kconfig.preempt
 +++ b/kernel/Kconfig.preempt
 @@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
@@ -502,10 +497,10 @@ index c2f1fd9..4165467 100644
  	  This option permits Core Scheduling, a means of coordinated task
  	  selection across SMT siblings. When enabled -- see
 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index 58e6f18..18852b0 100644
+index 58e6f18f01c1..71f5da268ee8 100644
 --- a/kernel/cgroup/cpuset.c
 +++ b/kernel/cgroup/cpuset.c
-@@ -791,7 +791,7 @@ out:
+@@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
  	return ret;
  }
  
@@ -523,21 +518,7 @@ index 58e6f18..18852b0 100644
  static void rebuild_sched_domains_locked(void)
  {
  }
-@@ -2475,11 +2475,13 @@ static int cpuset_can_attach_check(struct cpuset *cs)
- 	return 0;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static void reset_migrate_dl_data(struct cpuset *cs)
- {
- 	cs->nr_migrate_dl_tasks = 0;
- 	cs->sum_migrate_dl_bw = 0;
- }
-+#endif
- 
- /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
- static int cpuset_can_attach(struct cgroup_taskset *tset)
-@@ -2509,12 +2511,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+@@ -2509,12 +2509,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
  		if (ret)
  			goto out_unlock;
  
@@ -553,7 +534,7 @@ index 58e6f18..18852b0 100644
  	if (!cs->nr_migrate_dl_tasks)
  		goto out_success;
  
-@@ -2535,6 +2540,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+@@ -2535,6 +2538,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
  	}
  
  out_success:
@@ -561,11 +542,10 @@ index 58e6f18..18852b0 100644
  	/*
  	 * Mark attach is in progress.  This makes validate_change() fail
  	 * changes which zero cpus/mems_allowed.
-@@ -2557,13 +2563,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
- 	cs->attach_in_progress--;
+@@ -2558,12 +2562,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
  	if (!cs->attach_in_progress)
  		wake_up(&cpuset_attach_wq);
--
+ 
 +#ifndef CONFIG_SCHED_ALT
  	if (cs->nr_migrate_dl_tasks) {
  		int cpu = cpumask_any(cs->effective_cpus);
@@ -577,22 +557,8 @@ index 58e6f18..18852b0 100644
  
  	mutex_unlock(&cpuset_mutex);
  }
-@@ -2665,11 +2672,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
- out:
- 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (cs->nr_migrate_dl_tasks) {
- 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
- 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
- 		reset_migrate_dl_data(cs);
- 	}
-+#endif
- 
- 	cs->attach_in_progress--;
- 	if (!cs->attach_in_progress)
 diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index 6f0c358..8111481 100644
+index 6f0c358e73d8..8111481ce8b1 100644
 --- a/kernel/delayacct.c
 +++ b/kernel/delayacct.c
 @@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -605,7 +571,7 @@ index 6f0c358..8111481 100644
  	d->cpu_count += t1;
  
 diff --git a/kernel/exit.c b/kernel/exit.c
-index edb50b4..09e72bb 100644
+index edb50b4c9972..09e72bba7cc2 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
 @@ -173,7 +173,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -627,7 +593,7 @@ index edb50b4..09e72bb 100644
  	__unhash_process(tsk, group_dead);
  	write_sequnlock(&sig->stats_lock);
 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 21db0df..039badd 100644
+index 21db0df0eb00..37a47396575f 100644
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
 @@ -343,7 +343,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
@@ -644,7 +610,7 @@ index 21db0df..039badd 100644
   */
  #define task_to_waiter_node(p)	\
 -	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
-+	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(task) }
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
  #define task_to_waiter(p)	\
  	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
  
@@ -696,7 +662,7 @@ index 21db0df..039badd 100644
  
  static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
 diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 976092b..31d587c 100644
+index 976092b7bd45..31d587c16ec1 100644
 --- a/kernel/sched/Makefile
 +++ b/kernel/sched/Makefile
 @@ -28,7 +28,12 @@ endif
@@ -712,12 +678,12 @@ index 976092b..31d587c 100644
 +endif
  obj-y += build_policy.o
  obj-y += build_utility.o
-diff --git a/b/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 0000000..05b0f12
+index 000000000000..35d634c208cf
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,8738 @@
+@@ -0,0 +1,8762 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -769,6 +735,7 @@ index 0000000..05b0f12
 +#include "../smpboot.h"
 +
 +EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
 +
 +/*
 + * Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -791,7 +758,7 @@ index 0000000..05b0f12
 +#define sched_feat(x)	(0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.5-r0-tv"
++#define ALT_SCHED_VERSION "v6.5-r0"
 +
 +/*
 + * Compile time debug macro
@@ -1929,8 +1896,7 @@ index 0000000..05b0f12
 +unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
 +{
 +	unsigned long flags;
-+	bool running, on_rq;
-+	int match;
++	int running, queued, match;
 +	unsigned long ncsw;
 +	struct rq *rq;
 +	raw_spinlock_t *lock;
@@ -1949,7 +1915,7 @@ index 0000000..05b0f12
 +		 * if the runqueue has changed and p is actually now
 +		 * running somewhere else!
 +		 */
-+		while (task_on_cpu(p) && p == rq->curr) {
++		while (task_on_cpu(p)) {
 +			if (!task_state_match(p, match_state))
 +				return 0;
 +			cpu_relax();
@@ -1963,7 +1929,7 @@ index 0000000..05b0f12
 +		task_access_lock_irqsave(p, &lock, &flags);
 +		trace_sched_wait_task(p);
 +		running = task_on_cpu(p);
-+		on_rq = p->on_rq;
++		queued = p->on_rq;
 +		ncsw = 0;
 +		if ((match = __task_state_match(p, match_state))) {
 +			/*
@@ -1971,7 +1937,7 @@ index 0000000..05b0f12
 +			 * still queued so it will wait.
 +			 */
 +			if (match < 0)
-+				on_rq = 1;
++				queued = 1;
 +			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 +		}
 +		task_access_unlock_irqrestore(p, lock, &flags);
@@ -2002,7 +1968,7 @@ index 0000000..05b0f12
 +		 * running right now), it's preempted, and we should
 +		 * yield - it could be a while.
 +		 */
-+		if (unlikely(on_rq)) {
++		if (unlikely(queued)) {
 +			ktime_t to = NSEC_PER_SEC / HZ;
 +
 +			set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2196,9 +2162,9 @@ index 0000000..05b0f12
 + *
 + * Context: rq->lock
 + */
-+static void activate_task(struct task_struct *p, struct rq *rq, int flags)
++static void activate_task(struct task_struct *p, struct rq *rq)
 +{
-+	enqueue_task(p, rq, flags);
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
 +	p->on_rq = TASK_ON_RQ_QUEUED;
 +
 +	/*
@@ -2214,10 +2180,10 @@ index 0000000..05b0f12
 + *
 + * Context: rq->lock
 + */
-+static void deactivate_task(struct task_struct *p, struct rq *rq, int flags)
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
 +{
-+	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
-+	dequeue_task(p, rq, flags);
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
 +	cpufreq_update_util(rq, 0);
 +}
 +
@@ -2278,12 +2244,11 @@ index 0000000..05b0f12
 +
 +	WARN_ON_ONCE(is_migration_disabled(p));
 +#endif
-+
 +	trace_sched_migrate_task(p, new_cpu);
 +
-+	if (task_cpu(p) != new_cpu) {
++	if (task_cpu(p) != new_cpu)
++	{
 +		rseq_migrate(p);
-+		sched_mm_cid_migrate_from(p);
 +		perf_event_task_migrate(p);
 +	}
 +
@@ -2433,9 +2398,13 @@ index 0000000..05b0f12
 +static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
 +				   new_cpu)
 +{
++	int src_cpu;
++
 +	lockdep_assert_held(&rq->lock);
 +
-+	deactivate_task(p, rq, 0);
++	src_cpu = cpu_of(rq);
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
 +	set_task_cpu(p, new_cpu);
 +	raw_spin_unlock(&rq->lock);
 +
@@ -2443,8 +2412,12 @@ index 0000000..05b0f12
 +
 +	raw_spin_lock(&rq->lock);
 +	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++
++	sched_mm_cid_migrate_to(rq, p, src_cpu);
++
 +	sched_task_sanity_check(p, rq);
-+	activate_task(p, rq, 0);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
 +	check_preempt_curr(rq);
 +
 +	return rq;
@@ -3175,7 +3148,7 @@ index 0000000..05b0f12
 +		atomic_dec(&task_rq(p)->nr_iowait);
 +	}
 +
-+	activate_task(p, rq, ENQUEUE_WAKEUP);
++	activate_task(p, rq);
 +	check_preempt_curr(rq);
 +
 +	ttwu_do_wakeup(p);
@@ -4105,7 +4078,7 @@ index 0000000..05b0f12
 +	raw_spin_lock(&rq->lock);
 +	update_rq_clock(rq);
 +
-+	activate_task(p, rq, flags);
++	activate_task(p, rq);
 +	trace_sched_wakeup_new(p);
 +	check_preempt_curr(rq);
 +
@@ -4206,7 +4179,8 @@ index 0000000..05b0f12
 +	 * Claim the task as running, we do this before switching to it
 +	 * such that any running task will have this set.
 +	 *
-+	 * See the ttwu() WF_ON_CPU case and its ordering comment.
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
 +	 */
 +	WRITE_ONCE(next->on_cpu, 1);
 +}
@@ -4276,7 +4250,7 @@ index 0000000..05b0f12
 +	if (likely(!head))
 +		return NULL;
 +
-+	lockdep_assert_held(&rq->lock);
++	lockdep_assert_rq_held(rq);
 +	/*
 +	 * Must not take balance_push_callback off the list when
 +	 * splice_balance_callbacks() and balance_callbacks() are not
@@ -4875,7 +4849,8 @@ index 0000000..05b0f12
 +	if (sched_feat(LATENCY_WARN))
 +		resched_latency = cpu_resched_latency(rq);
 +	calc_global_load_tick(rq);
-+	task_tick_mm_cid(rq, curr);
++
++	task_tick_mm_cid(rq, rq->curr);
 +
 +	rq->last_tick = rq->clock;
 +	raw_spin_unlock(&rq->lock);
@@ -5108,7 +5083,7 @@ index 0000000..05b0f12
 +int __init sched_tick_offload_init(void)
 +{
 +	tick_work_cpu = alloc_percpu(struct tick_work);
-+	WARN_ON_ONCE(!tick_work_cpu);
++	BUG_ON(!tick_work_cpu);
 +	return 0;
 +}
 +
@@ -5293,7 +5268,7 @@ index 0000000..05b0f12
 +static inline int
 +migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
 +{
-+	struct task_struct *p, *skip = rcu_dereference(rq->curr);
++	struct task_struct *p, *skip = rq->curr;
 +	int nr_migrated = 0;
 +	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
 +
@@ -5308,6 +5283,7 @@ index 0000000..05b0f12
 +			__SCHED_DEQUEUE_TASK(p, rq, 0, );
 +			set_task_cpu(p, dest_cpu);
 +			sched_task_sanity_check(p, dest_rq);
++			sched_mm_cid_migrate_to(dest_rq, p, cpu_of(rq));
 +			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
 +			nr_migrated++;
 +		}
@@ -5556,7 +5532,7 @@ index 0000000..05b0f12
 +			 * After this, schedule() must not care about p->state any more.
 +			 */
 +			sched_task_deactivate(prev, rq);
-+			deactivate_task(prev, rq, DEQUEUE_SLEEP);
++			deactivate_task(prev, rq);
 +
 +			if (prev->in_iowait) {
 +				atomic_inc(&rq->nr_iowait);
@@ -5914,7 +5890,7 @@ index 0000000..05b0f12
 +	enum ctx_state prev_state;
 +
 +	/* Catch callers which need to be fixed */
-+	WARN_ON_ONCE(preempt_count() || !irqs_disabled());
++	BUG_ON(preempt_count() || !irqs_disabled());
 +
 +	prev_state = exception_enter();
 +
@@ -6093,17 +6069,29 @@ index 0000000..05b0f12
 +EXPORT_SYMBOL(set_user_nice);
 +
 +/*
-+ * can_nice - check if a task can reduce its nice value
++ * is_nice_reduction - check if nice value is an actual reduction
++ *
++ * Similar to can_nice() but does not perform a capability check.
++ *
 + * @p: task
 + * @nice: nice value
 + */
-+int can_nice(const struct task_struct *p, const int nice)
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
 +{
-+	/* Convert nice value [19,-20] to rlimit style value [1,40] */
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
 +	int nice_rlim = nice_to_rlimit(nice);
 +
-+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
-+		capable(CAP_SYS_NICE));
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
 +}
 +
 +#ifdef __ARCH_WANT_SYS_NICE
@@ -6254,6 +6242,45 @@ index 0000000..05b0f12
 +	return match;
 +}
 +
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
 +static int __sched_setscheduler(struct task_struct *p,
 +				const struct sched_attr *attr,
 +				bool user, bool pi)
@@ -6269,12 +6296,11 @@ index 0000000..05b0f12
 +	struct balance_callback *head;
 +	unsigned long flags;
 +	struct rq *rq;
-+	bool cpuset_locked = false;
 +	int reset_on_fork;
 +	raw_spinlock_t *lock;
 +
 +	/* The pi code expects interrupts enabled */
-+	WARN_ON_ONCE(pi && in_interrupt());
++	BUG_ON(pi && in_interrupt());
 +
 +	/*
 +	 * Alt schedule FW supports SCHED_DEADLINE by squash it as prio 0 SCHED_FIFO
@@ -6311,42 +6337,14 @@ index 0000000..05b0f12
 +	    (attr->sched_priority != 0))
 +		return -EINVAL;
 +
-+	/*
-+	 * Allow unprivileged RT tasks to decrease priority:
-+	 */
-+	if (user && !capable(CAP_SYS_NICE)) {
-+		if (SCHED_FIFO == policy || SCHED_RR == policy) {
-+			unsigned long rlim_rtprio =
-+					task_rlimit(p, RLIMIT_RTPRIO);
-+
-+			/* Can't set/change the rt policy */
-+			if (policy != p->policy && !rlim_rtprio)
-+				return -EPERM;
-+
-+			/* Can't increase priority */
-+			if (attr->sched_priority > p->rt_priority &&
-+			    attr->sched_priority > rlim_rtprio)
-+				return -EPERM;
-+		}
-+
-+		/* Can't change other user's priorities */
-+		if (!check_same_owner(p))
-+			return -EPERM;
-+
-+		/* Normal users shall not reset the sched_reset_on_fork flag */
-+		if (p->sched_reset_on_fork && !reset_on_fork)
-+			return -EPERM;
-+	}
-+
 +	if (user) {
-+		retval = security_task_setscheduler(p);
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
 +		if (retval)
 +			return retval;
-+	}
 +
-+	if (pi) {
-+		cpuset_locked = true;
-+		cpuset_lock();
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
 +	}
 +
 +	/*
@@ -6394,8 +6392,6 @@ index 0000000..05b0f12
 +		policy = oldpolicy = -1;
 +		__task_access_unlock(p, lock);
 +		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		if (cpuset_locked)
-+			cpuset_unlock();
 +		goto recheck;
 +	}
 +
@@ -6426,11 +6422,8 @@ index 0000000..05b0f12
 +	__task_access_unlock(p, lock);
 +	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +
-+	if (pi) {
-+		if (cpuset_locked)
-+			cpuset_unlock();
++	if (pi)
 +		rt_mutex_adjust_pi(p);
-+	}
 +
 +	/* Run balance callbacks after we've adjusted the PI chain: */
 +	balance_callbacks(rq, head);
@@ -6441,8 +6434,6 @@ index 0000000..05b0f12
 +unlock:
 +	__task_access_unlock(p, lock);
 +	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+	if (cpuset_locked)
-+		cpuset_unlock();
 +	return retval;
 +}
 +
@@ -7881,7 +7872,7 @@ index 0000000..05b0f12
 +{
 +	struct mm_struct *mm = current->active_mm;
 +
-+	WARN_ON_ONCE(current != this_rq()->idle);
++	BUG_ON(current != this_rq()->idle);
 +
 +	if (mm != &init_mm) {
 +		switch_mm(mm, &init_mm, current);
@@ -8030,8 +8021,10 @@ index 0000000..05b0f12
 +
 +static void set_rq_offline(struct rq *rq)
 +{
-+	if (rq->online)
++	if (rq->online) {
++		update_rq_clock(rq);
 +		rq->online = false;
++	}
 +}
 +
 +static void set_rq_online(struct rq *rq)
@@ -8155,7 +8148,6 @@ index 0000000..05b0f12
 +	synchronize_rcu();
 +
 +	raw_spin_lock_irqsave(&rq->lock, flags);
-+	update_rq_clock(rq);
 +	set_rq_offline(rq);
 +	raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
@@ -8398,6 +8390,7 @@ index 0000000..05b0f12
 +void __init sched_init(void)
 +{
 +	int i;
++	struct rq *rq;
 +
 +	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
 +			 " by Alfred Chen.\n");
@@ -8417,7 +8410,6 @@ index 0000000..05b0f12
 +	INIT_LIST_HEAD(&root_task_group.siblings);
 +#endif /* CONFIG_CGROUP_SCHED */
 +	for_each_possible_cpu(i) {
-+		struct rq *rq;
 +		rq = cpu_rq(i);
 +
 +		sched_queue_init(&rq->queue);
@@ -8928,6 +8920,7 @@ index 0000000..05b0f12
 +
 +#ifdef CONFIG_SCHED_MM_CID
 +
++#
 +/*
 + * @cid_lock: Guarantee forward-progress of cid allocation.
 + *
@@ -9147,18 +9140,17 @@ index 0000000..05b0f12
 + * Interrupts are disabled, which keeps the window of cid ownership without the
 + * source rq lock held small.
 + */
-+void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
++void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu)
 +{
 +	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
 +	struct mm_struct *mm = t->mm;
-+	int src_cid, dst_cid, src_cpu;
++	int src_cid, dst_cid;
 +	struct rq *src_rq;
 +
 +	lockdep_assert_rq_held(dst_rq);
 +
 +	if (!mm)
 +		return;
-+	src_cpu = t->migrate_from_cpu;
 +	if (src_cpu == -1) {
 +		t->last_mm_cid = -1;
 +		return;
@@ -9210,7 +9202,7 @@ index 0000000..05b0f12
 +
 +	cid = READ_ONCE(pcpu_cid->cid);
 +	if (!mm_cid_is_valid(cid))
-+ 		return;
++		return;
 +
 +	/*
 +	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
@@ -9400,8 +9392,6 @@ index 0000000..05b0f12
 +	rq_unlock_irqrestore(rq, &rf);
 +}
 +
-+
-+
 +void sched_mm_cid_before_execve(struct task_struct *t)
 +{
 +	struct mm_struct *mm = t->mm;
@@ -9456,9 +9446,9 @@ index 0000000..05b0f12
 +	t->mm_cid_active = 1;
 +}
 +#endif
-diff --git a/b/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
 new file mode 100644
-index 0000000..1212a03
+index 000000000000..1212a031700e
 --- /dev/null
 +++ b/kernel/sched/alt_debug.c
 @@ -0,0 +1,31 @@
@@ -9493,9 +9483,9 @@ index 0000000..1212a03
 +
 +void proc_sched_set_task(struct task_struct *p)
 +{}
-diff --git a/b/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 0000000..dc99b8d
+index 000000000000..5494f27cdb04
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
 @@ -0,0 +1,906 @@
@@ -9902,31 +9892,31 @@ index 0000000..dc99b8d
 +}
 +
 +static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++rq_lock(struct rq *rq, struct rq_flags *rf)
 +	__acquires(rq->lock)
 +{
-+	raw_spin_lock_irq(&rq->lock);
++	raw_spin_lock(&rq->lock);
 +}
 +
 +static inline void
-+rq_lock(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
 +{
-+	raw_spin_lock(&rq->lock);
++	raw_spin_unlock(&rq->lock);
 +}
 +
 +static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
 +{
-+	raw_spin_unlock_irq(&rq->lock);
++	raw_spin_lock_irq(&rq->lock);
 +}
 +
 +static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
 +	__releases(rq->lock)
 +{
-+	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irq(&rq->lock);
 +}
 +
 +static inline struct rq *
@@ -10178,7 +10168,7 @@ index 0000000..dc99b8d
 +extern int use_cid_lock;
 +
 +extern void sched_mm_cid_migrate_from(struct task_struct *t);
-+extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
++extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu);
 +extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
 +extern void init_sched_mm_cid(struct task_struct *t);
 +
@@ -10399,15 +10389,15 @@ index 0000000..dc99b8d
 +#else
 +static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
 +static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
-+static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
++static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu) { }
 +static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
 +static inline void init_sched_mm_cid(struct task_struct *t) { }
 +#endif
 +
 +#endif /* ALT_SCHED_H */
-diff --git a/b/kernel/sched/bmq.h b/kernel/sched/bmq.h
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
 new file mode 100644
-index 0000000..f29b8f3
+index 000000000000..f29b8f3aa786
 --- /dev/null
 +++ b/kernel/sched/bmq.h
 @@ -0,0 +1,110 @@
@@ -10522,7 +10512,7 @@ index 0000000..f29b8f3
 +
 +static inline void update_rq_time_edge(struct rq *rq) {}
 diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
-index d9dc9ab..71a2554 100644
+index d9dc9ab3773f..71a25540d65e 100644
 --- a/kernel/sched/build_policy.c
 +++ b/kernel/sched/build_policy.c
 @@ -42,13 +42,19 @@
@@ -10547,18 +10537,10 @@ index d9dc9ab..71a2554 100644
 +#include "deadline.c"
 +#endif
 diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
-index 99bdd96..bc17d5a 100644
+index 99bdd96f454f..23f80a86d2d7 100644
 --- a/kernel/sched/build_utility.c
 +++ b/kernel/sched/build_utility.c
-@@ -34,7 +34,6 @@
- #include <linux/nospec.h>
- #include <linux/proc_fs.h>
- #include <linux/psi.h>
--#include <linux/psi.h>
- #include <linux/ptrace_api.h>
- #include <linux/sched_clock.h>
- #include <linux/security.h>
-@@ -85,7 +84,9 @@
+@@ -85,7 +85,9 @@
  
  #ifdef CONFIG_SMP
  # include "cpupri.c"
@@ -10569,7 +10551,7 @@ index 99bdd96..bc17d5a 100644
  #endif
  
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 4492608..3522bbf 100644
+index 4492608b7d7f..b2bf561dba12 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -155,12 +155,18 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
@@ -10581,13 +10563,13 @@ index 4492608..3522bbf 100644
  
 +#ifndef CONFIG_SCHED_ALT
 +	unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
++
  	sg_cpu->bw_dl = cpu_bw_dl(rq);
  	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
  					  FREQUENCY_UTIL, NULL);
 +#else
-+	unsigned long max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
 +	sg_cpu->bw_dl = 0;
-+	sg_cpu->util = rq_load_util(rq, max_cap);
++	sg_cpu->util = rq_load_util(rq, arch_scale_cpu_capacity(sg_cpu->cpu));
 +#endif /* CONFIG_SCHED_ALT */
  }
  
@@ -10622,7 +10604,7 @@ index 4492608..3522bbf 100644
  static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
  
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index af7952f..6461cbb 100644
+index af7952f12e6c..6461cbbb734d 100644
 --- a/kernel/sched/cputime.c
 +++ b/kernel/sched/cputime.c
 @@ -126,7 +126,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
@@ -10661,7 +10643,7 @@ index af7952f..6461cbb 100644
  	task_rq_unlock(rq, t, &rf);
  
  	return ns;
-@@ -630,7 +630,7 @@ out:
+@@ -630,7 +630,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
  void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  {
  	struct task_cputime cputime = {
@@ -10671,7 +10653,7 @@ index af7952f..6461cbb 100644
  
  	if (task_cputime(p, &cputime.utime, &cputime.stime))
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 066ff1c..7bdb806 100644
+index 066ff1c8ae4e..1afd8c786840 100644
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
 @@ -7,6 +7,7 @@
@@ -10706,13 +10688,14 @@ index 066ff1c..7bdb806 100644
  
  static struct dentry *debugfs_sched;
  
-@@ -341,12 +345,15 @@ static __init int sched_init_debug(void)
+@@ -341,12 +345,16 @@ static __init int sched_init_debug(void)
  
  	debugfs_sched = debugfs_create_dir("sched", NULL);
  
 +#ifndef CONFIG_SCHED_ALT
  	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
  	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
++	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
 +#endif /* !CONFIG_SCHED_ALT */
  #ifdef CONFIG_PREEMPT_DYNAMIC
  	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
@@ -10722,7 +10705,7 @@ index 066ff1c..7bdb806 100644
  	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
  	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
  	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
-@@ -376,11 +383,13 @@ static __init int sched_init_debug(void)
+@@ -376,11 +384,13 @@ static __init int sched_init_debug(void)
  #endif
  
  	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -10736,7 +10719,7 @@ index 066ff1c..7bdb806 100644
  #ifdef CONFIG_SMP
  
  static cpumask_var_t		sd_sysctl_cpus;
-@@ -1114,6 +1123,7 @@ void proc_sched_set_task(struct task_struct *p)
+@@ -1114,6 +1124,7 @@ void proc_sched_set_task(struct task_struct *p)
  	memset(&p->stats, 0, sizeof(p->stats));
  #endif
  }
@@ -10745,7 +10728,7 @@ index 066ff1c..7bdb806 100644
  void resched_latency_warn(int cpu, u64 latency)
  {
 diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index 342f58a..ab493e7 100644
+index 342f58a329f5..ab493e759084 100644
 --- a/kernel/sched/idle.c
 +++ b/kernel/sched/idle.c
 @@ -379,6 +379,7 @@ void cpu_startup_entry(enum cpuhp_state state)
@@ -10761,9 +10744,9 @@ index 342f58a..ab493e7 100644
  	.update_curr		= update_curr_idle,
  };
 +#endif
-diff --git a/b/kernel/sched/pds.h b/kernel/sched/pds.h
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
 new file mode 100644
-index 0000000..15cc488
+index 000000000000..15cc4887efed
 --- /dev/null
 +++ b/kernel/sched/pds.h
 @@ -0,0 +1,152 @@
@@ -10920,7 +10903,7 @@ index 0000000..15cc488
 +#endif
 +static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
 diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index 0f31076..bd38bf7 100644
+index 0f310768260c..bd38bf738fe9 100644
 --- a/kernel/sched/pelt.c
 +++ b/kernel/sched/pelt.c
 @@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
@@ -10943,7 +10926,7 @@ index 0f31076..bd38bf7 100644
   * thermal:
   *
 diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index 3a0e0dc..e8a7d84 100644
+index 3a0e0dc28721..e8a7d84aa5a5 100644
 --- a/kernel/sched/pelt.h
 +++ b/kernel/sched/pelt.h
 @@ -1,13 +1,15 @@
@@ -10992,7 +10975,7 @@ index 3a0e0dc..e8a7d84 100644
  static inline int
  update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index e93e006..9bab981 100644
+index e93e006a942b..326ff9684cae 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -5,6 +5,10 @@
@@ -11006,26 +10989,18 @@ index e93e006..9bab981 100644
  #include <linux/sched/affinity.h>
  #include <linux/sched/autogroup.h>
  #include <linux/sched/cpufreq.h>
-@@ -3245,6 +3249,11 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
- 	cgroup_account_cputime(curr, delta_exec);
- }
+@@ -3480,4 +3484,9 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
+ static inline void init_sched_mm_cid(struct task_struct *t) { }
+ #endif
  
 +static inline int task_running_nice(struct task_struct *p)
 +{
 +	return (task_nice(p) > 0);
 +}
-+
- #ifdef CONFIG_SCHED_MM_CID
- 
- #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
-@@ -3480,4 +3489,5 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
- static inline void init_sched_mm_cid(struct task_struct *t) { }
- #endif
- 
 +#endif /* !CONFIG_SCHED_ALT */
  #endif /* _KERNEL_SCHED_SCHED_H */
 diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index 857f837..5486c63 100644
+index 857f837f52cb..5486c63e4790 100644
 --- a/kernel/sched/stats.c
 +++ b/kernel/sched/stats.c
 @@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
@@ -11056,7 +11031,7 @@ index 857f837..5486c63 100644
  	}
  	return 0;
 diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 38f3698..b9d5973 100644
+index 38f3698f5e5b..b9d597394316 100644
 --- a/kernel/sched/stats.h
 +++ b/kernel/sched/stats.h
 @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
@@ -11076,17 +11051,17 @@ index 38f3698..b9d5973 100644
  #ifdef CONFIG_PSI
  void psi_task_change(struct task_struct *task, int clear, int set);
 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index d3a3b26..fed43c1 100644
+index d3a3b2646ec4..10f64ed42463 100644
 --- a/kernel/sched/topology.c
 +++ b/kernel/sched/topology.c
-@@ -5,6 +5,7 @@
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
  
++#ifndef CONFIG_SCHED_ALT
  #include <linux/bsearch.h>
  
-+#ifndef CONFIG_SCHED_ALT
  DEFINE_MUTEX(sched_domains_mutex);
- 
- /* Protected by sched_domains_mutex: */
 @@ -1420,8 +1421,10 @@ static void asym_cpu_capacity_scan(void)
   */
  
@@ -11136,7 +11111,7 @@ index d3a3b26..fed43c1 100644
 +#endif /* CONFIG_NUMA */
 +#endif
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 354a2d2..73080f0 100644
+index 354a2d294f52..73080f0a9989 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
 @@ -92,6 +92,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
@@ -11169,7 +11144,7 @@ index 354a2d2..73080f0 100644
  	{
  		.procname	= "spin_retry",
 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 238262e..962a26f 100644
+index 238262e4aba7..141c96f68957 100644
 --- a/kernel/time/hrtimer.c
 +++ b/kernel/time/hrtimer.c
 @@ -2091,8 +2091,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
@@ -11178,13 +11153,14 @@ index 238262e..962a26f 100644
  
 +#ifndef CONFIG_SCHED_ALT
  	slack = current->timer_slack_ns;
- 	if (rt_task(current))
+-	if (rt_task(current))
++	if (dl_task(current) || rt_task(current))
 +#endif
  		slack = 0;
  
  	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index e9c6f9d..43ee0a9 100644
+index e9c6f9d0e42c..43ee0a94abdd 100644
 --- a/kernel/time/posix-cpu-timers.c
 +++ b/kernel/time/posix-cpu-timers.c
 @@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
@@ -11244,7 +11220,7 @@ index e9c6f9d..43ee0a9 100644
  	return false;
  }
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index 5295904..d04bb99 100644
+index 529590499b1f..d04bb99b4f0e 100644
 --- a/kernel/trace/trace_selftest.c
 +++ b/kernel/trace/trace_selftest.c
 @@ -1155,10 +1155,15 @@ static int trace_wakeup_test_thread(void *data)
@@ -11264,33 +11240,42 @@ index 5295904..d04bb99 100644
  	struct wakeup_test_data *x = data;
  
 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 800b420..998a572 100644
+index 800b4208dba9..920b687a989d 100644
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
-@@ -1075,7 +1075,7 @@ void wq_worker_running(struct task_struct *task)
+@@ -1075,7 +1075,11 @@ void wq_worker_running(struct task_struct *task)
  	 * CPU intensive auto-detection cares about how long a work item hogged
  	 * CPU without sleeping. Reset the starting timestamp on wakeup.
  	 */
--	worker->current_at = worker->task->se.sum_exec_runtime;
-+	worker->current_at = tsk_seruntime(worker->task);
++#ifdef CONFIG_SCHED_ALT
++	worker->current_at = worker->task->sched_time;
++#else
+ 	worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
  
  	WRITE_ONCE(worker->sleeping, 0);
  }
-@@ -1161,7 +1161,7 @@ void wq_worker_tick(struct task_struct *task)
+@@ -1161,7 +1165,11 @@ void wq_worker_tick(struct task_struct *task)
  	 * We probably want to make this prettier in the future.
  	 */
  	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
--	    worker->task->se.sum_exec_runtime - worker->current_at <
-+	    tsk_seruntime(worker->task) - worker->current_at <
++#ifdef CONFIG_SCHED_ALT
++	    worker->task->sched_time - worker->current_at <
++#else
+ 	    worker->task->se.sum_exec_runtime - worker->current_at <
++#endif
  	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
  		return;
  
-@@ -2530,7 +2530,7 @@ __acquires(&pool->lock)
+@@ -2530,7 +2538,11 @@ __acquires(&pool->lock)
  	worker->current_work = work;
  	worker->current_func = work->func;
  	worker->current_pwq = pwq;
--	worker->current_at = worker->task->se.sum_exec_runtime;
-+	worker->current_at = tsk_seruntime(worker->task);
++#ifdef CONFIG_SCHED_ALT
++	worker->current_at = worker->task->sched_time;
++#else
+ 	worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
  	work_data = *work_data_bits(work);
  	worker->current_color = get_work_color(work_data);
  


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-13 11:03 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-13 11:03 UTC (permalink / raw
  To: gentoo-commits

commit:     26f66be9229018459d0f69bb54b8627d14a9d562
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 11:03:31 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 11:03:31 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26f66be9

Linux patch 6.5.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1002_linux-6.5.3.patch | 35809 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 35813 insertions(+)

diff --git a/0000_README b/0000_README
index 4ba02fbb..de8216ab 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.5.2.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.5.2
 
+Patch:  1002_linux-6.5.3.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-6.5.3.patch b/1002_linux-6.5.3.patch
new file mode 100644
index 00000000..c0712d3f
--- /dev/null
+++ b/1002_linux-6.5.3.patch
@@ -0,0 +1,35809 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
+index 531fe9d6b40aa..c7393b4dd2d88 100644
+--- a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
++++ b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
+@@ -5,6 +5,6 @@ Description:
+ 		Indicates whether or not this SBE device has experienced a
+ 		timeout; i.e. the SBE did not respond within the time allotted
+ 		by the driver. A value of 1 indicates that a timeout has
+-		ocurred and no transfers have completed since the timeout. A
+-		value of 0 indicates that no timeout has ocurred, or if one
+-		has, more recent transfers have completed successful.
++		occurred and no transfers have completed since the timeout. A
++		value of 0 indicates that no timeout has occurred, or if one
++		has, more recent transfers have completed successfully.
+diff --git a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
+index c308926e1568a..7c8e129fc1005 100644
+--- a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
++++ b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
+@@ -134,4 +134,4 @@ KernelVersion:	5.19
+ Description:
+ 		Returns the verified boot data block shared between the
+ 		firmware verification step and the kernel verification step
+-		(binary).
++		(hex dump).
+diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
+index 8140fc98f5aee..ad3d76d37c8ba 100644
+--- a/Documentation/ABI/testing/sysfs-fs-f2fs
++++ b/Documentation/ABI/testing/sysfs-fs-f2fs
+@@ -54,9 +54,9 @@ Description:	Controls the in-place-update policy.
+ 		0x00  DISABLE         disable IPU(=default option in LFS mode)
+ 		0x01  FORCE           all the time
+ 		0x02  SSR             if SSR mode is activated
+-		0x04  UTIL            if FS utilization is over threashold
++		0x04  UTIL            if FS utilization is over threshold
+ 		0x08  SSR_UTIL        if SSR mode is activated and FS utilization is over
+-		                      threashold
++		                      threshold
+ 		0x10  FSYNC           activated in fsync path only for high performance
+ 		                      flash storages. IPU will be triggered only if the
+ 		                      # of dirty pages over min_fsync_blocks.
+@@ -117,7 +117,7 @@ Date:		December 2021
+ Contact:	"Konstantin Vyshetsky" <vkon@google.com>
+ Description:	Controls the number of discards a thread will issue at a time.
+ 		Higher number will allow the discard thread to finish its work
+-		faster, at the cost of higher latency for incomming I/O.
++		faster, at the cost of higher latency for incoming I/O.
+ 
+ What:		/sys/fs/f2fs/<disk>/min_discard_issue_time
+ Date:		December 2021
+@@ -334,7 +334,7 @@ Description:	This indicates how many GC can be failed for the pinned
+ 		state. 2048 trials is set by default.
+ 
+ What:		/sys/fs/f2fs/<disk>/extension_list
+-Date:		Feburary 2018
++Date:		February 2018
+ Contact:	"Chao Yu" <yuchao0@huawei.com>
+ Description:	Used to control configure extension list:
+ 		- Query: cat /sys/fs/f2fs/<disk>/extension_list
+diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
+index b1b57f638b94f..8390549235304 100644
+--- a/Documentation/admin-guide/devices.txt
++++ b/Documentation/admin-guide/devices.txt
+@@ -2691,18 +2691,9 @@
+ 		 45 = /dev/ttyMM1		Marvell MPSC - port 1 (obsolete unused)
+ 		 46 = /dev/ttyCPM0		PPC CPM (SCC or SMC) - port 0
+ 		    ...
+-		 49 = /dev/ttyCPM5		PPC CPM (SCC or SMC) - port 3
+-		 50 = /dev/ttyIOC0		Altix serial card
+-		    ...
+-		 81 = /dev/ttyIOC31		Altix serial card
++		 51 = /dev/ttyCPM5		PPC CPM (SCC or SMC) - port 5
+ 		 82 = /dev/ttyVR0		NEC VR4100 series SIU
+ 		 83 = /dev/ttyVR1		NEC VR4100 series DSIU
+-		 84 = /dev/ttyIOC84		Altix ioc4 serial card
+-		    ...
+-		 115 = /dev/ttyIOC115		Altix ioc4 serial card
+-		 116 = /dev/ttySIOC0		Altix ioc3 serial card
+-		    ...
+-		 147 = /dev/ttySIOC31		Altix ioc3 serial card
+ 		 148 = /dev/ttyPSC0		PPC PSC - port 0
+ 		    ...
+ 		 153 = /dev/ttyPSC5		PPC PSC - port 5
+@@ -2761,10 +2752,7 @@
+ 		 43 = /dev/ttycusmx2		Callout device for ttySMX2
+ 		 46 = /dev/cucpm0		Callout device for ttyCPM0
+ 		    ...
+-		 49 = /dev/cucpm5		Callout device for ttyCPM5
+-		 50 = /dev/cuioc40		Callout device for ttyIOC40
+-		    ...
+-		 81 = /dev/cuioc431		Callout device for ttyIOC431
++		 51 = /dev/cucpm5		Callout device for ttyCPM5
+ 		 82 = /dev/cuvr0		Callout device for ttyVR0
+ 		 83 = /dev/cuvr1		Callout device for ttyVR1
+ 
+diff --git a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+index 1289605456408..55800fb0221d0 100644
+--- a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
++++ b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+@@ -23,6 +23,7 @@ properties:
+ 
+   connector:
+     $ref: /schemas/connector/usb-connector.yaml#
++    unevaluatedProperties: false
+ 
+   ports:
+     $ref: /schemas/graph.yaml#/properties/ports
+diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+index 811112255d7d2..c94b49498f695 100644
+--- a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
++++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+@@ -11,10 +11,13 @@ maintainers:
+ 
+ properties:
+   compatible:
+-    enum:
+-      - qcom,sdx55-pcie-ep
+-      - qcom,sdx65-pcie-ep
+-      - qcom,sm8450-pcie-ep
++    oneOf:
++      - enum:
++          - qcom,sdx55-pcie-ep
++          - qcom,sm8450-pcie-ep
++      - items:
++          - const: qcom,sdx65-pcie-ep
++          - const: qcom,sdx55-pcie-ep
+ 
+   reg:
+     items:
+@@ -110,7 +113,6 @@ allOf:
+           contains:
+             enum:
+               - qcom,sdx55-pcie-ep
+-              - qcom,sdx65-pcie-ep
+     then:
+       properties:
+         clocks:
+diff --git a/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml b/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
+index 202a5d51ee88c..facaafefb4414 100644
+--- a/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
++++ b/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
+@@ -21,6 +21,7 @@ properties:
+     const: qcom,kpss-acc-v2
+ 
+   reg:
++    minItems: 1
+     items:
+       - description: Base address and size of the register region
+       - description: Optional base address and size of the alias register region
+diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
+index 8a08698e34846..b4eb4001eb3d2 100644
+--- a/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
+@@ -49,7 +49,7 @@ patternProperties:
+   ".*-supply$":
+     description: Input supply phandle(s) for this node
+ 
+-  "^((s|l|lvs)[0-9]*)|(s[1-2][a-b])|(ncp)|(mvs)|(usb-switch)|(hdmi-switch)$":
++  "^((s|l|lvs)[0-9]*|s[1-2][a-b]|ncp|mvs|usb-switch|hdmi-switch)$":
+     description: List of regulators and its properties
+     $ref: regulator.yaml#
+     unevaluatedProperties: false
+diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+index 42ceaf13cd5da..deeed2bca2cdc 100644
+--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
++++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+@@ -72,7 +72,7 @@ allOf:
+       properties:
+         compatible:
+           contains:
+-            const: samsung,exynos54333-dwusb3
++            const: samsung,exynos5433-dwusb3
+     then:
+       properties:
+         clocks:
+@@ -82,8 +82,8 @@ allOf:
+           items:
+             - const: aclk
+             - const: susp_clk
+-            - const: pipe_pclk
+             - const: phyclk
++            - const: pipe_pclk
+ 
+   - if:
+       properties:
+diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
+index 6fa3a62795016..022198c513506 100644
+--- a/Documentation/scsi/scsi_mid_low_api.rst
++++ b/Documentation/scsi/scsi_mid_low_api.rst
+@@ -1190,11 +1190,11 @@ Members of interest:
+ 		 - pointer to scsi_device object that this command is
+                    associated with.
+     resid
+-		 - an LLD should set this signed integer to the requested
++		 - an LLD should set this unsigned integer to the requested
+                    transfer length (i.e. 'request_bufflen') less the number
+                    of bytes that are actually transferred. 'resid' is
+                    preset to 0 so an LLD can ignore it if it cannot detect
+-                   underruns (overruns should be rare). If possible an LLD
++                   underruns (overruns should not be reported). An LLD
+                    should set 'resid' prior to invoking 'done'. The most
+                    interesting case is data transfers from a SCSI target
+                    device (e.g. READs) that underrun.
+diff --git a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
+index 2d6e3bbdd0404..72677a280cd64 100644
+--- a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
++++ b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
+@@ -58,6 +58,9 @@ the subdevice exposes, drivers return the ENOSPC error code and adjust the
+ value of the ``num_routes`` field. Application should then reserve enough memory
+ for all the route entries and call ``VIDIOC_SUBDEV_G_ROUTING`` again.
+ 
++On a successful ``VIDIOC_SUBDEV_G_ROUTING`` call the driver updates the
++``num_routes`` field to reflect the actual number of routes returned.
++
+ .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+ 
+ .. c:type:: v4l2_subdev_routing
+@@ -138,9 +141,7 @@ ENOSPC
+ 
+ EINVAL
+    The sink or source pad identifiers reference a non-existing pad, or reference
+-   pads of different types (ie. the sink_pad identifiers refers to a source pad)
+-   or the sink or source stream identifiers reference a non-existing stream on
+-   the sink or source pad.
++   pads of different types (ie. the sink_pad identifiers refers to a source pad).
+ 
+ E2BIG
+    The application provided ``num_routes`` for ``VIDIOC_SUBDEV_S_ROUTING`` is
+diff --git a/Makefile b/Makefile
+index c47558bc00aa8..901cdfa5e7d3b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -1289,7 +1289,7 @@ prepare0: archprepare
+ # All the preparing..
+ prepare: prepare0
+ ifdef CONFIG_RUST
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v
++	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
+ 	$(Q)$(MAKE) $(build)=rust
+ endif
+ 
+@@ -1825,7 +1825,7 @@ $(DOC_TARGETS):
+ # "Is Rust available?" target
+ PHONY += rustavailable
+ rustavailable:
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v && echo "Rust is available!"
++	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh && echo "Rust is available!"
+ 
+ # Documentation target
+ #
+diff --git a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
+index 0734aa249b8e0..0f6d7fe30068f 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
+@@ -26,7 +26,6 @@
+ 		led-wlan {
+ 			label = "bcm53xx:blue:wlan";
+ 			gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>;
+-			linux,default-trigger = "default-off";
+ 		};
+ 
+ 		led-system {
+@@ -46,3 +45,16 @@
+ 		};
+ 	};
+ };
++
++&gmac0 {
++	phy-mode = "rgmii";
++	phy-handle = <&bcm54210e>;
++
++	mdio {
++		/delete-node/ switch@1e;
++
++		bcm54210e: ethernet-phy@0 {
++			reg = <0>;
++		};
++	};
++};
+diff --git a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-810.dts
+index e6fb6cbe69633..4e0ef0af726f5 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-810.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-810.dts
+@@ -26,7 +26,6 @@
+ 		led-5ghz {
+ 			label = "bcm53xx:blue:5ghz";
+ 			gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>;
+-			linux,default-trigger = "default-off";
+ 		};
+ 
+ 		led-system {
+@@ -42,7 +41,6 @@
+ 		led-2ghz {
+ 			label = "bcm53xx:blue:2ghz";
+ 			gpios = <&pcie0_chipcommon 3 GPIO_ACTIVE_HIGH>;
+-			linux,default-trigger = "default-off";
+ 		};
+ 	};
+ 
+@@ -83,3 +81,16 @@
+ 		};
+ 	};
+ };
++
++&gmac0 {
++	phy-mode = "rgmii";
++	phy-handle = <&bcm54210e>;
++
++	mdio {
++		/delete-node/ switch@1e;
++
++		bcm54210e: ethernet-phy@0 {
++			reg = <0>;
++		};
++	};
++};
+diff --git a/arch/arm/boot/dts/broadcom/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/broadcom/bcm47189-tenda-ac9.dts
+index dab2e5f63a727..06b1a582809ca 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47189-tenda-ac9.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47189-tenda-ac9.dts
+@@ -135,8 +135,8 @@
+ 			label = "lan4";
+ 		};
+ 
+-		port@5 {
+-			reg = <5>;
++		port@8 {
++			reg = <8>;
+ 			label = "cpu";
+ 			ethernet = <&gmac0>;
+ 		};
+diff --git a/arch/arm/boot/dts/broadcom/bcm53573.dtsi b/arch/arm/boot/dts/broadcom/bcm53573.dtsi
+index 3f03a381db0f2..eed1a6147f0bf 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53573.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm53573.dtsi
+@@ -127,6 +127,9 @@
+ 
+ 		pcie0: pcie@2000 {
+ 			reg = <0x00002000 0x1000>;
++
++			#address-cells = <3>;
++			#size-cells = <2>;
+ 		};
+ 
+ 		usb2: usb2@4000 {
+@@ -156,8 +159,6 @@
+ 			};
+ 
+ 			ohci: usb@d000 {
+-				#usb-cells = <0>;
+-
+ 				compatible = "generic-ohci";
+ 				reg = <0xd000 0x1000>;
+ 				interrupt-parent = <&gic>;
+diff --git a/arch/arm/boot/dts/broadcom/bcm947189acdbmr.dts b/arch/arm/boot/dts/broadcom/bcm947189acdbmr.dts
+index 3709baa2376f5..0b8727ae6f16d 100644
+--- a/arch/arm/boot/dts/broadcom/bcm947189acdbmr.dts
++++ b/arch/arm/boot/dts/broadcom/bcm947189acdbmr.dts
+@@ -60,9 +60,9 @@
+ 	spi {
+ 		compatible = "spi-gpio";
+ 		num-chipselects = <1>;
+-		gpio-sck = <&chipcommon 21 0>;
+-		gpio-miso = <&chipcommon 22 0>;
+-		gpio-mosi = <&chipcommon 23 0>;
++		sck-gpios = <&chipcommon 21 0>;
++		miso-gpios = <&chipcommon 22 0>;
++		mosi-gpios = <&chipcommon 23 0>;
+ 		cs-gpios = <&chipcommon 24 0>;
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
+index f0ef86fadc9d9..e328216443135 100644
+--- a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
+@@ -230,9 +230,12 @@
+ 			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "hc_irq", "pwr_irq";
+ 			bus-width = <8>;
+-			clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>,
+-				 <&gcc GCC_DCD_XO_CLK>;
+-			clock-names = "iface", "core", "xo";
++			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
++				 <&gcc GCC_SDCC1_APPS_CLK>,
++				 <&xo>;
++			clock-names = "iface",
++				      "core",
++				      "xo";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts b/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
+index 02d8d6e241ae1..fcf1c51c5e7a7 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
++++ b/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
+@@ -7,7 +7,7 @@
+ #include "qcom-sdx65.dtsi"
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include <arm64/qcom/pmk8350.dtsi>
+-#include <arm64/qcom/pm8150b.dtsi>
++#include <arm64/qcom/pm7250b.dtsi>
+ #include "qcom-pmx65.dtsi"
+ 
+ / {
+diff --git a/arch/arm/boot/dts/samsung/s3c6410-mini6410.dts b/arch/arm/boot/dts/samsung/s3c6410-mini6410.dts
+index 17097da36f5ed..0b07b3c319604 100644
+--- a/arch/arm/boot/dts/samsung/s3c6410-mini6410.dts
++++ b/arch/arm/boot/dts/samsung/s3c6410-mini6410.dts
+@@ -51,7 +51,7 @@
+ 
+ 		ethernet@18000000 {
+ 			compatible = "davicom,dm9000";
+-			reg = <0x18000000 0x2 0x18000004 0x2>;
++			reg = <0x18000000 0x2>, <0x18000004 0x2>;
+ 			interrupt-parent = <&gpn>;
+ 			interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ 			davicom,no-eeprom;
+diff --git a/arch/arm/boot/dts/samsung/s5pv210-smdkv210.dts b/arch/arm/boot/dts/samsung/s5pv210-smdkv210.dts
+index 6e26c67e0a26e..901e7197b1368 100644
+--- a/arch/arm/boot/dts/samsung/s5pv210-smdkv210.dts
++++ b/arch/arm/boot/dts/samsung/s5pv210-smdkv210.dts
+@@ -41,7 +41,7 @@
+ 
+ 	ethernet@a8000000 {
+ 		compatible = "davicom,dm9000";
+-		reg = <0xA8000000 0x2 0xA8000002 0x2>;
++		reg = <0xa8000000 0x2>, <0xa8000002 0x2>;
+ 		interrupt-parent = <&gph1>;
+ 		interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ 		local-mac-address = [00 00 de ad be ef];
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/st/stm32mp157c-emstamp-argon.dtsi
+index 94e38141af672..fd89542c69c93 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-emstamp-argon.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp157c-emstamp-argon.dtsi
+@@ -368,8 +368,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	interrupt-names = "wdg";
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-odyssey-som.dtsi b/arch/arm/boot/dts/st/stm32mp157c-odyssey-som.dtsi
+index e22871dc580c8..cf74852514906 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-odyssey-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp157c-odyssey-som.dtsi
+@@ -230,8 +230,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+index e61df23d361a7..74a11ccc5333f 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+@@ -416,8 +416,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcor-som.dtsi
+index bba19f21e5277..89881a26c6141 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcor-som.dtsi
+@@ -227,8 +227,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	status = "okay";
+diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
+index 18605f1b35807..26c1d2ced4ce1 100644
+--- a/arch/arm/include/asm/irq.h
++++ b/arch/arm/include/asm/irq.h
+@@ -32,7 +32,7 @@ void handle_IRQ(unsigned int, struct pt_regs *);
+ #include <linux/cpumask.h>
+ 
+ extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+-					   bool exclude_self);
++					   int exclude_cpu);
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ #endif
+ 
+diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
+index dfeed440254a8..fe4326d938c18 100644
+--- a/arch/arm/include/asm/syscall.h
++++ b/arch/arm/include/asm/syscall.h
+@@ -25,6 +25,9 @@ static inline int syscall_get_nr(struct task_struct *task,
+ 	if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ 		return task_thread_info(task)->abi_syscall;
+ 
++	if (task_thread_info(task)->abi_syscall == -1)
++		return -1;
++
+ 	return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+ }
+ 
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index bcc4c9ec3aa4e..5c31e9de7a602 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -90,6 +90,7 @@ slow_work_pending:
+ 	cmp	r0, #0
+ 	beq	no_work_pending
+ 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
++	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
+ 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
+ 	b	local_restart			@ ... and off we go
+ ENDPROC(ret_fast_syscall)
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 2d8e2516906b6..fef32d73f9120 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -783,8 +783,9 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ 
+ 		case PTRACE_SET_SYSCALL:
+-			task_thread_info(child)->abi_syscall = data &
+-							__NR_SYSCALL_MASK;
++			if (data != -1)
++				data &= __NR_SYSCALL_MASK;
++			task_thread_info(child)->abi_syscall = data;
+ 			ret = 0;
+ 			break;
+ 
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 6756203e45f3d..3431c0553f45c 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -846,7 +846,7 @@ static void raise_nmi(cpumask_t *mask)
+ 	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
++	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
+ }
+diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
+index 777f9f8e7cd86..5e05dd1324e7b 100644
+--- a/arch/arm/mach-omap2/powerdomain.c
++++ b/arch/arm/mach-omap2/powerdomain.c
+@@ -174,7 +174,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
+ 		break;
+ 	case PWRDM_STATE_PREV:
+ 		prev = pwrdm_read_prev_pwrst(pwrdm);
+-		if (pwrdm->state != prev)
++		if (prev >= 0 && pwrdm->state != prev)
+ 			pwrdm->state_counter[prev]++;
+ 		if (prev == PWRDM_POWER_RET)
+ 			_update_logic_membank_counters(pwrdm);
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+index b4409349eb3f6..1004ab0abb131 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+@@ -355,28 +355,6 @@
+ 		>;
+ 	};
+ 
+-	pinctrl_fec: fecgrp {
+-		fsl,pins = <
+-			MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC				0x3
+-			MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO				0x3
+-			MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0				0x91
+-			MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1				0x91
+-			MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2				0x91
+-			MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3				0x91
+-			MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC				0x91
+-			MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL			0x91
+-			MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0				0x1f
+-			MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1				0x1f
+-			MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2				0x1f
+-			MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3				0x1f
+-			MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL			0x1f
+-			MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC				0x1f
+-			MX8MP_IOMUXC_SAI1_RXD1__ENET1_1588_EVENT1_OUT			0x1f
+-			MX8MP_IOMUXC_SAI1_RXD0__ENET1_1588_EVENT1_IN			0x1f
+-			MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19				0x19
+-		>;
+-	};
+-
+ 	pinctrl_gpio_led: gpioledgrp {
+ 		fsl,pins = <
+ 			MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16				0x19
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+index 5a1ce432c1fbb..15a71a59745c4 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+@@ -1317,6 +1317,7 @@
+ 
+ 	uartd: serial@70006300 {
+ 		compatible = "nvidia,tegra30-hsuart";
++		reset-names = "serial";
+ 		status = "okay";
+ 
+ 		bluetooth {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+index cd13cf2381dde..513cc2cd0b668 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+@@ -2010,6 +2010,7 @@
+ 
+ 		serial@3100000 {
+ 			compatible = "nvidia,tegra194-hsuart";
++			reset-names = "serial";
+ 			status = "okay";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+index 43d797e5544f5..b35044812ecfd 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+@@ -12,6 +12,7 @@
+ 
+ 	aliases {
+ 		serial0 = &tcu;
++		serial1 = &uarta;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index f3d65a6061949..5ee098c12801c 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -278,7 +278,7 @@
+ 		compatible = "ovti,ov5640";
+ 		reg = <0x3b>;
+ 
+-		enable-gpios = <&tlmm 34 GPIO_ACTIVE_HIGH>;
++		powerdown-gpios = <&tlmm 34 GPIO_ACTIVE_HIGH>;
+ 		reset-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&camera_rear_default>;
+@@ -287,9 +287,9 @@
+ 		clock-names = "xclk";
+ 		clock-frequency = <23880000>;
+ 
+-		vdddo-supply = <&camera_vdddo_1v8>;
+-		vdda-supply = <&camera_vdda_2v8>;
+-		vddd-supply = <&camera_vddd_1v5>;
++		DOVDD-supply = <&camera_vdddo_1v8>;
++		AVDD-supply = <&camera_vdda_2v8>;
++		DVDD-supply = <&camera_vddd_1v5>;
+ 
+ 		/* No camera mezzanine by default */
+ 		status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+index 537547b97459b..b599909c44639 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+@@ -208,6 +208,25 @@
+ 	status = "okay";
+ };
+ 
++&hdmi {
++	status = "okay";
++
++	pinctrl-names = "default", "sleep";
++	pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
++	pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
++
++	core-vdda-supply = <&vreg_l12a_1p8>;
++	core-vcc-supply = <&vreg_s4a_1p8>;
++};
++
++&hdmi_phy {
++	status = "okay";
++
++	vddio-supply = <&vreg_l12a_1p8>;
++	vcca-supply = <&vreg_l28a_0p925>;
++	#phy-cells = <0>;
++};
++
+ &hsusb_phy1 {
+ 	status = "okay";
+ 
+@@ -232,25 +251,6 @@
+ 	status = "okay";
+ };
+ 
+-&mdss_hdmi {
+-	status = "okay";
+-
+-	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active>;
+-	pinctrl-1 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend>;
+-
+-	core-vdda-supply = <&vreg_l12a_1p8>;
+-	core-vcc-supply = <&vreg_s4a_1p8>;
+-};
+-
+-&mdss_hdmi_phy {
+-	status = "okay";
+-
+-	vddio-supply = <&vreg_l12a_1p8>;
+-	vcca-supply = <&vreg_l28a_0p925>;
+-	#phy-cells = <0>;
+-};
+-
+ &mmcc {
+ 	vdd-gfx-supply = <&vdd_gfx>;
+ };
+@@ -433,28 +433,28 @@
+ 		drive-strength = <2>;
+ 	};
+ 
+-	mdss_hdmi_hpd_active: mdss_hdmi-hpd-active-state {
++	hdmi_hpd_active: hdmi-hpd-active-state {
+ 		pins = "gpio34";
+ 		function = "hdmi_hot";
+ 		bias-pull-down;
+ 		drive-strength = <16>;
+ 	};
+ 
+-	mdss_hdmi_hpd_suspend: mdss_hdmi-hpd-suspend-state {
++	hdmi_hpd_suspend: hdmi-hpd-suspend-state {
+ 		pins = "gpio34";
+ 		function = "hdmi_hot";
+ 		bias-pull-down;
+ 		drive-strength = <2>;
+ 	};
+ 
+-	mdss_hdmi_ddc_active: mdss_hdmi-ddc-active-state {
++	hdmi_ddc_active: hdmi-ddc-active-state {
+ 		pins = "gpio32", "gpio33";
+ 		function = "hdmi_ddc";
+ 		drive-strength = <2>;
+ 		bias-pull-up;
+ 	};
+ 
+-	mdss_hdmi_ddc_suspend: mdss_hdmi-ddc-suspend-state {
++	hdmi_ddc_suspend: hdmi-ddc-suspend-state {
+ 		pins = "gpio32", "gpio33";
+ 		function = "hdmi_ddc";
+ 		drive-strength = <2>;
+@@ -1043,7 +1043,7 @@
+ 		};
+ 	};
+ 
+-	mdss_hdmi-dai-link {
++	hdmi-dai-link {
+ 		link-name = "HDMI";
+ 		cpu {
+ 			sound-dai = <&q6afedai HDMI_RX>;
+@@ -1054,7 +1054,7 @@
+ 		};
+ 
+ 		codec {
+-			sound-dai = <&mdss_hdmi 0>;
++			sound-dai = <&hdmi 0>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+index ac6471d1db1f7..ed2e2f6c6775a 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+@@ -92,15 +92,15 @@
+ 	status = "okay";
+ };
+ 
+-&mdss {
++&hdmi {
+ 	status = "okay";
+ };
+ 
+-&mdss_hdmi {
++&hdmi_phy {
+ 	status = "okay";
+ };
+ 
+-&mdss_hdmi_phy {
++&mdss {
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 68839acbd613f..00ed71936b472 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -794,10 +794,10 @@
+ 
+ 		pcie1: pci@10000000 {
+ 			compatible = "qcom,pcie-ipq8074";
+-			reg =  <0x10000000 0xf1d>,
+-			       <0x10000f20 0xa8>,
+-			       <0x00088000 0x2000>,
+-			       <0x10100000 0x1000>;
++			reg = <0x10000000 0xf1d>,
++			      <0x10000f20 0xa8>,
++			      <0x00088000 0x2000>,
++			      <0x10100000 0x1000>;
+ 			reg-names = "dbi", "elbi", "parf", "config";
+ 			device_type = "pci";
+ 			linux,pci-domain = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+index 97262b8519b36..3892ad4f639a8 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -165,7 +165,7 @@
+ 		pinctrl-0 = <&light_int_default>;
+ 
+ 		vdd-supply = <&pm8916_l17>;
+-		vio-supply = <&pm8916_l6>;
++		vddio-supply = <&pm8916_l6>;
+ 	};
+ 
+ 	gyroscope@68 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+index 15dc246e84e2b..126e8b5cf49fd 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+@@ -219,9 +219,9 @@
+ 		compatible = "yamaha,yas537";
+ 		reg = <0x2e>;
+ 
+-		mount-matrix =  "0",  "1",  "0",
+-				"1",  "0",  "0",
+-				"0",  "0", "-1";
++		mount-matrix = "0",  "1",  "0",
++			       "1",  "0",  "0",
++			       "0",  "0", "-1";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 895cafc11480b..559a5d1ba615b 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -55,6 +55,7 @@
+ 			L2_1: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
++				cache-unified;
+ 			};
+ 		};
+ 
+@@ -111,6 +112,7 @@
+ 			L2_0: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
++				cache-unified;
+ 			};
+ 		};
+ 
+@@ -155,7 +157,7 @@
+ 
+ 		idle-states {
+ 			CPU_SLEEP_0: cpu-sleep-0 {
+-				compatible ="qcom,idle-state-spc", "arm,idle-state";
++				compatible = "arm,idle-state";
+ 				entry-latency-us = <130>;
+ 				exit-latency-us = <150>;
+ 				min-residency-us = <2000>;
+@@ -1644,7 +1646,7 @@
+ 			clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ 				 <&gcc GCC_SDCC2_APPS_CLK>,
+ 				 <&rpmcc RPM_SMD_XO_CLK_SRC>;
+-			clock-names =  "iface", "core", "xo";
++			clock-names = "iface", "core", "xo";
+ 			resets = <&gcc GCC_SDCC2_BCR>;
+ 			pinctrl-0 = <&sdc2_default>;
+ 			pinctrl-1 = <&sdc2_sleep>;
+@@ -1731,7 +1733,7 @@
+ 			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
+ 				 <&gcc GCC_BLSP1_AHB_CLK>;
+-			clock-names =  "core", "iface";
++			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 6>, <&blsp_dma 7>;
+ 			dma-names = "tx", "rx";
+ 			pinctrl-0 = <&blsp_i2c2_default>;
+@@ -1765,7 +1767,7 @@
+ 			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
+ 				 <&gcc GCC_BLSP1_AHB_CLK>;
+-			clock-names =  "core", "iface";
++			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 8>, <&blsp_dma 9>;
+ 			dma-names = "tx", "rx";
+ 			pinctrl-0 = <&blsp_i2c3_default>;
+@@ -1799,7 +1801,7 @@
+ 			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+ 				 <&gcc GCC_BLSP1_AHB_CLK>;
+-			clock-names =  "core", "iface";
++			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 10>, <&blsp_dma 11>;
+ 			dma-names = "tx", "rx";
+ 			pinctrl-0 = <&blsp_i2c4_default>;
+@@ -1833,7 +1835,7 @@
+ 			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>,
+ 				 <&gcc GCC_BLSP1_AHB_CLK>;
+-			clock-names =  "core", "iface";
++			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 12>, <&blsp_dma 13>;
+ 			dma-names = "tx", "rx";
+ 			pinctrl-0 = <&blsp_i2c5_default>;
+@@ -1867,7 +1869,7 @@
+ 			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>,
+ 				 <&gcc GCC_BLSP1_AHB_CLK>;
+-			clock-names =  "core", "iface";
++			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+ 			dma-names = "tx", "rx";
+ 			pinctrl-0 = <&blsp_i2c6_default>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
+index 1d672e6086532..790d19c99af14 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
+@@ -17,7 +17,7 @@
+ 	compatible = "xiaomi,daisy", "qcom,msm8953";
+ 	chassis-type = "handset";
+ 	qcom,msm-id = <293 0>;
+-	qcom,board-id= <0x1000b 0x9>;
++	qcom,board-id = <0x1000b 0x9>;
+ 
+ 	chosen {
+ 		#address-cells = <2>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+index b5be55034fd36..0956c866d6cb1 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+@@ -20,7 +20,7 @@
+ 	compatible = "xiaomi,vince", "qcom,msm8953";
+ 	chassis-type = "handset";
+ 	qcom,msm-id = <293 0>;
+-	qcom,board-id= <0x1000b 0x08>;
++	qcom,board-id = <0x1000b 0x08>;
+ 
+ 	gpio-keys {
+ 		compatible = "gpio-keys";
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts
+index 495d45a16e63a..596ad4c896f55 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts
+@@ -24,10 +24,10 @@
+ 	status = "okay";
+ };
+ 
+-&mdss_hdmi {
++&hdmi {
+ 	status = "okay";
+ };
+ 
+-&mdss_hdmi_phy {
++&hdmi_phy {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+index bdedcf9dff032..d1066edaea471 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+@@ -82,7 +82,7 @@
+ 		#size-cells = <0>;
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <125 IRQ_TYPE_LEVEL_LOW>;
+-		vdda-supply = <&vreg_l6a_1p8>;
++		vio-supply = <&vreg_l6a_1p8>;
+ 		vdd-supply = <&vdd_3v2_tp>;
+ 		reset-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 0cb2d4f08c3a1..2ea3117438c3a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -895,7 +895,7 @@
+ 				 <&mdss_dsi0_phy 0>,
+ 				 <&mdss_dsi1_phy 1>,
+ 				 <&mdss_dsi1_phy 0>,
+-				 <&mdss_hdmi_phy>;
++				 <&hdmi_phy>;
+ 			clock-names = "xo",
+ 				      "gpll0",
+ 				      "gcc_mmss_noc_cfg_ahb_clk",
+@@ -980,7 +980,7 @@
+ 					port@0 {
+ 						reg = <0>;
+ 						mdp5_intf3_out: endpoint {
+-							remote-endpoint = <&mdss_hdmi_in>;
++							remote-endpoint = <&hdmi_in>;
+ 						};
+ 					};
+ 
+@@ -1075,7 +1075,7 @@
+ 				reg-names = "dsi_ctrl";
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <4>;
++				interrupts = <5>;
+ 
+ 				clocks = <&mmcc MDSS_MDP_CLK>,
+ 					 <&mmcc MDSS_BYTE1_CLK>,
+@@ -1136,11 +1136,11 @@
+ 				status = "disabled";
+ 			};
+ 
+-			mdss_hdmi: mdss_hdmi-tx@9a0000 {
+-				compatible = "qcom,mdss_hdmi-tx-8996";
+-				reg =	<0x009a0000 0x50c>,
+-					<0x00070000 0x6158>,
+-					<0x009e0000 0xfff>;
++			hdmi: hdmi-tx@9a0000 {
++				compatible = "qcom,hdmi-tx-8996";
++				reg = <0x009a0000 0x50c>,
++				      <0x00070000 0x6158>,
++				      <0x009e0000 0xfff>;
+ 				reg-names = "core_physical",
+ 					    "qfprom_physical",
+ 					    "hdcp_physical";
+@@ -1160,7 +1160,7 @@
+ 					"alt_iface",
+ 					"extp";
+ 
+-				phys = <&mdss_hdmi_phy>;
++				phys = <&hdmi_phy>;
+ 				#sound-dai-cells = <1>;
+ 
+ 				status = "disabled";
+@@ -1171,16 +1171,16 @@
+ 
+ 					port@0 {
+ 						reg = <0>;
+-						mdss_hdmi_in: endpoint {
++						hdmi_in: endpoint {
+ 							remote-endpoint = <&mdp5_intf3_out>;
+ 						};
+ 					};
+ 				};
+ 			};
+ 
+-			mdss_hdmi_phy: phy@9a0600 {
++			hdmi_phy: phy@9a0600 {
+ 				#phy-cells = <0>;
+-				compatible = "qcom,mdss_hdmi-phy-8996";
++				compatible = "qcom,hdmi-phy-8996";
+ 				reg = <0x009a0600 0x1c4>,
+ 				      <0x009a0a00 0x124>,
+ 				      <0x009a0c00 0x124>,
+@@ -3336,6 +3336,9 @@
+ 			#size-cells = <1>;
+ 			ranges;
+ 
++			interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "hs_phy_irq";
++
+ 			clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ 				<&gcc GCC_USB20_MASTER_CLK>,
+ 				<&gcc GCC_USB20_MOCK_UTMI_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
+index 7957c8823f0d5..5e3fd1637f449 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996pro-xiaomi-natrium.dts
+@@ -106,7 +106,7 @@
+ &sound {
+ 	compatible = "qcom,apq8096-sndcard";
+ 	model = "natrium";
+-	audio-routing =	"RX_BIAS", "MCLK";
++	audio-routing = "RX_BIAS", "MCLK";
+ 
+ 	mm1-dai-link {
+ 		link-name = "MultiMedia1";
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index f0e943ff00464..ed764d02819f7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -909,10 +909,10 @@
+ 
+ 		pcie0: pci@1c00000 {
+ 			compatible = "qcom,pcie-msm8998", "qcom,pcie-msm8996";
+-			reg =	<0x01c00000 0x2000>,
+-				<0x1b000000 0xf1d>,
+-				<0x1b000f20 0xa8>,
+-				<0x1b100000 0x100000>;
++			reg = <0x01c00000 0x2000>,
++			      <0x1b000000 0xf1d>,
++			      <0x1b000f20 0xa8>,
++			      <0x1b100000 0x100000>;
+ 			reg-names = "parf", "dbi", "elbi", "config";
+ 			device_type = "pci";
+ 			linux,pci-domain = <0>;
+@@ -2074,11 +2074,11 @@
+ 
+ 		spmi_bus: spmi@800f000 {
+ 			compatible = "qcom,spmi-pmic-arb";
+-			reg =	<0x0800f000 0x1000>,
+-				<0x08400000 0x1000000>,
+-				<0x09400000 0x1000000>,
+-				<0x0a400000 0x220000>,
+-				<0x0800a000 0x3000>;
++			reg = <0x0800f000 0x1000>,
++			      <0x08400000 0x1000000>,
++			      <0x09400000 0x1000000>,
++			      <0x0a400000 0x220000>,
++			      <0x0800a000 0x3000>;
+ 			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ 			interrupt-names = "periph_irq";
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2737,10 +2737,10 @@
+ 
+ 			clocks = <&mmcc MNOC_AHB_CLK>,
+ 				 <&mmcc BIMC_SMMU_AHB_CLK>,
+-				 <&rpmcc RPM_SMD_MMAXI_CLK>,
+ 				 <&mmcc BIMC_SMMU_AXI_CLK>;
+-			clock-names = "iface-mm", "iface-smmu",
+-				      "bus-mm", "bus-smmu";
++			clock-names = "iface-mm",
++				      "iface-smmu",
++				      "bus-smmu";
+ 
+ 			#global-interrupts = <0>;
+ 			interrupts =
+@@ -2764,6 +2764,8 @@
+ 				<GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ 				<GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ 				<GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
++
++			power-domains = <&mmcc BIMC_SMMU_GDSC>;
+ 		};
+ 
+ 		remoteproc_adsp: remoteproc@17300000 {
+diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+index 6f7aa67501e27..0fdf440596c01 100644
+--- a/arch/arm64/boot/dts/qcom/pm6150l.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+@@ -121,8 +121,9 @@
+ 		pm6150l_wled: leds@d800 {
+ 			compatible = "qcom,pm6150l-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "ovp";
++			interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x5 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			label = "backlight";
+ 
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
+index 87b71b7205b85..6fdbf507c262a 100644
+--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
+@@ -74,8 +74,9 @@
+ 		pm660l_wled: leds@d800 {
+ 			compatible = "qcom,pm660l-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "ovp";
++			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			label = "backlight";
+ 
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pm8350.dtsi b/arch/arm64/boot/dts/qcom/pm8350.dtsi
+index 2dfeb99300d74..9ed9ba23e81e4 100644
+--- a/arch/arm64/boot/dts/qcom/pm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8350.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pm8350_thermal: pm8350c-thermal {
++		pm8350_thermal: pm8350-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pm8350_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/pm8350b.dtsi b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
+index f1c7bd9d079c2..05c1058988927 100644
+--- a/arch/arm64/boot/dts/qcom/pm8350b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pm8350b_thermal: pm8350c-thermal {
++		pm8350b_thermal: pm8350b-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pm8350b_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+index 4891be3cd68a3..c16adca4e93a9 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+@@ -87,8 +87,9 @@
+ 		pmi8950_wled: leds@d800 {
+ 			compatible = "qcom,pmi8950-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <0x3 0xd8 0x02 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "short";
++			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			label = "backlight";
+ 
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+index 0192968f4d9b3..36d6a1fb553ac 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+@@ -54,8 +54,9 @@
+ 		pmi8994_wled: wled@d800 {
+ 			compatible = "qcom,pmi8994-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <3 0xd8 0x02 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "short";
++			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			qcom,cabc;
+ 			qcom,external-pfet;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+index bc6297e7253e2..1eb74017062d6 100644
+--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+@@ -59,7 +59,7 @@
+ 		};
+ 
+ 		pmk8350_adc_tm: adc-tm@3400 {
+-			compatible = "qcom,adc-tm7";
++			compatible = "qcom,spmi-adc-tm5-gen2";
+ 			reg = <0x3400>;
+ 			interrupts = <PMK8350_SID 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/pmr735b.dtsi b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
+index ec24c4478005a..f7473e2473224 100644
+--- a/arch/arm64/boot/dts/qcom/pmr735b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pmr735a_thermal: pmr735a-thermal {
++		pmr735b_thermal: pmr735b-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pmr735b_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+index 0ed11e80e5e29..1d1de156f8f04 100644
+--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+@@ -790,7 +790,7 @@
+ 				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+-			dma-channels =  <10>;
++			dma-channels = <10>;
+ 			dma-channel-mask = <0x1f>;
+ 			iommus = <&apps_smmu 0xf6 0x0>;
+ 			#dma-cells = <3>;
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index 972f753847e13..f2568aff14c84 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -1459,10 +1459,10 @@
+ 
+ 		pcie: pci@10000000 {
+ 			compatible = "qcom,pcie-qcs404";
+-			reg =  <0x10000000 0xf1d>,
+-			       <0x10000f20 0xa8>,
+-			       <0x07780000 0x2000>,
+-			       <0x10001000 0x2000>;
++			reg = <0x10000000 0xf1d>,
++			      <0x10000f20 0xa8>,
++			      <0x07780000 0x2000>,
++			      <0x10001000 0x2000>;
+ 			reg-names = "dbi", "elbi", "parf", "config";
+ 			device_type = "pci";
+ 			linux,pci-domain = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sa8540p.dtsi b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
+index bacbdec562814..96b2c59ad02b4 100644
+--- a/arch/arm64/boot/dts/qcom/sa8540p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
+@@ -207,7 +207,7 @@
+ 
+ 	linux,pci-domain = <2>;
+ 
+-	interrupts =  <GIC_SPI 567 IRQ_TYPE_LEVEL_HIGH>;
++	interrupts = <GIC_SPI 567 IRQ_TYPE_LEVEL_HIGH>;
+ 	interrupt-names = "msi";
+ 
+ 	interrupt-map = <0 0 0 1 &intc 0 0 GIC_SPI 541 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
+index 485f9942e1285..a90c70b1b73ea 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-audio-rt5682-3mic.dtsi
+@@ -13,7 +13,7 @@
+ 		compatible = "google,sc7280-herobrine";
+ 		model = "sc7280-rt5682-max98360a-3mic";
+ 
+-		audio-routing =	"VA DMIC0", "vdd-micb",
++		audio-routing = "VA DMIC0", "vdd-micb",
+ 				"VA DMIC1", "vdd-micb",
+ 				"VA DMIC2", "vdd-micb",
+ 				"VA DMIC3", "vdd-micb",
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index a0e8db8270e7a..925428a5f6aea 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2449,7 +2449,7 @@
+ 				 <&apps_smmu 0x1821 0>,
+ 				 <&apps_smmu 0x1832 0>;
+ 
+-			power-domains =	<&rpmhpd SC7280_LCX>;
++			power-domains = <&rpmhpd SC7280_LCX>;
+ 			power-domain-names = "lcx";
+ 			required-opps = <&rpmhpd_opp_nom>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
+index 8247af01c84a5..925047af734fc 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
+@@ -74,7 +74,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		pon: power-on@800 {
++		pon: pon@800 {
+ 			compatible = "qcom,pm8916-pon";
+ 			reg = <0x0800>;
+ 			pwrkey {
+@@ -142,9 +142,10 @@
+ 		};
+ 
+ 		pmc8180_gpios: gpio@c000 {
+-			compatible = "qcom,pmc8180-gpio";
++			compatible = "qcom,pmc8180-gpio", "qcom,spmi-gpio";
+ 			reg = <0xc000>;
+ 			gpio-controller;
++			gpio-ranges = <&pmc8180_gpios 0 0 10>;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+@@ -246,7 +247,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		power-on@800 {
++		pon@800 {
+ 			compatible = "qcom,pm8916-pon";
+ 			reg = <0x0800>;
+ 
+@@ -300,9 +301,10 @@
+ 		};
+ 
+ 		pmc8180c_gpios: gpio@c000 {
+-			compatible = "qcom,pmc8180c-gpio";
++			compatible = "qcom,pmc8180c-gpio", "qcom,spmi-gpio";
+ 			reg = <0xc000>;
+ 			gpio-controller;
++			gpio-ranges = <&pmc8180c_gpios 0 0 12>;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+@@ -313,7 +315,7 @@
+ 		compatible = "qcom,pmc8180c", "qcom,spmi-pmic";
+ 		reg = <0x5 SPMI_USID>;
+ 
+-		pmc8180c_lpg: lpg {
++		pmc8180c_lpg: pwm {
+ 			compatible = "qcom,pmc8180c-lpg";
+ 
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index be78a933d8eb2..9aac5861a9132 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -64,6 +64,7 @@
+ 				L3_0: l3-cache {
+ 					compatible = "cache";
+ 					cache-level = <3>;
++					cache-unified;
+ 				};
+ 			};
+ 		};
+@@ -298,7 +299,7 @@
+ 		domain-idle-states {
+ 			CLUSTER_SLEEP_0: cluster-sleep-0 {
+ 				compatible = "domain-idle-state";
+-				arm,psci-suspend-param = <0x4100c244>;
++				arm,psci-suspend-param = <0x4100a344>;
+ 				entry-latency-us = <3263>;
+ 				exit-latency-us = <6562>;
+ 				min-residency-us = <9987>;
+@@ -2252,7 +2253,7 @@
+ 		};
+ 
+ 		gmu: gmu@2c6a000 {
+-			compatible="qcom,adreno-gmu-680.1", "qcom,adreno-gmu";
++			compatible = "qcom,adreno-gmu-680.1", "qcom,adreno-gmu";
+ 
+ 			reg = <0 0x02c6a000 0 0x30000>,
+ 			      <0 0x0b290000 0 0x10000>,
+@@ -2541,8 +2542,11 @@
+ 
+ 		system-cache-controller@9200000 {
+ 			compatible = "qcom,sc8180x-llcc";
+-			reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
+-			reg-names = "llcc_base", "llcc_broadcast_base";
++			reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
++			      <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
++			      <0 0x09600000 0 0x50000>;
++			reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
++				    "llcc3_base", "llcc_broadcast_base";
+ 			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+index b566e403d1db2..b21b41a066b62 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+@@ -167,7 +167,7 @@
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 
+-		gpio = <&pmc8280_1_gpios 1 GPIO_ACTIVE_HIGH>;
++		gpio = <&pmc8280_1_gpios 2 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 
+ 		pinctrl-names = "default";
+@@ -757,7 +757,7 @@
+ 	};
+ 
+ 	misc_3p3_reg_en: misc-3p3-reg-en-state {
+-		pins = "gpio1";
++		pins = "gpio2";
+ 		function = "normal";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 7cc3028440b64..059dfccdfe7c2 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -1246,7 +1246,7 @@
+ };
+ 
+ &tlmm {
+-	gpio-reserved-ranges = <70 2>, <74 6>, <83 4>, <125 2>, <128 2>, <154 7>;
++	gpio-reserved-ranges = <70 2>, <74 6>, <125 2>, <128 2>, <154 4>;
+ 
+ 	bt_default: bt-default-state {
+ 		hstp-bt-en-pins {
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index ac0596dfdbc47..0756b7c141fff 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -298,6 +298,7 @@
+ 	firmware {
+ 		scm: scm {
+ 			compatible = "qcom,scm-sc8280xp", "qcom,scm";
++			interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index bba0f366ef03b..759b3a5964cc9 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1196,11 +1196,11 @@
+ 
+ 		spmi_bus: spmi@800f000 {
+ 			compatible = "qcom,spmi-pmic-arb";
+-			reg =	<0x0800f000 0x1000>,
+-				<0x08400000 0x1000000>,
+-				<0x09400000 0x1000000>,
+-				<0x0a400000 0x220000>,
+-				<0x0800a000 0x3000>;
++			reg = <0x0800f000 0x1000>,
++			      <0x08400000 0x1000000>,
++			      <0x09400000 0x1000000>,
++			      <0x0a400000 0x220000>,
++			      <0x0800a000 0x3000>;
+ 			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ 			interrupt-names = "periph_irq";
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
+index 623a826b18a3e..62fe72ff37630 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
+@@ -57,7 +57,7 @@
+ 
+ &sound {
+ 	model = "OnePlus 6";
+-	audio-routing =	"RX_BIAS", "MCLK",
++	audio-routing = "RX_BIAS", "MCLK",
+ 			"AMIC2", "MIC BIAS2",
+ 			"AMIC3", "MIC BIAS4",
+ 			"AMIC4", "MIC BIAS1",
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+index 3bc187a066aeb..7ee61b20452e2 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+@@ -15,6 +15,15 @@
+ 	qcom,msm-id = <321 0x20001>; /* SDM845 v2.1 */
+ 	qcom,board-id = <8 0>;
+ 
++	aliases {
++		serial0 = &uart6;
++		serial1 = &uart9;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
++	};
++
+ 	gpio-keys {
+ 		compatible = "gpio-keys";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 02a6ea0b8b2c9..89520a9fe1e3d 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -1207,6 +1207,7 @@
+ 			#clock-cells = <1>;
+ 			#reset-cells = <1>;
+ 			#power-domain-cells = <1>;
++			power-domains = <&rpmhpd SDM845_CX>;
+ 		};
+ 
+ 		qfprom@784000 {
+@@ -2613,7 +2614,7 @@
+ 				<0 0>,
+ 				<0 0>,
+ 				<0 0>,
+-				<0 300000000>;
++				<75000000 300000000>;
+ 
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+index 21d5d55da5ebf..7d39a615f4f78 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+@@ -484,14 +484,14 @@
+ 				tx-pins {
+ 					pins = "gpio12";
+ 					function = "qup_se1_l2_mira";
+-					drive-strength= <2>;
++					drive-strength = <2>;
+ 					bias-disable;
+ 				};
+ 
+ 				rx-pins {
+ 					pins = "gpio13";
+ 					function = "qup_se1_l3_mira";
+-					drive-strength= <2>;
++					drive-strength = <2>;
+ 					bias-disable;
+ 				};
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 55118577bf923..7d30b504441ad 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -1052,7 +1052,7 @@
+ 				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+-			dma-channels =  <10>;
++			dma-channels = <10>;
+ 			dma-channel-mask = <0xf>;
+ 			iommus = <&apps_smmu 0xf6 0x0>;
+ 			#dma-cells = <3>;
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 30e77010aed57..7cafb32fbb941 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -633,11 +633,6 @@
+ 			no-map;
+ 		};
+ 
+-		pil_gpu_mem: memory@8b715400 {
+-			reg = <0 0x8b715400 0 0x2000>;
+-			no-map;
+-		};
+-
+ 		pil_modem_mem: memory@8b800000 {
+ 			reg = <0 0x8b800000 0 0xf800000>;
+ 			no-map;
+@@ -658,6 +653,11 @@
+ 			no-map;
+ 		};
+ 
++		pil_gpu_mem: memory@f0d00000 {
++			reg = <0 0xf0d00000 0 0x1000>;
++			no-map;
++		};
++
+ 		debug_region: memory@ffb00000 {
+ 			reg = <0 0xffb00000 0 0xc0000>;
+ 			no-map;
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index b46e55bb8bdec..a7c3020a5de49 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1231,7 +1231,7 @@
+ 				dma-names = "tx", "rx";
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_i2c7_default>;
+-				interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 				status = "disabled";
+@@ -3840,7 +3840,7 @@
+ 			};
+ 
+ 			mdss_dsi0_phy: phy@ae94400 {
+-				compatible = "qcom,dsi-phy-7nm";
++				compatible = "qcom,dsi-phy-7nm-8150";
+ 				reg = <0 0x0ae94400 0 0x200>,
+ 				      <0 0x0ae94600 0 0x280>,
+ 				      <0 0x0ae94900 0 0x260>;
+@@ -3914,7 +3914,7 @@
+ 			};
+ 
+ 			mdss_dsi1_phy: phy@ae96400 {
+-				compatible = "qcom,dsi-phy-7nm";
++				compatible = "qcom,dsi-phy-7nm-8150";
+ 				reg = <0 0x0ae96400 0 0x200>,
+ 				      <0 0x0ae96600 0 0x280>,
+ 				      <0 0x0ae96900 0 0x260>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
+index 356a81698731a..62590c6bd3067 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
+@@ -14,3 +14,236 @@
+ };
+ 
+ /delete-node/ &vreg_l7f_1p8;
++
++&pm8009_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "CAM_PWR_LD_EN",
++			  "WIDEC_PWR_EN",
++			  "NC";
++};
++
++&pm8150_gpios {
++	gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */
++			  "OPTION_2",
++			  "NC",
++			  "PM_SLP_CLK_IN",
++			  "OPTION_1",
++			  "NC",
++			  "NC",
++			  "SP_ARI_PWR_ALARM",
++			  "NC",
++			  "NC"; /* GPIO_10 */
++};
++
++&pm8150b_gpios {
++	gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */
++			  "FOCUS_N",
++			  "NC",
++			  "NC",
++			  "RF_LCD_ID_EN",
++			  "NC",
++			  "NC",
++			  "LCD_ID",
++			  "NC",
++			  "WLC_EN_N", /* GPIO_10 */
++			  "NC",
++			  "RF_ID";
++};
++
++&pm8150l_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "PM3003A_EN",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "AUX2_THERM",
++			  "BB_HP_EN",
++			  "FP_LDO_EN",
++			  "PMX_RESET_N",
++			  "AUX3_THERM", /* GPIO_10 */
++			  "DTV_PWR_EN",
++			  "PM3003A_MODE";
++};
++
++&tlmm {
++	gpio-line-names = "AP_CTI_IN", /* GPIO_0 */
++			  "MDM2AP_ERR_FATAL",
++			  "AP_CTI_OUT",
++			  "MDM2AP_STATUS",
++			  "NFC_I2C_SDA",
++			  "NFC_I2C_SCL",
++			  "NFC_EN",
++			  "NFC_CLK_REQ",
++			  "NFC_ESE_PWR_REQ",
++			  "DVDT_WRT_DET_AND",
++			  "SPK_AMP_RESET_N", /* GPIO_10 */
++			  "SPK_AMP_INT_N",
++			  "APPS_I2C_1_SDA",
++			  "APPS_I2C_1_SCL",
++			  "NC",
++			  "TX_GTR_THRES_IN",
++			  "HST_BT_UART_CTS",
++			  "HST_BT_UART_RFR",
++			  "HST_BT_UART_TX",
++			  "HST_BT_UART_RX",
++			  "HST_WLAN_EN", /* GPIO_20 */
++			  "HST_BT_EN",
++			  "RGBC_IR_PWR_EN",
++			  "FP_INT_N",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NFC_ESE_SPI_MISO",
++			  "NFC_ESE_SPI_MOSI",
++			  "NFC_ESE_SPI_SCLK", /* GPIO_30 */
++			  "NFC_ESE_SPI_CS_N",
++			  "WCD_RST_N",
++			  "NC",
++			  "SDM_DEBUG_UART_TX",
++			  "SDM_DEBUG_UART_RX",
++			  "TS_I2C_SDA",
++			  "TS_I2C_SCL",
++			  "TS_INT_N",
++			  "FP_SPI_MISO", /* GPIO_40 */
++			  "FP_SPI_MOSI",
++			  "FP_SPI_SCLK",
++			  "FP_SPI_CS_N",
++			  "APPS_I2C_0_SDA",
++			  "APPS_I2C_0_SCL",
++			  "DISP_ERR_FG",
++			  "UIM2_DETECT_EN",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_50 */
++			  "NC",
++			  "MDM_UART_CTS",
++			  "MDM_UART_RFR",
++			  "MDM_UART_TX",
++			  "MDM_UART_RX",
++			  "AP2MDM_STATUS",
++			  "AP2MDM_ERR_FATAL",
++			  "MDM_IPC_HS_UART_TX",
++			  "MDM_IPC_HS_UART_RX",
++			  "NC", /* GPIO_60 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "USB_CC_DIR",
++			  "DISP_VSYNC",
++			  "NC",
++			  "NC",
++			  "CAM_PWR_B_CS",
++			  "NC", /* GPIO_70 */
++			  "CAM_PWR_A_CS",
++			  "SBU_SW_SEL",
++			  "SBU_SW_OE",
++			  "FP_RESET_N",
++			  "FP_RESET_N",
++			  "DISP_RESET_N",
++			  "DEBUG_GPIO0",
++			  "TRAY_DET",
++			  "CAM2_RST_N",
++			  "PCIE0_RST_N",
++			  "PCIE0_CLK_REQ_N", /* GPIO_80 */
++			  "PCIE0_WAKE_N",
++			  "DVDT_ENABLE",
++			  "DVDT_WRT_DET_OR",
++			  "NC",
++			  "PCIE2_RST_N",
++			  "PCIE2_CLK_REQ_N",
++			  "PCIE2_WAKE_N",
++			  "MDM_VFR_IRQ0",
++			  "MDM_VFR_IRQ1",
++			  "SW_SERVICE", /* GPIO_90 */
++			  "CAM_SOF",
++			  "CAM1_RST_N",
++			  "CAM0_RST_N",
++			  "CAM0_MCLK",
++			  "CAM1_MCLK",
++			  "CAM2_MCLK",
++			  "CAM3_MCLK",
++			  "CAM4_MCLK",
++			  "TOF_RST_N",
++			  "NC", /* GPIO_100 */
++			  "CCI0_I2C_SDA",
++			  "CCI0_I2C_SCL",
++			  "CCI1_I2C_SDA",
++			  "CCI1_I2C_SCL_",
++			  "CCI2_I2C_SDA",
++			  "CCI2_I2C_SCL",
++			  "CCI3_I2C_SDA",
++			  "CCI3_I2C_SCL",
++			  "CAM3_RST_N",
++			  "NFC_DWL_REQ", /* GPIO_110 */
++			  "NFC_IRQ",
++			  "XVS",
++			  "NC",
++			  "RF_ID_EXTENSION",
++			  "SPK_AMP_I2C_SDA",
++			  "SPK_AMP_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "WLC_I2C_SDA",
++			  "WLC_I2C_SCL", /* GPIO_120 */
++			  "ACC_COVER_OPEN",
++			  "ALS_PROX_INT_N",
++			  "ACCEL_INT",
++			  "WLAN_SW_CTRL",
++			  "CAMSENSOR_I2C_SDA",
++			  "CAMSENSOR_I2C_SCL",
++			  "UDON_SWITCH_SEL",
++			  "WDOG_DISABLE",
++			  "BAROMETER_INT",
++			  "NC", /* GPIO_130 */
++			  "NC",
++			  "FORCED_USB_BOOT",
++			  "NC",
++			  "NC",
++			  "WLC_INT_N",
++			  "NC",
++			  "NC",
++			  "RGBC_IR_INT",
++			  "NC",
++			  "NC", /* GPIO_140 */
++			  "NC",
++			  "BT_SLIMBUS_CLK",
++			  "BT_SLIMBUS_DATA",
++			  "HW_ID_0",
++			  "HW_ID_1",
++			  "WCD_SWR_TX_CLK",
++			  "WCD_SWR_TX_DATA0",
++			  "WCD_SWR_TX_DATA1",
++			  "WCD_SWR_RX_CLK",
++			  "WCD_SWR_RX_DATA0", /* GPIO_150 */
++			  "WCD_SWR_RX_DATA1",
++			  "SDM_DMIC_CLK1",
++			  "SDM_DMIC_DATA1",
++			  "SDM_DMIC_CLK2",
++			  "SDM_DMIC_DATA2",
++			  "SPK_AMP_I2S_CLK",
++			  "SPK_AMP_I2S_WS",
++			  "SPK_AMP_I2S_ASP_DIN",
++			  "SPK_AMP_I2S_ASP_DOUT",
++			  "COMPASS_I2C_SDA", /* GPIO_160 */
++			  "COMPASS_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "SSC_SPI_1_MISO",
++			  "SSC_SPI_1_MOSI",
++			  "SSC_SPI_1_CLK",
++			  "SSC_SPI_1_CS_N",
++			  "NC",
++			  "NC",
++			  "SSC_SENSOR_I2C_SDA", /* GPIO_170 */
++			  "SSC_SENSOR_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "HST_BLE_SNS_UART6_TX",
++			  "HST_BLE_SNS_UART6_RX",
++			  "HST_WLAN_UART_TX",
++			  "HST_WLAN_UART_RX";
++};
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
+index 01fe3974ee720..58a521046f5f5 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
+@@ -20,6 +20,8 @@
+ };
+ 
+ &gpio_keys {
++	pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &g_assist_n>;
++
+ 	g-assist-key {
+ 		label = "Google Assistant Key";
+ 		linux,code = <KEY_LEFTMETA>;
+@@ -30,6 +32,247 @@
+ 	};
+ };
+ 
++&pm8009_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "NC",
++			  "WIDEC_PWR_EN",
++			  "NC";
++};
++
++&pm8150_gpios {
++	gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */
++			  "OPTION_2",
++			  "NC",
++			  "PM_SLP_CLK_IN",
++			  "OPTION_1",
++			  "G_ASSIST_N",
++			  "NC",
++			  "SP_ARI_PWR_ALARM",
++			  "NC",
++			  "NC"; /* GPIO_10 */
++
++	g_assist_n: g-assist-n-state {
++		pins = "gpio6";
++		function = "normal";
++		power-source = <1>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
++&pm8150b_gpios {
++	gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */
++			  "FOCUS_N",
++			  "NC",
++			  "NC",
++			  "RF_LCD_ID_EN",
++			  "NC",
++			  "NC",
++			  "LCD_ID",
++			  "NC",
++			  "NC", /* GPIO_10 */
++			  "NC",
++			  "RF_ID";
++};
++
++&pm8150l_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "PM3003A_EN",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "AUX2_THERM",
++			  "BB_HP_EN",
++			  "FP_LDO_EN",
++			  "PMX_RESET_N",
++			  "NC", /* GPIO_10 */
++			  "NC",
++			  "PM3003A_MODE";
++};
++
++&tlmm {
++	gpio-line-names = "AP_CTI_IN", /* GPIO_0 */
++			  "MDM2AP_ERR_FATAL",
++			  "AP_CTI_OUT",
++			  "MDM2AP_STATUS",
++			  "NFC_I2C_SDA",
++			  "NFC_I2C_SCL",
++			  "NFC_EN",
++			  "NFC_CLK_REQ",
++			  "NFC_ESE_PWR_REQ",
++			  "DVDT_WRT_DET_AND",
++			  "SPK_AMP_RESET_N", /* GPIO_10 */
++			  "SPK_AMP_INT_N",
++			  "APPS_I2C_1_SDA",
++			  "APPS_I2C_1_SCL",
++			  "NC",
++			  "TX_GTR_THRES_IN",
++			  "HST_BT_UART_CTS",
++			  "HST_BT_UART_RFR",
++			  "HST_BT_UART_TX",
++			  "HST_BT_UART_RX",
++			  "HST_WLAN_EN", /* GPIO_20 */
++			  "HST_BT_EN",
++			  "RGBC_IR_PWR_EN",
++			  "FP_INT_N",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NFC_ESE_SPI_MISO",
++			  "NFC_ESE_SPI_MOSI",
++			  "NFC_ESE_SPI_SCLK", /* GPIO_30 */
++			  "NFC_ESE_SPI_CS_N",
++			  "WCD_RST_N",
++			  "NC",
++			  "SDM_DEBUG_UART_TX",
++			  "SDM_DEBUG_UART_RX",
++			  "TS_I2C_SDA",
++			  "TS_I2C_SCL",
++			  "TS_INT_N",
++			  "FP_SPI_MISO", /* GPIO_40 */
++			  "FP_SPI_MOSI",
++			  "FP_SPI_SCLK",
++			  "FP_SPI_CS_N",
++			  "APPS_I2C_0_SDA",
++			  "APPS_I2C_0_SCL",
++			  "DISP_ERR_FG",
++			  "UIM2_DETECT_EN",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_50 */
++			  "NC",
++			  "MDM_UART_CTS",
++			  "MDM_UART_RFR",
++			  "MDM_UART_TX",
++			  "MDM_UART_RX",
++			  "AP2MDM_STATUS",
++			  "AP2MDM_ERR_FATAL",
++			  "MDM_IPC_HS_UART_TX",
++			  "MDM_IPC_HS_UART_RX",
++			  "NC", /* GPIO_60 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "USB_CC_DIR",
++			  "DISP_VSYNC",
++			  "NC",
++			  "NC",
++			  "CAM_PWR_B_CS",
++			  "NC", /* GPIO_70 */
++			  "FRONTC_PWR_EN",
++			  "SBU_SW_SEL",
++			  "SBU_SW_OE",
++			  "FP_RESET_N",
++			  "FP_RESET_N",
++			  "DISP_RESET_N",
++			  "DEBUG_GPIO0",
++			  "TRAY_DET",
++			  "CAM2_RST_N",
++			  "PCIE0_RST_N",
++			  "PCIE0_CLK_REQ_N", /* GPIO_80 */
++			  "PCIE0_WAKE_N",
++			  "DVDT_ENABLE",
++			  "DVDT_WRT_DET_OR",
++			  "NC",
++			  "PCIE2_RST_N",
++			  "PCIE2_CLK_REQ_N",
++			  "PCIE2_WAKE_N",
++			  "MDM_VFR_IRQ0",
++			  "MDM_VFR_IRQ1",
++			  "SW_SERVICE", /* GPIO_90 */
++			  "CAM_SOF",
++			  "CAM1_RST_N",
++			  "CAM0_RST_N",
++			  "CAM0_MCLK",
++			  "CAM1_MCLK",
++			  "CAM2_MCLK",
++			  "CAM3_MCLK",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_100 */
++			  "CCI0_I2C_SDA",
++			  "CCI0_I2C_SCL",
++			  "CCI1_I2C_SDA",
++			  "CCI1_I2C_SCL_",
++			  "CCI2_I2C_SDA",
++			  "CCI2_I2C_SCL",
++			  "CCI3_I2C_SDA",
++			  "CCI3_I2C_SCL",
++			  "CAM3_RST_N",
++			  "NFC_DWL_REQ", /* GPIO_110 */
++			  "NFC_IRQ",
++			  "XVS",
++			  "NC",
++			  "RF_ID_EXTENSION",
++			  "SPK_AMP_I2C_SDA",
++			  "SPK_AMP_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "ACC_COVER_OPEN",
++			  "ALS_PROX_INT_N",
++			  "ACCEL_INT",
++			  "WLAN_SW_CTRL",
++			  "CAMSENSOR_I2C_SDA",
++			  "CAMSENSOR_I2C_SCL",
++			  "UDON_SWITCH_SEL",
++			  "WDOG_DISABLE",
++			  "BAROMETER_INT",
++			  "NC", /* GPIO_130 */
++			  "NC",
++			  "FORCED_USB_BOOT",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "RGBC_IR_INT",
++			  "NC",
++			  "NC", /* GPIO_140 */
++			  "NC",
++			  "BT_SLIMBUS_CLK",
++			  "BT_SLIMBUS_DATA",
++			  "HW_ID_0",
++			  "HW_ID_1",
++			  "WCD_SWR_TX_CLK",
++			  "WCD_SWR_TX_DATA0",
++			  "WCD_SWR_TX_DATA1",
++			  "WCD_SWR_RX_CLK",
++			  "WCD_SWR_RX_DATA0", /* GPIO_150 */
++			  "WCD_SWR_RX_DATA1",
++			  "SDM_DMIC_CLK1",
++			  "SDM_DMIC_DATA1",
++			  "SDM_DMIC_CLK2",
++			  "SDM_DMIC_DATA2",
++			  "SPK_AMP_I2S_CLK",
++			  "SPK_AMP_I2S_WS",
++			  "SPK_AMP_I2S_ASP_DIN",
++			  "SPK_AMP_I2S_ASP_DOUT",
++			  "COMPASS_I2C_SDA", /* GPIO_160 */
++			  "COMPASS_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "SSC_SPI_1_MISO",
++			  "SSC_SPI_1_MOSI",
++			  "SSC_SPI_1_CLK",
++			  "SSC_SPI_1_CS_N",
++			  "NC",
++			  "NC",
++			  "SSC_SENSOR_I2C_SDA", /* GPIO_170 */
++			  "SSC_SENSOR_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "HST_BLE_SNS_UART6_TX",
++			  "HST_BLE_SNS_UART6_RX",
++			  "HST_WLAN_UART_TX",
++			  "HST_WLAN_UART_RX";
++};
++
+ &vreg_l2f_1p3 {
+ 	regulator-min-microvolt = <1200000>;
+ 	regulator-max-microvolt = <1200000>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index 8ab82bacba81f..b044cffb419e5 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -51,12 +51,26 @@
+ 	gpio_keys: gpio-keys {
+ 		compatible = "gpio-keys";
+ 
+-		/*
+-		 * Camera focus (light press) and camera snapshot (full press)
+-		 * seem not to work properly.. Adding the former one stalls the CPU
+-		 * and the latter kills the volume down key for whatever reason. In any
+-		 * case, they are both on &pm8150b_gpios: camera focus(2), camera snapshot(1).
+-		 */
++		pinctrl-0 = <&focus_n &snapshot_n &vol_down_n>;
++		pinctrl-names = "default";
++
++		key-camera-focus {
++			label = "Camera Focus";
++			linux,code = <KEY_CAMERA_FOCUS>;
++			gpios = <&pm8150b_gpios 2 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
++
++		key-camera-snapshot {
++			label = "Camera Snapshot";
++			linux,code = <KEY_CAMERA>;
++			gpios = <&pm8150b_gpios 1 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
+ 
+ 		key-vol-down {
+ 			label = "Volume Down";
+@@ -551,6 +565,34 @@
+ 	vdda-pll-supply = <&vreg_l9a_1p2>;
+ };
+ 
++&pm8150_gpios {
++	vol_down_n: vol-down-n-state {
++		pins = "gpio1";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
++&pm8150b_gpios {
++	snapshot_n: snapshot-n-state {
++		pins = "gpio1";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++
++	focus_n: focus-n-state {
++		pins = "gpio2";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
+ &pon_pwrkey {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 1efa07f2caff4..e03007e23e918 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -100,7 +100,7 @@
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_0>;
+ 			power-domains = <&CPU_PD0>;
+ 			power-domain-names = "psci";
+@@ -131,7 +131,7 @@
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_100>;
+ 			power-domains = <&CPU_PD1>;
+ 			power-domain-names = "psci";
+@@ -156,7 +156,7 @@
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_200>;
+ 			power-domains = <&CPU_PD2>;
+ 			power-domain-names = "psci";
+@@ -181,7 +181,7 @@
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_300>;
+ 			power-domains = <&CPU_PD3>;
+ 			power-domain-names = "psci";
+@@ -1905,6 +1905,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie0_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+@@ -2011,6 +2012,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+@@ -2119,6 +2121,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie2_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+@@ -2726,6 +2729,7 @@
+ 			clock-names = "ahb", "bus", "iface";
+ 
+ 			power-domains = <&gpucc GPU_CX_GDSC>;
++			dma-coherent;
+ 		};
+ 
+ 		slpi: remoteproc@5c00000 {
+@@ -3059,7 +3063,7 @@
+ 				port@7 {
+ 					reg = <7>;
+ 					funnel_swao_in_funnel_merg: endpoint {
+-						remote-endpoint= <&funnel_merg_out_funnel_swao>;
++						remote-endpoint = <&funnel_merg_out_funnel_swao>;
+ 					};
+ 				};
+ 			};
+@@ -5298,104 +5302,105 @@
+ 			reg = <0 0x15000000 0 0x100000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <2>;
+-			interrupts =    <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
++			dma-coherent;
+ 		};
+ 
+ 		adsp: remoteproc@17300000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index ec451c616f3e4..c236967725c1b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -48,7 +48,7 @@
+ 
+ 		CPU0: cpu@0 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x0>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+@@ -72,7 +72,7 @@
+ 
+ 		CPU1: cpu@100 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x100>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+@@ -91,7 +91,7 @@
+ 
+ 		CPU2: cpu@200 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x200>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+@@ -110,7 +110,7 @@
+ 
+ 		CPU3: cpu@300 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x300>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+@@ -129,7 +129,7 @@
+ 
+ 		CPU4: cpu@400 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x400>;
+ 			clocks = <&cpufreq_hw 1>;
+ 			enable-method = "psci";
+@@ -148,7 +148,7 @@
+ 
+ 		CPU5: cpu@500 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x500>;
+ 			clocks = <&cpufreq_hw 1>;
+ 			enable-method = "psci";
+@@ -167,7 +167,7 @@
+ 
+ 		CPU6: cpu@600 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x600>;
+ 			clocks = <&cpufreq_hw 1>;
+ 			enable-method = "psci";
+@@ -186,7 +186,7 @@
+ 
+ 		CPU7: cpu@700 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-x1";
+ 			reg = <0x0 0x700>;
+ 			clocks = <&cpufreq_hw 2>;
+ 			enable-method = "psci";
+@@ -246,8 +246,8 @@
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "silver-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+-				entry-latency-us = <355>;
+-				exit-latency-us = <909>;
++				entry-latency-us = <360>;
++				exit-latency-us = <531>;
+ 				min-residency-us = <3934>;
+ 				local-timer-stop;
+ 			};
+@@ -256,8 +256,8 @@
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "gold-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+-				entry-latency-us = <241>;
+-				exit-latency-us = <1461>;
++				entry-latency-us = <702>;
++				exit-latency-us = <1061>;
+ 				min-residency-us = <4488>;
+ 				local-timer-stop;
+ 			};
+@@ -3077,104 +3077,104 @@
+ 			reg = <0 0x15000000 0 0x100000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <2>;
+-			interrupts =    <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		adsp: remoteproc@17300000 {
+@@ -3399,6 +3399,13 @@
+ 			      <0 0x18593000 0 0x1000>;
+ 			reg-names = "freq-domain0", "freq-domain1", "freq-domain2";
+ 
++			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "dcvsh-irq-0",
++					  "dcvsh-irq-1",
++					  "dcvsh-irq-2";
++
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ 			clock-names = "xo", "alternate";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
+index bc4c125d1832e..dabb7e872f384 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
+@@ -14,7 +14,6 @@
+ #include "pm8450.dtsi"
+ #include "pmk8350.dtsi"
+ #include "pmr735a.dtsi"
+-#include "pmr735b.dtsi"
+ 
+ / {
+ 	model = "Qualcomm Technologies, Inc. SM8450 HDK";
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 5cd7296c76605..42b23ba7a573f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -3810,103 +3810,103 @@
+ 			reg = <0 0x15000000 0 0x100000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+-			interrupts =    <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		intc: interrupt-controller@17100000 {
+@@ -4212,7 +4212,7 @@
+ 				 <&apps_smmu 0x59f 0x0>;
+ 		};
+ 
+-		crypto: crypto@1de0000 {
++		crypto: crypto@1dfa000 {
+ 			compatible = "qcom,sm8450-qce", "qcom,sm8150-qce", "qcom,qce";
+ 			reg = <0 0x01dfa000 0 0x6000>;
+ 			dmas = <&cryptobam 4>, <&cryptobam 5>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+index ec86c5f380450..714a2f9497adc 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+@@ -186,6 +186,7 @@
+ 
+ 		vdd-bob1-supply = <&vph_pwr>;
+ 		vdd-bob2-supply = <&vph_pwr>;
++		vdd-l1-l4-l10-supply = <&vreg_s6g_1p8>;
+ 		vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ 		vdd-l3-supply = <&vreg_s4g_1p3>;
+ 		vdd-l5-l16-supply = <&vreg_bob1>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index 41d60af936920..6e8aba2569316 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -1600,7 +1600,7 @@
+ 				pinctrl-0 = <&qup_uart7_default>;
+ 				interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>;
+ 				interconnect-names = "qup-core", "qup-config";
+-				interconnects =	<&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
++				interconnects = <&clk_virt MASTER_QUP_CORE_1 0 &clk_virt SLAVE_QUP_CORE_1 0>,
+ 						<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_1 0>;
+ 				status = "disabled";
+ 			};
+@@ -3517,103 +3517,103 @@
+ 			reg = <0 0x15000000 0 0x100000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+-			interrupts =	<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+-					<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		intc: interrupt-controller@17100000 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
+index 63c4bd873188e..72ad74c38a2b4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
+@@ -47,6 +47,9 @@
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
++	/* actually fed by vcc5v0_sys, dependent
++	 * on pi6c clock generator
++	 */
+ 	vcc3v3_minipcie: vcc3v3-minipcie-regulator {
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+@@ -54,9 +57,9 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&minipcie_enable_h>;
+ 		regulator-name = "vcc3v3_minipcie";
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
+-		vin-supply = <&vcc5v0_sys>;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vcc3v3_pi6c_05>;
+ 	};
+ 
+ 	vcc3v3_ngff: vcc3v3-ngff-regulator {
+@@ -71,9 +74,6 @@
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
+-	/* actually fed by vcc5v0_sys, dependent
+-	 * on pi6c clock generator
+-	 */
+ 	vcc3v3_pcie30x1: vcc3v3-pcie30x1-regulator {
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+@@ -83,7 +83,7 @@
+ 		regulator-name = "vcc3v3_pcie30x1";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+-		vin-supply = <&vcc3v3_pi6c_05>;
++		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
+ 	vcc3v3_pi6c_05: vcc3v3-pi6c-05-regulator {
+@@ -99,6 +99,10 @@
+ 	};
+ };
+ 
++&combphy1 {
++	phy-supply = <&vcc3v3_pcie30x1>;
++};
++
+ &pcie2x1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pcie20_reset_h>;
+@@ -117,7 +121,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pcie30x1m0_pins>;
+ 	reset-gpios = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>;
+-	vpcie3v3-supply = <&vcc3v3_pcie30x1>;
++	vpcie3v3-supply = <&vcc3v3_minipcie>;
+ 	status = "okay";
+ };
+ 
+@@ -178,6 +182,10 @@
+ 	status = "okay";
+ };
+ 
++&sata1 {
++	status = "okay";
++};
++
+ &sdmmc0 {
+ 	bus-width = <4>;
+ 	cap-sd-highspeed;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+index 34c8ffc553ec3..540ed8a0d7fb6 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+@@ -300,7 +300,7 @@
+ 	status = "okay";
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_i2c1_pins_default>;
+-	clock-frequency = <400000>;
++	clock-frequency = <100000>;
+ 
+ 	tlv320aic3106: audio-codec@1b {
+ 		#sound-dai-cells = <0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+index 04d4739d72457..2a5000645752d 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+@@ -249,18 +249,19 @@
+ 			J721S2_WKUP_IOPAD(0x108, PIN_INPUT, 0) /* (N27) MCU_ADC1_AIN7 */
+ 		>;
+ 	};
++};
+ 
++&wkup_pmx1 {
+ 	mcu_fss0_ospi1_pins_default: mcu-fss0-ospi1-default-pins {
+ 		pinctrl-single,pins = <
+-			J721S2_WKUP_IOPAD(0x040, PIN_OUTPUT, 0) /* (A19) MCU_OSPI1_CLK */
+-			J721S2_WKUP_IOPAD(0x05c, PIN_OUTPUT, 0) /* (D20) MCU_OSPI1_CSn0 */
+-			J721S2_WKUP_IOPAD(0x060, PIN_OUTPUT, 0) /* (C21) MCU_OSPI1_CSn1 */
+-			J721S2_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (D21) MCU_OSPI1_D0 */
+-			J721S2_WKUP_IOPAD(0x050, PIN_INPUT, 0) /* (G20) MCU_OSPI1_D1 */
+-			J721S2_WKUP_IOPAD(0x054, PIN_INPUT, 0) /* (C20) MCU_OSPI1_D2 */
+-			J721S2_WKUP_IOPAD(0x058, PIN_INPUT, 0) /* (A20) MCU_OSPI1_D3 */
+-			J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (B19) MCU_OSPI1_DQS */
+-			J721S2_WKUP_IOPAD(0x044, PIN_INPUT, 0) /* (B20) MCU_OSPI1_LBCLKO */
++			J721S2_WKUP_IOPAD(0x008, PIN_OUTPUT, 0) /* (A19) MCU_OSPI1_CLK */
++			J721S2_WKUP_IOPAD(0x024, PIN_OUTPUT, 0) /* (D20) MCU_OSPI1_CSn0 */
++			J721S2_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (D21) MCU_OSPI1_D0 */
++			J721S2_WKUP_IOPAD(0x018, PIN_INPUT, 0) /* (G20) MCU_OSPI1_D1 */
++			J721S2_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (C20) MCU_OSPI1_D2 */
++			J721S2_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (A20) MCU_OSPI1_D3 */
++			J721S2_WKUP_IOPAD(0x010, PIN_INPUT, 0) /* (B19) MCU_OSPI1_DQS */
++			J721S2_WKUP_IOPAD(0x00c, PIN_INPUT, 0) /* (B20) MCU_OSPI1_LBCLKO */
+ 		>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+index d57dd43da0ef4..17ae27eac39ad 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+@@ -44,9 +44,6 @@
+ 		pinctrl-single,pins = <
+ 			J721S2_WKUP_IOPAD(0x000, PIN_OUTPUT, 0) /* (D19) MCU_OSPI0_CLK */
+ 			J721S2_WKUP_IOPAD(0x02c, PIN_OUTPUT, 0) /* (F15) MCU_OSPI0_CSn0 */
+-			J721S2_WKUP_IOPAD(0x030, PIN_OUTPUT, 0) /* (G17) MCU_OSPI0_CSn1 */
+-			J721S2_WKUP_IOPAD(0x038, PIN_OUTPUT, 0) /* (F14) MCU_OSPI0_CSn2 */
+-			J721S2_WKUP_IOPAD(0x03c, PIN_OUTPUT, 0) /* (F17) MCU_OSPI0_CSn3 */
+ 			J721S2_WKUP_IOPAD(0x00c, PIN_INPUT, 0) /* (C19) MCU_OSPI0_D0 */
+ 			J721S2_WKUP_IOPAD(0x010, PIN_INPUT, 0) /* (F16) MCU_OSPI0_D1 */
+ 			J721S2_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (G15) MCU_OSPI0_D2 */
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+index 430b8a2c5df57..bf772f0641170 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+@@ -340,27 +340,27 @@
+ 
+ 	mcu_adc0_pins_default: mcu-adc0-default-pins {
+ 		pinctrl-single,pins = <
+-			J784S4_WKUP_IOPAD(0x134, PIN_INPUT, 0) /* (P36) MCU_ADC0_AIN0 */
+-			J784S4_WKUP_IOPAD(0x138, PIN_INPUT, 0) /* (V36) MCU_ADC0_AIN1 */
+-			J784S4_WKUP_IOPAD(0x13c, PIN_INPUT, 0) /* (T34) MCU_ADC0_AIN2 */
+-			J784S4_WKUP_IOPAD(0x140, PIN_INPUT, 0) /* (T36) MCU_ADC0_AIN3 */
+-			J784S4_WKUP_IOPAD(0x144, PIN_INPUT, 0) /* (P34) MCU_ADC0_AIN4 */
+-			J784S4_WKUP_IOPAD(0x148, PIN_INPUT, 0) /* (R37) MCU_ADC0_AIN5 */
+-			J784S4_WKUP_IOPAD(0x14c, PIN_INPUT, 0) /* (R33) MCU_ADC0_AIN6 */
+-			J784S4_WKUP_IOPAD(0x150, PIN_INPUT, 0) /* (V38) MCU_ADC0_AIN7 */
++			J784S4_WKUP_IOPAD(0x0cc, PIN_INPUT, 0) /* (P36) MCU_ADC0_AIN0 */
++			J784S4_WKUP_IOPAD(0x0d0, PIN_INPUT, 0) /* (V36) MCU_ADC0_AIN1 */
++			J784S4_WKUP_IOPAD(0x0d4, PIN_INPUT, 0) /* (T34) MCU_ADC0_AIN2 */
++			J784S4_WKUP_IOPAD(0x0d8, PIN_INPUT, 0) /* (T36) MCU_ADC0_AIN3 */
++			J784S4_WKUP_IOPAD(0x0dc, PIN_INPUT, 0) /* (P34) MCU_ADC0_AIN4 */
++			J784S4_WKUP_IOPAD(0x0e0, PIN_INPUT, 0) /* (R37) MCU_ADC0_AIN5 */
++			J784S4_WKUP_IOPAD(0x0e4, PIN_INPUT, 0) /* (R33) MCU_ADC0_AIN6 */
++			J784S4_WKUP_IOPAD(0x0e8, PIN_INPUT, 0) /* (V38) MCU_ADC0_AIN7 */
+ 		>;
+ 	};
+ 
+ 	mcu_adc1_pins_default: mcu-adc1-default-pins {
+ 		pinctrl-single,pins = <
+-			J784S4_WKUP_IOPAD(0x154, PIN_INPUT, 0) /* (Y38) MCU_ADC1_AIN0 */
+-			J784S4_WKUP_IOPAD(0x158, PIN_INPUT, 0) /* (Y34) MCU_ADC1_AIN1 */
+-			J784S4_WKUP_IOPAD(0x15c, PIN_INPUT, 0) /* (V34) MCU_ADC1_AIN2 */
+-			J784S4_WKUP_IOPAD(0x160, PIN_INPUT, 0) /* (W37) MCU_ADC1_AIN3 */
+-			J784S4_WKUP_IOPAD(0x164, PIN_INPUT, 0) /* (AA37) MCU_ADC1_AIN4 */
+-			J784S4_WKUP_IOPAD(0x168, PIN_INPUT, 0) /* (W33) MCU_ADC1_AIN5 */
+-			J784S4_WKUP_IOPAD(0x16c, PIN_INPUT, 0) /* (U33) MCU_ADC1_AIN6 */
+-			J784S4_WKUP_IOPAD(0x170, PIN_INPUT, 0) /* (Y36) MCU_ADC1_AIN7 */
++			J784S4_WKUP_IOPAD(0x0ec, PIN_INPUT, 0) /* (Y38) MCU_ADC1_AIN0 */
++			J784S4_WKUP_IOPAD(0x0f0, PIN_INPUT, 0) /* (Y34) MCU_ADC1_AIN1 */
++			J784S4_WKUP_IOPAD(0x0f4, PIN_INPUT, 0) /* (V34) MCU_ADC1_AIN2 */
++			J784S4_WKUP_IOPAD(0x0f8, PIN_INPUT, 0) /* (W37) MCU_ADC1_AIN3 */
++			J784S4_WKUP_IOPAD(0x0fc, PIN_INPUT, 0) /* (AA37) MCU_ADC1_AIN4 */
++			J784S4_WKUP_IOPAD(0x100, PIN_INPUT, 0) /* (W33) MCU_ADC1_AIN5 */
++			J784S4_WKUP_IOPAD(0x104, PIN_INPUT, 0) /* (U33) MCU_ADC1_AIN6 */
++			J784S4_WKUP_IOPAD(0x108, PIN_INPUT, 0) /* (Y36) MCU_ADC1_AIN7 */
+ 		>;
+ 	};
+ };
+@@ -379,21 +379,28 @@
+ 			J784S4_WKUP_IOPAD(0x024, PIN_INPUT, 0) /* (E34) MCU_OSPI0_D6 */
+ 			J784S4_WKUP_IOPAD(0x028, PIN_INPUT, 0) /* (E33) MCU_OSPI0_D7 */
+ 			J784S4_WKUP_IOPAD(0x008, PIN_INPUT, 0) /* (C34) MCU_OSPI0_DQS */
+-			J784S4_WKUP_IOPAD(0x03c, PIN_OUTPUT, 6) /* (C32) MCU_OSPI0_CSn3.MCU_OSPI0_ECC_FAIL */
+-			J784S4_WKUP_IOPAD(0x038, PIN_OUTPUT, 6) /* (B34) MCU_OSPI0_CSn2.MCU_OSPI0_RESET_OUT0 */
++		>;
++	};
++};
++
++&wkup_pmx1 {
++	mcu_fss0_ospi0_1_pins_default: mcu-fss0-ospi0-1-default-pins {
++		pinctrl-single,pins = <
++			J784S4_WKUP_IOPAD(0x004, PIN_OUTPUT, 6) /* (C32) MCU_OSPI0_ECC_FAIL */
++			J784S4_WKUP_IOPAD(0x000, PIN_OUTPUT, 6) /* (B34) MCU_OSPI0_RESET_OUT0 */
+ 		>;
+ 	};
+ 
+ 	mcu_fss0_ospi1_pins_default: mcu-fss0-ospi1-default-pins {
+ 		pinctrl-single,pins = <
+-			J784S4_WKUP_IOPAD(0x040, PIN_OUTPUT, 0) /* (F32) MCU_OSPI1_CLK */
+-			J784S4_WKUP_IOPAD(0x05c, PIN_OUTPUT, 0) /* (G32) MCU_OSPI1_CSn0 */
+-			J784S4_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (E35) MCU_OSPI1_D0 */
+-			J784S4_WKUP_IOPAD(0x050, PIN_INPUT, 0) /* (D31) MCU_OSPI1_D1 */
+-			J784S4_WKUP_IOPAD(0x054, PIN_INPUT, 0) /* (G31) MCU_OSPI1_D2 */
+-			J784S4_WKUP_IOPAD(0x058, PIN_INPUT, 0) /* (F33) MCU_OSPI1_D3 */
+-			J784S4_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (F31) MCU_OSPI1_DQS */
+-			J784S4_WKUP_IOPAD(0x044, PIN_INPUT, 0) /* (C31) MCU_OSPI1_LBCLKO */
++			J784S4_WKUP_IOPAD(0x008, PIN_OUTPUT, 0) /* (F32) MCU_OSPI1_CLK */
++			J784S4_WKUP_IOPAD(0x024, PIN_OUTPUT, 0) /* (G32) MCU_OSPI1_CSn0 */
++			J784S4_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (E35) MCU_OSPI1_D0 */
++			J784S4_WKUP_IOPAD(0x018, PIN_INPUT, 0) /* (D31) MCU_OSPI1_D1 */
++			J784S4_WKUP_IOPAD(0x01C, PIN_INPUT, 0) /* (G31) MCU_OSPI1_D2 */
++			J784S4_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (F33) MCU_OSPI1_D3 */
++			J784S4_WKUP_IOPAD(0x010, PIN_INPUT, 0) /* (F31) MCU_OSPI1_DQS */
++			J784S4_WKUP_IOPAD(0x00C, PIN_INPUT, 0) /* (C31) MCU_OSPI1_LBCLKO */
+ 		>;
+ 	};
+ };
+@@ -437,7 +444,7 @@
+ &ospi0 {
+ 	status = "okay";
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
++	pinctrl-0 = <&mcu_fss0_ospi0_pins_default>, <&mcu_fss0_ospi0_1_pins_default>;
+ 
+ 	flash@0 {
+ 		compatible = "jedec,spi-nor";
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+index 2ea0adae6832f..76e610d8782b5 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+@@ -60,7 +60,7 @@
+ 		#interrupt-cells = <1>;
+ 		ti,sci = <&sms>;
+ 		ti,sci-dev-id = <10>;
+-		ti,interrupt-ranges = <8 360 56>;
++		ti,interrupt-ranges = <8 392 56>;
+ 	};
+ 
+ 	main_pmx0: pinctrl@11c000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+index 657fb1d72512c..62a0f172fb2d4 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+@@ -107,7 +107,7 @@
+ 		#interrupt-cells = <1>;
+ 		ti,sci = <&sms>;
+ 		ti,sci-dev-id = <177>;
+-		ti,interrupt-ranges = <16 928 16>;
++		ti,interrupt-ranges = <16 960 16>;
+ 	};
+ 
+ 	/* MCU_TIMERIO pad input CTRLMMR_MCU_TIMER*_CTRL registers */
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index a25d783dfb955..d8bae57af16d5 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1129,7 +1129,6 @@ CONFIG_XEN_GNTDEV=y
+ CONFIG_XEN_GRANT_DEV_ALLOC=y
+ CONFIG_STAGING=y
+ CONFIG_STAGING_MEDIA=y
+-CONFIG_VIDEO_IMX_MEDIA=m
+ CONFIG_VIDEO_MAX96712=m
+ CONFIG_CHROME_PLATFORMS=y
+ CONFIG_CROS_EC=y
+@@ -1182,6 +1181,7 @@ CONFIG_IPQ_GCC_8074=y
+ CONFIG_IPQ_GCC_9574=y
+ CONFIG_MSM_GCC_8916=y
+ CONFIG_MSM_GCC_8994=y
++CONFIG_MSM_GCC_8996=y
+ CONFIG_MSM_MMCC_8994=m
+ CONFIG_MSM_MMCC_8996=m
+ CONFIG_MSM_MMCC_8998=m
+diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
+index 4292d9bafb9d2..484cb6972e99a 100644
+--- a/arch/arm64/include/asm/sdei.h
++++ b/arch/arm64/include/asm/sdei.h
+@@ -17,6 +17,9 @@
+ 
+ #include <asm/virt.h>
+ 
++DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
++DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
++
+ extern unsigned long sdei_exit_mode;
+ 
+ /* Software Delegated Exception entry point from firmware*/
+@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
+ 						   unsigned long pc,
+ 						   unsigned long pstate);
+ 
++/* Abort a running handler. Context is discarded. */
++void __sdei_handler_abort(void);
++
+ /*
+  * The above entry point does the minimum to call C code. This function does
+  * anything else, before calling the driver.
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index a40e5e50fa552..6ad61de03d0a0 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -986,9 +986,13 @@ SYM_CODE_START(__sdei_asm_handler)
+ 
+ 	mov	x19, x1
+ 
+-#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
++	/* Store the registered-event for crash_smp_send_stop() */
+ 	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
+-#endif
++	cbnz	w4, 1f
++	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
++	b	2f
++1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
++2:	str	x19, [x5]
+ 
+ #ifdef CONFIG_VMAP_STACK
+ 	/*
+@@ -1055,6 +1059,14 @@ SYM_CODE_START(__sdei_asm_handler)
+ 
+ 	ldr_l	x2, sdei_exit_mode
+ 
++	/* Clear the registered-event seen by crash_smp_send_stop() */
++	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
++	cbnz	w3, 1f
++	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
++	b	2f
++1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
++2:	str	xzr, [x5]
++
+ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ 	sdei_handler_exit exit_mode=x2
+ alternative_else_nop_endif
+@@ -1065,4 +1077,15 @@ alternative_else_nop_endif
+ #endif
+ SYM_CODE_END(__sdei_asm_handler)
+ NOKPROBE(__sdei_asm_handler)
++
++SYM_CODE_START(__sdei_handler_abort)
++	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
++	adr	x1, 1f
++	ldr_l	x2, sdei_exit_mode
++	sdei_handler_exit exit_mode=x2
++	// exit the handler and jump to the next instruction.
++	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
++1:	ret
++SYM_CODE_END(__sdei_handler_abort)
++NOKPROBE(__sdei_handler_abort)
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 087c05aa960ea..91e44ac7150f9 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1179,9 +1179,6 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+  */
+ u64 read_zcr_features(void)
+ {
+-	u64 zcr;
+-	unsigned int vq_max;
+-
+ 	/*
+ 	 * Set the maximum possible VL, and write zeroes to all other
+ 	 * bits to see if they stick.
+@@ -1189,12 +1186,8 @@ u64 read_zcr_features(void)
+ 	sve_kernel_enable(NULL);
+ 	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+ 
+-	zcr = read_sysreg_s(SYS_ZCR_EL1);
+-	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+-	vq_max = sve_vq_from_vl(sve_get_vl());
+-	zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+-
+-	return zcr;
++	/* Return LEN value that would be written to get the maximum VL */
++	return sve_vq_from_vl(sve_get_vl()) - 1;
+ }
+ 
+ void __init sve_setup(void)
+@@ -1349,9 +1342,6 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+  */
+ u64 read_smcr_features(void)
+ {
+-	u64 smcr;
+-	unsigned int vq_max;
+-
+ 	sme_kernel_enable(NULL);
+ 
+ 	/*
+@@ -1360,12 +1350,8 @@ u64 read_smcr_features(void)
+ 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
+ 		       SYS_SMCR_EL1);
+ 
+-	smcr = read_sysreg_s(SYS_SMCR_EL1);
+-	smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
+-	vq_max = sve_vq_from_vl(sme_get_vl());
+-	smcr |= vq_max - 1; /* set LEN field to maximum effective value */
+-
+-	return smcr;
++	/* Return LEN value that would be written to get the maximum VL */
++	return sve_vq_from_vl(sme_get_vl()) - 1;
+ }
+ 
+ void __init sme_setup(void)
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 187aa2b175b4f..20d7ef82de90a 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -891,7 +891,8 @@ static int sve_set_common(struct task_struct *target,
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(1);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto out;
+ 		}
+ 
+ 		/*
+diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
+index 830be01af32db..255d12f881c26 100644
+--- a/arch/arm64/kernel/sdei.c
++++ b/arch/arm64/kernel/sdei.c
+@@ -47,6 +47,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+ #endif
+ 
++DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
++DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
++
+ static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+ {
+ 	unsigned long *p;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index edd63894d61e8..960b98b43506d 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -1044,10 +1044,8 @@ void crash_smp_send_stop(void)
+ 	 * If this cpu is the only one alive at this point in time, online or
+ 	 * not, there are no stop messages to be sent around, so just back out.
+ 	 */
+-	if (num_other_online_cpus() == 0) {
+-		sdei_mask_local_cpu();
+-		return;
+-	}
++	if (num_other_online_cpus() == 0)
++		goto skip_ipi;
+ 
+ 	cpumask_copy(&mask, cpu_online_mask);
+ 	cpumask_clear_cpu(smp_processor_id(), &mask);
+@@ -1066,7 +1064,9 @@ void crash_smp_send_stop(void)
+ 		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ 			cpumask_pr_args(&mask));
+ 
++skip_ipi:
+ 	sdei_mask_local_cpu();
++	sdei_handler_abort();
+ }
+ 
+ bool smp_crash_stop_failed(void)
+diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
+index 78b87a64ca0a3..2432683e48a61 100644
+--- a/arch/arm64/lib/csum.c
++++ b/arch/arm64/lib/csum.c
+@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+ 	const u64 *ptr;
+ 	u64 data, sum64 = 0;
+ 
+-	if (unlikely(len == 0))
++	if (unlikely(len <= 0))
+ 		return 0;
+ 
+ 	offset = (unsigned long)buff & 7;
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 21716c9406821..9c52718ea7509 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -236,7 +236,7 @@ static void clear_flush(struct mm_struct *mm,
+ 	unsigned long i, saddr = addr;
+ 
+ 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+-		pte_clear(mm, addr, ptep);
++		ptep_clear(mm, addr, ptep);
+ 
+ 	flush_tlb_range(&vma, saddr, addr);
+ }
+diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
+index a115e8999c69e..218b4da0ea90d 100644
+--- a/arch/loongarch/include/asm/irq.h
++++ b/arch/loongarch/include/asm/irq.h
+@@ -40,7 +40,7 @@ void spurious_interrupt(void);
+ #define NR_IRQS_LEGACY 16
+ 
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+-void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self);
++void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
+ 
+ #define MAX_IO_PICS 2
+ #define NR_IRQS	(64 + (256 * MAX_IO_PICS))
+diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
+index 83e995b30e472..c49675852bdcd 100644
+--- a/arch/loongarch/include/asm/local.h
++++ b/arch/loongarch/include/asm/local.h
+@@ -63,8 +63,8 @@ static inline long local_cmpxchg(local_t *l, long old, long new)
+ 
+ static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+ {
+-	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+-	return try_cmpxchg_local(&l->a.counter, __old, new);
++	return try_cmpxchg_local(&l->a.counter,
++				 (typeof(l->a.counter) *) old, new);
+ }
+ 
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
+index 38afeb7dd58b6..0ac6afa4a825b 100644
+--- a/arch/loongarch/include/asm/pgtable.h
++++ b/arch/loongarch/include/asm/pgtable.h
+@@ -593,6 +593,9 @@ static inline long pmd_protnone(pmd_t pmd)
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ 
++#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
++#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)
++
+ /*
+  * We provide our own get_unmapped area to cope with the virtual aliasing
+  * constraints placed on us by the cache architecture.
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index 4ee1e9d6a65f1..ba457e43f5be5 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -338,9 +338,9 @@ static void raise_backtrace(cpumask_t *mask)
+ 	}
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
++	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
+ }
+ 
+ #ifdef CONFIG_64BIT
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 75abfa834ab7a..3a848e7e69f71 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -77,7 +77,7 @@ extern int cp0_fdc_irq;
+ extern int get_c0_fdc_int(void);
+ 
+ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+-				    bool exclude_self);
++				    int exclude_cpu);
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ 
+ #endif /* _ASM_IRQ_H */
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index 5daf6fe8e3e9a..e6ae3df0349d2 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -101,8 +101,8 @@ static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+ 
+ static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+ {
+-	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+-	return try_cmpxchg_local(&l->a.counter, __old, new);
++	return try_cmpxchg_local(&l->a.counter,
++				 (typeof(l->a.counter) *) old, new);
+ }
+ 
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index a3225912c862d..5387ed0a51862 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -750,9 +750,9 @@ static void raise_backtrace(cpumask_t *mask)
+ 	}
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
++	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
+ }
+ 
+ int mips_get_process_fp_mode(struct task_struct *task)
+diff --git a/arch/parisc/include/asm/runway.h b/arch/parisc/include/asm/runway.h
+index 5cf061376ddb1..2837f0223d6d3 100644
+--- a/arch/parisc/include/asm/runway.h
++++ b/arch/parisc/include/asm/runway.h
+@@ -2,9 +2,6 @@
+ #ifndef ASM_PARISC_RUNWAY_H
+ #define ASM_PARISC_RUNWAY_H
+ 
+-/* declared in arch/parisc/kernel/setup.c */
+-extern struct proc_dir_entry * proc_runway_root;
+-
+ #define RUNWAY_STATUS	0x10
+ #define RUNWAY_DEBUG	0x40
+ 
+diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
+index 762289b9984ea..a0e2d37c5b3b5 100644
+--- a/arch/parisc/kernel/processor.c
++++ b/arch/parisc/kernel/processor.c
+@@ -378,10 +378,18 @@ int
+ show_cpuinfo (struct seq_file *m, void *v)
+ {
+ 	unsigned long cpu;
++	char cpu_name[60], *p;
++
++	/* strip PA path from CPU name to not confuse lscpu */
++	strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
++	p = strrchr(cpu_name, '[');
++	if (p)
++		*(--p) = 0;
+ 
+ 	for_each_online_cpu(cpu) {
+-		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+ #ifdef CONFIG_SMP
++		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
++
+ 		if (0 == cpuinfo->hpa)
+ 			continue;
+ #endif
+@@ -426,8 +434,7 @@ show_cpuinfo (struct seq_file *m, void *v)
+ 
+ 		seq_printf(m, "model\t\t: %s - %s\n",
+ 				 boot_cpu_data.pdc.sys_model_name,
+-				 cpuinfo->dev ?
+-				 cpuinfo->dev->name : "Unknown");
++				 cpu_name);
+ 
+ 		seq_printf(m, "hversion\t: 0x%08x\n"
+ 			        "sversion\t: 0x%08x\n",
+diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
+index 91c049d51d0e1..2edc6269b1a35 100644
+--- a/arch/powerpc/include/asm/ftrace.h
++++ b/arch/powerpc/include/asm/ftrace.h
+@@ -12,7 +12,7 @@
+ 
+ /* Ignore unused weak functions which will have larger offsets */
+ #ifdef CONFIG_MPROFILE_KERNEL
+-#define FTRACE_MCOUNT_MAX_OFFSET	12
++#define FTRACE_MCOUNT_MAX_OFFSET	16
+ #elif defined(CONFIG_PPC32)
+ #define FTRACE_MCOUNT_MAX_OFFSET	8
+ #endif
+diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
+index f257cacb49a9c..ba1a5974e7143 100644
+--- a/arch/powerpc/include/asm/irq.h
++++ b/arch/powerpc/include/asm/irq.h
+@@ -55,7 +55,7 @@ int irq_choose_cpu(const struct cpumask *mask);
+ 
+ #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
+ extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+-					   bool exclude_self);
++					   int exclude_cpu);
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ #endif
+ 
+diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
+index 34d44cb17c874..ee1488d38fdc1 100644
+--- a/arch/powerpc/include/asm/lppaca.h
++++ b/arch/powerpc/include/asm/lppaca.h
+@@ -45,6 +45,7 @@
+ #include <asm/types.h>
+ #include <asm/mmu.h>
+ #include <asm/firmware.h>
++#include <asm/paca.h>
+ 
+ /*
+  * The lppaca is the "virtual processor area" registered with the hypervisor,
+@@ -127,13 +128,23 @@ struct lppaca {
+  */
+ #define LPPACA_OLD_SHARED_PROC		2
+ 
+-static inline bool lppaca_shared_proc(struct lppaca *l)
++#ifdef CONFIG_PPC_PSERIES
++/*
++ * All CPUs should have the same shared proc value, so directly access the PACA
++ * to avoid false positives from DEBUG_PREEMPT.
++ */
++static inline bool lppaca_shared_proc(void)
+ {
++	struct lppaca *l = local_paca->lppaca_ptr;
++
+ 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ 		return false;
+ 	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+ }
+ 
++#define get_lppaca()	(get_paca()->lppaca_ptr)
++#endif
++
+ /*
+  * SLB shadow buffer structure as defined in the PAPR.  The save_area
+  * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index cb325938766a5..e667d455ecb41 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -15,7 +15,6 @@
+ #include <linux/cache.h>
+ #include <linux/string.h>
+ #include <asm/types.h>
+-#include <asm/lppaca.h>
+ #include <asm/mmu.h>
+ #include <asm/page.h>
+ #ifdef CONFIG_PPC_BOOK3E_64
+@@ -47,14 +46,11 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+ #define get_paca()	local_paca
+ #endif
+ 
+-#ifdef CONFIG_PPC_PSERIES
+-#define get_lppaca()	(get_paca()->lppaca_ptr)
+-#endif
+-
+ #define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
+ 
+ struct task_struct;
+ struct rtas_args;
++struct lppaca;
+ 
+ /*
+  * Defines the layout of the paca.
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index f5ba1a3c41f8e..e08513d731193 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -6,6 +6,7 @@
+ #include <asm/smp.h>
+ #ifdef CONFIG_PPC64
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/hvcall.h>
+ #endif
+ 
+diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
+index 8239c0af5eb2b..fe3d0ea0058ac 100644
+--- a/arch/powerpc/include/asm/plpar_wrappers.h
++++ b/arch/powerpc/include/asm/plpar_wrappers.h
+@@ -9,6 +9,7 @@
+ 
+ #include <asm/hvcall.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/page.h>
+ 
+ static inline long poll_pending(void)
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index ea0a073abd969..3ff2da7b120b5 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -654,6 +654,7 @@ int __init fadump_reserve_mem(void)
+ 	return ret;
+ error_out:
+ 	fw_dump.fadump_enabled = 0;
++	fw_dump.reserve_dump_area_size = 0;
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index c52449ae6936a..14251bc5219eb 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -172,17 +172,28 @@ static int fail_iommu_bus_notify(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
+-static struct notifier_block fail_iommu_bus_notifier = {
++/*
++ * PCI and VIO buses need separate notifier_block structs, since they're linked
++ * list nodes.  Sharing a notifier_block would mean that any notifiers later
++ * registered for PCI buses would also get called by VIO buses and vice versa.
++ */
++static struct notifier_block fail_iommu_pci_bus_notifier = {
+ 	.notifier_call = fail_iommu_bus_notify
+ };
+ 
++#ifdef CONFIG_IBMVIO
++static struct notifier_block fail_iommu_vio_bus_notifier = {
++	.notifier_call = fail_iommu_bus_notify
++};
++#endif
++
+ static int __init fail_iommu_setup(void)
+ {
+ #ifdef CONFIG_PCI
+-	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
++	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
+ #endif
+ #ifdef CONFIG_IBMVIO
+-	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
++	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
+ #endif
+ 
+ 	return 0;
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index 5de8597eaab8d..b15f15dcacb5c 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -221,8 +221,8 @@ static void raise_backtrace_ipi(cpumask_t *mask)
+ 	}
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
++	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
+ }
+ #endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index edb2dd1f53ebc..8c464a5d82469 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -245,7 +245,7 @@ static void watchdog_smp_panic(int cpu)
+ 			__cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
+ 		}
+ 	} else {
+-		trigger_allbutself_cpu_backtrace();
++		trigger_allbutcpu_cpu_backtrace(cpu);
+ 		cpumask_clear(&wd_smp_cpus_ipi);
+ 	}
+ 
+@@ -416,7 +416,7 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
+ 		xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
+ 
+ 		if (sysctl_hardlockup_all_cpu_backtrace)
+-			trigger_allbutself_cpu_backtrace();
++			trigger_allbutcpu_cpu_backtrace(cpu);
+ 
+ 		if (hardlockup_panic)
+ 			nmi_panic(regs, "Hard LOCKUP");
+diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
+index ccfd969656306..82be6d87514b7 100644
+--- a/arch/powerpc/kvm/book3s_hv_ras.c
++++ b/arch/powerpc/kvm/book3s_hv_ras.c
+@@ -9,6 +9,7 @@
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
+ #include <linux/kernel.h>
++#include <asm/lppaca.h>
+ #include <asm/opal.h>
+ #include <asm/mce.h>
+ #include <asm/machdep.h>
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 0bd4866d98241..9383606c5e6e0 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -127,21 +127,6 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+ 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
+-static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+-					     unsigned long lpid,
+-					     unsigned long ric)
+-{
+-	unsigned long rb, rs, prs, r;
+-
+-	rb = PPC_BIT(53); /* IS = 1 */
+-	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+-	prs = 1; /* process scoped */
+-	r = 1;   /* radix format */
+-
+-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+-}
+ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+ {
+ 	unsigned long rb,rs,prs,r;
+@@ -202,23 +187,6 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
+ 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
+-static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+-					    unsigned long lpid,
+-					    unsigned long ap, unsigned long ric)
+-{
+-	unsigned long rb, rs, prs, r;
+-
+-	rb = va & ~(PPC_BITMASK(52, 63));
+-	rb |= ap << PPC_BITLSHIFT(58);
+-	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+-	prs = 1; /* process scoped */
+-	r = 1;   /* radix format */
+-
+-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+-}
+-
+ static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ 					    unsigned long ap, unsigned long ric)
+ {
+@@ -264,22 +232,6 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
+ 	}
+ }
+ 
+-static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+-					     unsigned long pid,
+-					     unsigned long lpid,
+-					     unsigned long ap)
+-{
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+-	}
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+-	}
+-}
+-
+ static inline void fixup_tlbie_pid(unsigned long pid)
+ {
+ 	/*
+@@ -299,26 +251,6 @@ static inline void fixup_tlbie_pid(unsigned long pid)
+ 	}
+ }
+ 
+-static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+-{
+-	/*
+-	 * We can use any address for the invalidation, pick one which is
+-	 * probably unused as an optimisation.
+-	 */
+-	unsigned long va = ((1UL << 52) - 1);
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+-	}
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+-				RIC_FLUSH_TLB);
+-	}
+-}
+-
+ static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ 				       unsigned long ap)
+ {
+@@ -416,31 +348,6 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
+ 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+ 
+-static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+-				   unsigned long ric)
+-{
+-	asm volatile("ptesync" : : : "memory");
+-
+-	/*
+-	 * Workaround the fact that the "ric" argument to __tlbie_pid
+-	 * must be a compile-time contraint to match the "i" constraint
+-	 * in the asm statement.
+-	 */
+-	switch (ric) {
+-	case RIC_FLUSH_TLB:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+-		fixup_tlbie_pid_lpid(pid, lpid);
+-		break;
+-	case RIC_FLUSH_PWC:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+-		break;
+-	case RIC_FLUSH_ALL:
+-	default:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+-		fixup_tlbie_pid_lpid(pid, lpid);
+-	}
+-	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+-}
+ struct tlbiel_pid {
+ 	unsigned long pid;
+ 	unsigned long ric;
+@@ -566,20 +473,6 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
+ 	fixup_tlbie_va_range(addr - page_size, pid, ap);
+ }
+ 
+-static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+-					 unsigned long pid, unsigned long lpid,
+-					 unsigned long page_size,
+-					 unsigned long psize)
+-{
+-	unsigned long addr;
+-	unsigned long ap = mmu_get_ap(psize);
+-
+-	for (addr = start; addr < end; addr += page_size)
+-		__tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+-
+-	fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+-}
+-
+ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
+ 				      unsigned long psize, unsigned long ric)
+ {
+@@ -660,18 +553,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
+ 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+ 
+-static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+-					unsigned long pid, unsigned long lpid,
+-					unsigned long page_size,
+-					unsigned long psize, bool also_pwc)
+-{
+-	asm volatile("ptesync" : : : "memory");
+-	if (also_pwc)
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+-	__tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+-	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+-}
+-
+ static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
+ 				unsigned long start, unsigned long end,
+ 				unsigned long pid, unsigned long page_size,
+@@ -1486,6 +1367,127 @@ void radix__flush_tlb_all(void)
+ }
+ 
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++static __always_inline void __tlbie_pid_lpid(unsigned long pid,
++					     unsigned long lpid,
++					     unsigned long ric)
++{
++	unsigned long rb, rs, prs, r;
++
++	rb = PPC_BIT(53); /* IS = 1 */
++	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
++	prs = 1; /* process scoped */
++	r = 1;   /* radix format */
++
++	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++	trace_tlbie(0, 0, rb, rs, ric, prs, r);
++}
++
++static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
++					    unsigned long lpid,
++					    unsigned long ap, unsigned long ric)
++{
++	unsigned long rb, rs, prs, r;
++
++	rb = va & ~(PPC_BITMASK(52, 63));
++	rb |= ap << PPC_BITLSHIFT(58);
++	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
++	prs = 1; /* process scoped */
++	r = 1;   /* radix format */
++
++	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++	trace_tlbie(0, 0, rb, rs, ric, prs, r);
++}
++
++static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
++{
++	/*
++	 * We can use any address for the invalidation, pick one which is
++	 * probably unused as an optimisation.
++	 */
++	unsigned long va = ((1UL << 52) - 1);
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
++	}
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
++				RIC_FLUSH_TLB);
++	}
++}
++
++static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
++				   unsigned long ric)
++{
++	asm volatile("ptesync" : : : "memory");
++
++	/*
++	 * Workaround the fact that the "ric" argument to __tlbie_pid
++	 * must be a compile-time contraint to match the "i" constraint
++	 * in the asm statement.
++	 */
++	switch (ric) {
++	case RIC_FLUSH_TLB:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
++		fixup_tlbie_pid_lpid(pid, lpid);
++		break;
++	case RIC_FLUSH_PWC:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
++		break;
++	case RIC_FLUSH_ALL:
++	default:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
++		fixup_tlbie_pid_lpid(pid, lpid);
++	}
++	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
++}
++
++static inline void fixup_tlbie_va_range_lpid(unsigned long va,
++					     unsigned long pid,
++					     unsigned long lpid,
++					     unsigned long ap)
++{
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
++	}
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
++	}
++}
++
++static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
++					 unsigned long pid, unsigned long lpid,
++					 unsigned long page_size,
++					 unsigned long psize)
++{
++	unsigned long addr;
++	unsigned long ap = mmu_get_ap(psize);
++
++	for (addr = start; addr < end; addr += page_size)
++		__tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
++
++	fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
++}
++
++static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
++					unsigned long pid, unsigned long lpid,
++					unsigned long page_size,
++					unsigned long psize, bool also_pwc)
++{
++	asm volatile("ptesync" : : : "memory");
++	if (also_pwc)
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
++	__tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
++	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
++}
++
+ /*
+  * Performs process-scoped invalidations for a given LPID
+  * as part of H_RPT_INVALIDATE hcall.
+diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
+index 6956f637a38c1..f2708c8629a52 100644
+--- a/arch/powerpc/mm/book3s64/slb.c
++++ b/arch/powerpc/mm/book3s64/slb.c
+@@ -13,6 +13,7 @@
+ #include <asm/mmu.h>
+ #include <asm/mmu_context.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/cputable.h>
+ #include <asm/cacheflush.h>
+diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
+index ee721f420a7ba..1a53ab08447cb 100644
+--- a/arch/powerpc/perf/core-fsl-emb.c
++++ b/arch/powerpc/perf/core-fsl-emb.c
+@@ -645,7 +645,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ 	struct perf_event *event;
+ 	unsigned long val;
+-	int found = 0;
+ 
+ 	for (i = 0; i < ppmu->n_counter; ++i) {
+ 		event = cpuhw->event[i];
+@@ -654,7 +653,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 		if ((int)val < 0) {
+ 			if (event) {
+ 				/* event has overflowed */
+-				found = 1;
+ 				record_and_restart(event, val, regs);
+ 			} else {
+ 				/*
+@@ -672,11 +670,13 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 	isync();
+ }
+ 
+-void hw_perf_event_setup(int cpu)
++static int fsl_emb_pmu_prepare_cpu(unsigned int cpu)
+ {
+ 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+ 
+ 	memset(cpuhw, 0, sizeof(*cpuhw));
++
++	return 0;
+ }
+ 
+ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
+@@ -689,6 +689,8 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
+ 		pmu->name);
+ 
+ 	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
++	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
++			  fsl_emb_pmu_prepare_cpu, NULL);
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
+index 35254ac7af5ee..ca0674b0b683e 100644
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -91,6 +91,7 @@ BEGIN_FTR_SECTION;						\
+ 	b	1f;						\
+ END_FTR_SECTION(0, 1);						\
+ 	LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ;		\
++	ld	r12,0(r12);					\
+ 	std	r12,32(r1);					\
+ 	cmpdi	r12,0;						\
+ 	bne-	LABEL;						\
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 2eab323f69706..cb2f1211f7ebf 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -639,16 +639,8 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
+ 
+ static int __init vcpudispatch_stats_procfs_init(void)
+ {
+-	/*
+-	 * Avoid smp_processor_id while preemptible. All CPUs should have
+-	 * the same value for lppaca_shared_proc.
+-	 */
+-	preempt_disable();
+-	if (!lppaca_shared_proc(get_lppaca())) {
+-		preempt_enable();
++	if (!lppaca_shared_proc())
+ 		return 0;
+-	}
+-	preempt_enable();
+ 
+ 	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
+ 					&vcpudispatch_stats_proc_ops))
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index 8acc705095209..1c151d77e74b3 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -206,7 +206,7 @@ static void parse_ppp_data(struct seq_file *m)
+ 	           ppp_data.active_system_procs);
+ 
+ 	/* pool related entries are appropriate for shared configs */
+-	if (lppaca_shared_proc(get_lppaca())) {
++	if (lppaca_shared_proc()) {
+ 		unsigned long pool_idle_time, pool_procs;
+ 
+ 		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+@@ -560,7 +560,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
+ 		   partition_potential_processors);
+ 
+ 	seq_printf(m, "shared_processor_mode=%d\n",
+-		   lppaca_shared_proc(get_lppaca()));
++		   lppaca_shared_proc());
+ 
+ #ifdef CONFIG_PPC_64S_HASH_MMU
+ 	if (!radix_enabled())
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index e2a57cfa6c837..0ef2a7e014aa1 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -847,7 +847,7 @@ static void __init pSeries_setup_arch(void)
+ 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ 		vpa_init(boot_cpuid);
+ 
+-		if (lppaca_shared_proc(get_lppaca())) {
++		if (lppaca_shared_proc()) {
+ 			static_branch_enable(&shared_processor);
+ 			pv_spinlocks_init();
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
+index c5bf7e1b37804..58cee28e23992 100644
+--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
++++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
+@@ -25,8 +25,10 @@ unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode)
+ 
+ 	fwnode_for_each_parent_node(fwnode, parent) {
+ 		ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq);
+-		if (!ret)
++		if (!ret) {
++			fwnode_handle_put(parent);
+ 			return bus_freq;
++		}
+ 	}
+ 
+ 	return 0;
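
The fix above drops the reference that fwnode_for_each_parent_node() takes on
each parent before handing it to the loop body; returning early from such an
iterator without a put leaks a refcount on that node. A small self-contained
sketch of the rule, with hypothetical node_get()/node_put() helpers standing
in for the fwnode calls:

    struct node { int refs; struct node *parent; int bus_freq; };

    static struct node *node_get(struct node *n) { if (n) n->refs++; return n; }
    static void node_put(struct node *n) { if (n) n->refs--; }

    static int bus_frequency(const struct node *n)
    {
            struct node *parent = node_get(n->parent);

            while (parent) {
                    struct node *next;

                    if (parent->bus_freq) {
                            int freq = parent->bus_freq;

                            node_put(parent);  /* the added fwnode_handle_put() */
                            return freq;
                    }
                    next = node_get(parent->parent);
                    node_put(parent);
                    parent = next;
            }
            return 0;
    }
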
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index fae747cc57d2d..97e61a17e936a 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -58,6 +58,7 @@
+ #ifdef CONFIG_PPC64
+ #include <asm/hvcall.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #endif
+ 
+ #include "nonstdio.h"
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index bea7b73e895dd..ab099679f808c 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -62,6 +62,7 @@ config RISCV
+ 	select COMMON_CLK
+ 	select CPU_PM if CPU_IDLE || HIBERNATION
+ 	select EDAC_SUPPORT
++	select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
+ 	select GENERIC_ARCH_TOPOLOGY
+ 	select GENERIC_ATOMIC64 if !64BIT
+ 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 6ec6d52a41804..1329e060c5482 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -87,9 +87,6 @@ endif
+ ifeq ($(CONFIG_CMODEL_MEDANY),y)
+ 	KBUILD_CFLAGS += -mcmodel=medany
+ endif
+-ifeq ($(CONFIG_PERF_EVENTS),y)
+-        KBUILD_CFLAGS += -fno-omit-frame-pointer
+-endif
+ 
+ # Avoid generating .eh_frame sections.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
+index 283800130614b..575e95bb1bc33 100644
+--- a/arch/riscv/include/uapi/asm/ptrace.h
++++ b/arch/riscv/include/uapi/asm/ptrace.h
+@@ -103,13 +103,18 @@ struct __riscv_v_ext_state {
+ 	 * In signal handler, datap will be set a correct user stack offset
+ 	 * and vector registers will be copied to the address of datap
+ 	 * pointer.
+-	 *
+-	 * In ptrace syscall, datap will be set to zero and the vector
+-	 * registers will be copied to the address right after this
+-	 * structure.
+ 	 */
+ };
+ 
++struct __riscv_v_regset_state {
++	unsigned long vstart;
++	unsigned long vl;
++	unsigned long vtype;
++	unsigned long vcsr;
++	unsigned long vlenb;
++	char vreg[];
++};
++
+ /*
+  * According to spec: The number of bits in a single vector register,
+  * VLEN >= ELEN, which must be a power of 2, and must be no greater than
+diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
+index 487303e3ef229..2afe460de16a6 100644
+--- a/arch/riscv/kernel/ptrace.c
++++ b/arch/riscv/kernel/ptrace.c
+@@ -25,6 +25,9 @@ enum riscv_regset {
+ #ifdef CONFIG_FPU
+ 	REGSET_F,
+ #endif
++#ifdef CONFIG_RISCV_ISA_V
++	REGSET_V,
++#endif
+ };
+ 
+ static int riscv_gpr_get(struct task_struct *target,
+@@ -81,6 +84,71 @@ static int riscv_fpr_set(struct task_struct *target,
+ }
+ #endif
+ 
++#ifdef CONFIG_RISCV_ISA_V
++static int riscv_vr_get(struct task_struct *target,
++			const struct user_regset *regset,
++			struct membuf to)
++{
++	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
++	struct __riscv_v_regset_state ptrace_vstate;
++
++	if (!riscv_v_vstate_query(task_pt_regs(target)))
++		return -EINVAL;
++
++	/*
++	 * Ensure the vector registers have been saved to the memory before
++	 * copying them to membuf.
++	 */
++	if (target == current)
++		riscv_v_vstate_save(current, task_pt_regs(current));
++
++	ptrace_vstate.vstart = vstate->vstart;
++	ptrace_vstate.vl = vstate->vl;
++	ptrace_vstate.vtype = vstate->vtype;
++	ptrace_vstate.vcsr = vstate->vcsr;
++	ptrace_vstate.vlenb = vstate->vlenb;
++
++	/* Copy vector header from vstate. */
++	membuf_write(&to, &ptrace_vstate, sizeof(struct __riscv_v_regset_state));
++
++	/* Copy all the vector registers from vstate. */
++	return membuf_write(&to, vstate->datap, riscv_v_vsize);
++}
++
++static int riscv_vr_set(struct task_struct *target,
++			const struct user_regset *regset,
++			unsigned int pos, unsigned int count,
++			const void *kbuf, const void __user *ubuf)
++{
++	int ret;
++	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
++	struct __riscv_v_regset_state ptrace_vstate;
++
++	if (!riscv_v_vstate_query(task_pt_regs(target)))
++		return -EINVAL;
++
++	/* Copy rest of the vstate except datap */
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0,
++				 sizeof(struct __riscv_v_regset_state));
++	if (unlikely(ret))
++		return ret;
++
++	if (vstate->vlenb != ptrace_vstate.vlenb)
++		return -EINVAL;
++
++	vstate->vstart = ptrace_vstate.vstart;
++	vstate->vl = ptrace_vstate.vl;
++	vstate->vtype = ptrace_vstate.vtype;
++	vstate->vcsr = ptrace_vstate.vcsr;
++
++	/* Copy all the vector registers. */
++	pos = 0;
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
++				 0, riscv_v_vsize);
++	return ret;
++}
++#endif
++
+ static const struct user_regset riscv_user_regset[] = {
+ 	[REGSET_X] = {
+ 		.core_note_type = NT_PRSTATUS,
+@@ -100,6 +168,17 @@ static const struct user_regset riscv_user_regset[] = {
+ 		.set = riscv_fpr_set,
+ 	},
+ #endif
++#ifdef CONFIG_RISCV_ISA_V
++	[REGSET_V] = {
++		.core_note_type = NT_RISCV_VECTOR,
++		.align = 16,
++		.n = ((32 * RISCV_MAX_VLENB) +
++		      sizeof(struct __riscv_v_regset_state)) / sizeof(__u32),
++		.size = sizeof(__u32),
++		.regset_get = riscv_vr_get,
++		.set = riscv_vr_set,
++	},
++#endif
+ };
+ 
+ static const struct user_regset_view riscv_user_native_view = {
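
With the REGSET_V entry above in place, a debugger can fetch the vector state
through the generic regset interface. A hedged userspace sketch of reading
just the fixed-size header from a stopped tracee follows; the local struct
mirrors __riscv_v_regset_state, and the NT_RISCV_VECTOR fallback value is an
assumption for toolchain headers that predate it:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <elf.h>

    #ifndef NT_RISCV_VECTOR
    #define NT_RISCV_VECTOR 0x901   /* assumed value for older headers */
    #endif

    int dump_vector_header(pid_t pid)
    {
            struct {
                    unsigned long vstart, vl, vtype, vcsr, vlenb;
            } hdr;  /* mirrors __riscv_v_regset_state, minus vreg[] */
            struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

            /* PTRACE_GETREGSET truncates to iov_len, so the header alone is fine */
            if (ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov) != 0) {
                    perror("PTRACE_GETREGSET");
                    return -1;
            }
            printf("vlenb=%lu vl=%lu\n", hdr.vlenb, hdr.vl);
            return 0;
    }
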
+diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
+index a01bc15dce244..5e39dcf23fdbc 100644
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -22,9 +22,9 @@
+  * region is not and then we have to go down to the PUD level.
+  */
+ 
+-pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+-p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
+-pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
++static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
++static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
++static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
+ 
+ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
+ {
+@@ -438,7 +438,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
+ 	kasan_shallow_populate_pgd(vaddr, vend);
+ }
+ 
+-static void create_tmp_mapping(void)
++static void __init create_tmp_mapping(void)
+ {
+ 	void *ptr;
+ 	p4d_t *base_p4d;
+diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
+index 38349150c96e8..8b541e44151d4 100644
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -35,7 +35,7 @@
+  * and padding is also possible, the limits need to be generous.
+  */
+ #define PAES_MIN_KEYSIZE 16
+-#define PAES_MAX_KEYSIZE 320
++#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
+ 
+ static u8 *ctrblk;
+ static DEFINE_MUTEX(ctrblk_lock);
+diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
+index 5faf0a1d2c167..5ad76471e73ff 100644
+--- a/arch/s390/include/uapi/asm/pkey.h
++++ b/arch/s390/include/uapi/asm/pkey.h
+@@ -26,7 +26,7 @@
+ #define MAXCLRKEYSIZE	32	   /* a clear key value may be up to 32 bytes */
+ #define MAXAESCIPHERKEYSIZE 136  /* our aes cipher keys have always 136 bytes */
+ #define MINEP11AESKEYBLOBSIZE 256  /* min EP11 AES key blob size  */
+-#define MAXEP11AESKEYBLOBSIZE 320  /* max EP11 AES key blob size */
++#define MAXEP11AESKEYBLOBSIZE 336  /* max EP11 AES key blob size */
+ 
+ /* Minimum size of a key blob */
+ #define MINKEYBLOBSIZE	SECKEYBLOBSIZE
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 85a00d97a3143..dfcb2b563e2bd 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -640,6 +640,8 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
+ 
+ static struct attribute *ipl_unknown_attrs[] = {
+ 	&sys_ipl_type_attr.attr,
++	&sys_ipl_secure_attr.attr,
++	&sys_ipl_has_secure_attr.attr,
+ 	NULL,
+ };
+ 
+diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
+index b436029f1ced2..8c4c0c87f9980 100644
+--- a/arch/sparc/include/asm/irq_64.h
++++ b/arch/sparc/include/asm/irq_64.h
+@@ -87,7 +87,7 @@ static inline unsigned long get_softint(void)
+ }
+ 
+ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+-				    bool exclude_self);
++				    int exclude_cpu);
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ 
+ extern void *hardirq_stack[NR_CPUS];
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index b51d8fb0ecdc2..1ea3f37fa9851 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -236,7 +236,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
+ 	}
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+ 	struct thread_info *tp = current_thread_info();
+ 	struct pt_regs *regs = get_irq_regs();
+@@ -252,7 +252,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+ 
+ 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+ 
+-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
++	if (cpumask_test_cpu(this_cpu, mask) && this_cpu != exclude_cpu)
+ 		__global_reg_self(tp, regs, this_cpu);
+ 
+ 	smp_fetch_global_regs();
+@@ -260,7 +260,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+ 	for_each_cpu(cpu, mask) {
+ 		struct global_reg_snapshot *gp;
+ 
+-		if (exclude_self && cpu == this_cpu)
++		if (cpu == exclude_cpu)
+ 			continue;
+ 
+ 		gp = &global_cpu_snapshot[cpu].reg;
+diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
+index 630be793759e2..e543cbac87925 100644
+--- a/arch/um/configs/i386_defconfig
++++ b/arch/um/configs/i386_defconfig
+@@ -34,6 +34,7 @@ CONFIG_TTY_CHAN=y
+ CONFIG_XTERM_CHAN=y
+ CONFIG_CON_CHAN="pts"
+ CONFIG_SSL_CHAN="pts"
++CONFIG_SOUND=m
+ CONFIG_UML_SOUND=m
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
+index 8540d33702726..939cb12318cae 100644
+--- a/arch/um/configs/x86_64_defconfig
++++ b/arch/um/configs/x86_64_defconfig
+@@ -32,6 +32,7 @@ CONFIG_TTY_CHAN=y
+ CONFIG_XTERM_CHAN=y
+ CONFIG_CON_CHAN="pts"
+ CONFIG_SSL_CHAN="pts"
++CONFIG_SOUND=m
+ CONFIG_UML_SOUND=m
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
+index 36911b1fddcf0..b94b2618e7d84 100644
+--- a/arch/um/drivers/Kconfig
++++ b/arch/um/drivers/Kconfig
+@@ -111,24 +111,14 @@ config SSL_CHAN
+ 
+ config UML_SOUND
+ 	tristate "Sound support"
++	depends on SOUND
++	select SOUND_OSS_CORE
+ 	help
+ 	  This option enables UML sound support.  If enabled, it will pull in
+-	  soundcore and the UML hostaudio relay, which acts as a intermediary
++	  the UML hostaudio relay, which acts as an intermediary
+ 	  between the host's dsp and mixer devices and the UML sound system.
+ 	  It is safe to say 'Y' here.
+ 
+-config SOUND
+-	tristate
+-	default UML_SOUND
+-
+-config SOUND_OSS_CORE
+-	bool
+-	default UML_SOUND
+-
+-config HOSTAUDIO
+-	tristate
+-	default UML_SOUND
+-
+ endmenu
+ 
+ menu "UML Network Devices"
+diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
+index a461a950f0518..0e6af81096fd5 100644
+--- a/arch/um/drivers/Makefile
++++ b/arch/um/drivers/Makefile
+@@ -54,7 +54,7 @@ obj-$(CONFIG_UML_NET) += net.o
+ obj-$(CONFIG_MCONSOLE) += mconsole.o
+ obj-$(CONFIG_MMAPPER) += mmapper_kern.o 
+ obj-$(CONFIG_BLK_DEV_UBD) += ubd.o 
+-obj-$(CONFIG_HOSTAUDIO) += hostaudio.o
++obj-$(CONFIG_UML_SOUND) += hostaudio.o
+ obj-$(CONFIG_NULL_CHAN) += null.o 
+ obj-$(CONFIG_PORT_CHAN) += port.o
+ obj-$(CONFIG_PTY_CHAN) += pty.o
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index 7699ca5f35d48..ffe2ee8a02465 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -544,6 +544,7 @@ static void um_pci_irq_vq_cb(struct virtqueue *vq)
+ 	}
+ }
+ 
++#ifdef CONFIG_OF
+ /* Copied from arch/x86/kernel/devicetree.c */
+ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+ {
+@@ -562,6 +563,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+ 	}
+ 	return NULL;
+ }
++#endif
+ 
+ static int um_pci_init_vqs(struct um_pci_device *dev)
+ {
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 03c4328a88cbd..f732426d3b483 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -459,11 +459,25 @@ SYM_CODE_START(startup_64)
+ 	/* Save the trampoline address in RCX */
+ 	movq	%rax, %rcx
+ 
++	/* Set up 32-bit addressable stack */
++	leaq	TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
++
++	/*
++	 * Preserve live 64-bit registers on the stack: this is necessary
++	 * because the architecture does not guarantee that GPRs will retain
++	 * their full 64-bit values across a 32-bit mode switch.
++	 */
++	pushq	%rbp
++	pushq	%rbx
++	pushq	%rsi
++
+ 	/*
+-	 * Load the address of trampoline_return() into RDI.
+-	 * It will be used by the trampoline to return to the main code.
++	 * Push the 64-bit address of trampoline_return() onto the new stack.
++	 * It will be used by the trampoline to return to the main code. Due to
++	 * the 32-bit mode switch, it cannot be kept in a register either.
+ 	 */
+ 	leaq	trampoline_return(%rip), %rdi
++	pushq	%rdi
+ 
+ 	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+ 	pushq	$__KERNEL32_CS
+@@ -471,6 +485,11 @@ SYM_CODE_START(startup_64)
+ 	pushq	%rax
+ 	lretq
+ trampoline_return:
++	/* Restore live 64-bit registers */
++	popq	%rsi
++	popq	%rbx
++	popq	%rbp
++
+ 	/* Restore the stack, the 32-bit trampoline uses its own stack */
+ 	leaq	rva(boot_stack_end)(%rbx), %rsp
+ 
+@@ -582,7 +601,7 @@ SYM_FUNC_END(.Lrelocated)
+ /*
+  * This is the 32-bit trampoline that will be copied over to low memory.
+  *
+- * RDI contains the return address (might be above 4G).
++ * Return address is at the top of the stack (might be above 4G).
+  * ECX contains the base address of the trampoline memory.
+  * Non zero RDX means trampoline needs to enable 5-level paging.
+  */
+@@ -592,9 +611,6 @@ SYM_CODE_START(trampoline_32bit_src)
+ 	movl	%eax, %ds
+ 	movl	%eax, %ss
+ 
+-	/* Set up new stack */
+-	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
+-
+ 	/* Disable paging */
+ 	movl	%cr0, %eax
+ 	btrl	$X86_CR0_PG_BIT, %eax
+@@ -671,7 +687,7 @@ SYM_CODE_END(trampoline_32bit_src)
+ 	.code64
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
+ 	/* Return from the trampoline */
+-	jmp	*%rdi
++	retq
+ SYM_FUNC_END(.Lpaging_enabled)
+ 
+ 	/*
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index d49e90dc04a4c..847740c08c97d 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -6474,8 +6474,18 @@ void spr_uncore_cpu_init(void)
+ 
+ 	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
+ 	if (type) {
++		/*
++		 * The value from the discovery table (stored in the type->num_boxes
++		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
++		 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
++		 */
+ 		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
+-		type->num_boxes = num_cbo;
++		/*
++		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
++		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
++		 */
++		if (num_cbo)
++			type->num_boxes = num_cbo;
+ 	}
+ 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+ }
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 29e083b92813c..836c170d30875 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -42,7 +42,7 @@ extern void init_ISA_irqs(void);
+ 
+ #ifdef CONFIG_X86_LOCAL_APIC
+ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+-				    bool exclude_self);
++				    int exclude_cpu);
+ 
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ #endif
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 56d4ef604b919..635132a127782 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -127,8 +127,8 @@ static inline long local_cmpxchg(local_t *l, long old, long new)
+ 
+ static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+ {
+-	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+-	return try_cmpxchg_local(&l->a.counter, __old, new);
++	return try_cmpxchg_local(&l->a.counter,
++				 (typeof(l->a.counter) *) old, new);
+ }
+ 
+ /* Always has a lock prefix */
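
The pointer forwarding above works because the try_cmpxchg() family follows
compare-exchange semantics: on failure, the value actually observed is written
back through the "old" pointer, so retry loops never need an explicit re-read.
The C11 equivalent, as a small runnable example:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_long counter = 41;
            long expected = 40;

            /* Fails once (counter is 41, not 40), which updates expected;
             * the retry then succeeds and stores expected + 1 == 42. */
            while (!atomic_compare_exchange_weak(&counter, &expected,
                                                 expected + 1))
                    ;
            printf("counter=%ld\n", (long)atomic_load(&counter));
            return 0;
    }
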
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 7f97a8a97e24a..473b16d73b471 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
+ 
+ int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+-					    bool enc);
++void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
++					    unsigned long size, bool enc);
+ 
+ void __init mem_encrypt_free_decrypted_mem(void);
+ 
+@@ -85,7 +85,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
+ static inline int __init
+ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+ static inline void __init
+-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
++early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
+ 
+ static inline void mem_encrypt_free_decrypted_mem(void) { }
+ 
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index ba3e2554799ab..a6deb67cfbb26 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -125,11 +125,12 @@
+  * instance, and is *not* included in this mask since
+  * pte_modify() does modify it.
+  */
+-#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
+-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
+-			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC |  \
+-			 _PAGE_UFFD_WP)
+-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
++#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	       \
++				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
++				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
++				 _PAGE_UFFD_WP)
++#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
++#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
+ 
+ /*
+  * The cache modes defined here are used to translate between pure SW usage
+diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
+index 34a992e275ef4..d6e01f9242996 100644
+--- a/arch/x86/kernel/apic/hw_nmi.c
++++ b/arch/x86/kernel/apic/hw_nmi.c
+@@ -34,9 +34,9 @@ static void nmi_raise_cpu_backtrace(cpumask_t *mask)
+ 	apic->send_IPI_mask(mask, NMI_VECTOR);
+ }
+ 
+-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
++void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+ {
+-	nmi_trigger_cpumask_backtrace(mask, exclude_self,
++	nmi_trigger_cpumask_backtrace(mask, exclude_cpu,
+ 				      nmi_raise_cpu_backtrace);
+ }
+ 
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index c6c15ce1952fb..5934ee5bc087e 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -238,12 +238,6 @@
+ extern int (*console_blank_hook)(int);
+ #endif
+ 
+-/*
+- * The apm_bios device is one of the misc char devices.
+- * This is its minor number.
+- */
+-#define	APM_MINOR_DEV	134
+-
+ /*
+  * Various options can be changed at boot time as follows:
+  * (We allow underscores for compatibility with the modules code)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e3a65e9fc750d..00f043a094fcd 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1265,11 +1265,11 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 89e2aab5d34d8..17eb6a37a5872 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -842,6 +842,26 @@ static noinstr bool quirk_skylake_repmov(void)
+ 	return false;
+ }
+ 
++/*
++ * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
++ * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
++ *
++ * However, the context is still valid, so save the "cs" register for later use.
++ *
++ * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
++ *
++ * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
++ */
++static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
++{
++	if (bank != 1)
++		return;
++	if (!(m->status & MCI_STATUS_POISON))
++		return;
++
++	m->cs = regs->cs;
++}
++
+ /*
+  * Do a quick check if any of the events requires a panic.
+  * This decides if we keep the events around or clear them.
+@@ -861,6 +881,9 @@ static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned lo
+ 		if (mce_flags.snb_ifu_quirk)
+ 			quirk_sandybridge_ifu(i, m, regs);
+ 
++		if (mce_flags.zen_ifu_quirk)
++			quirk_zen_ifu(i, m, regs);
++
+ 		m->bank = i;
+ 		if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ 			mce_read_aux(m, i);
+@@ -1842,6 +1865,9 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+ 		if (c->x86 == 0x15 && c->x86_model <= 0xf)
+ 			mce_flags.overflow_recov = 1;
+ 
++		if (c->x86 >= 0x17 && c->x86 <= 0x1A)
++			mce_flags.zen_ifu_quirk = 1;
++
+ 	}
+ 
+ 	if (c->x86_vendor == X86_VENDOR_INTEL) {
+diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
+index d2412ce2d312f..d5946fcdcd5de 100644
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -157,6 +157,9 @@ struct mce_vendor_flags {
+ 	 */
+ 	smca			: 1,
+ 
++	/* Zen IFU quirk */
++	zen_ifu_quirk		: 1,
++
+ 	/* AMD-style error thresholding banks present. */
+ 	amd_threshold		: 1,
+ 
+@@ -172,7 +175,7 @@ struct mce_vendor_flags {
+ 	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
+ 	skx_repmov_quirk	: 1,
+ 
+-	__reserved_0		: 56;
++	__reserved_0		: 55;
+ };
+ 
+ extern struct mce_vendor_flags mce_flags;
+diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
+index c3e37eaec8ecd..7aaa3652e31d1 100644
+--- a/arch/x86/kernel/cpu/sgx/virt.c
++++ b/arch/x86/kernel/cpu/sgx/virt.c
+@@ -204,6 +204,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 			continue;
+ 
+ 		xa_erase(&vepc->page_array, index);
++		cond_resched();
+ 	}
+ 
+ 	/*
+@@ -222,6 +223,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 			list_add_tail(&epc_page->list, &secs_pages);
+ 
+ 		xa_erase(&vepc->page_array, index);
++		cond_resched();
+ 	}
+ 
+ 	/*
+@@ -243,6 +245,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 
+ 		if (sgx_vepc_free_page(epc_page))
+ 			list_add_tail(&epc_page->list, &secs_pages);
++		cond_resched();
+ 	}
+ 
+ 	if (!list_empty(&secs_pages))
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 1cceac5984daa..526d4da3dcd46 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -966,10 +966,8 @@ static void __init kvm_init_platform(void)
+ 		 * Ensure that _bss_decrypted section is marked as decrypted in the
+ 		 * shared pages list.
+ 		 */
+-		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
+-					PAGE_SIZE);
+ 		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
+-						nr_pages, 0);
++						__end_bss_decrypted - __start_bss_decrypted, 0);
+ 
+ 		/*
+ 		 * If not booted using EFI, enable Live migration support.
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index e1aa2cd7734ba..7d82f0bd449c7 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1356,7 +1356,7 @@ bool smp_park_other_cpus_in_init(void)
+ 	if (this_cpu)
+ 		return false;
+ 
+-	for_each_present_cpu(cpu) {
++	for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
+ 		if (cpu == this_cpu)
+ 			continue;
+ 		apicid = apic->cpu_present_to_apicid(cpu);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 83d41c2601d7b..f15fb71f280e2 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -156,7 +156,7 @@ SECTIONS
+ 		ALIGN_ENTRY_TEXT_END
+ 		*(.gnu.warning)
+ 
+-	} :text =0xcccc
++	} :text = 0xcccccccc
+ 
+ 	/* End of text section, which should occupy whole number of pages */
+ 	_etext = .;
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 54bbd5163e8d3..6faea41e99b6b 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
+ 	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
+ }
+ 
+-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
++static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
+ {
+ #ifdef CONFIG_PARAVIRT
+-	unsigned long sz = npages << PAGE_SHIFT;
+-	unsigned long vaddr_end = vaddr + sz;
++	unsigned long vaddr_end = vaddr + size;
+ 
+ 	while (vaddr < vaddr_end) {
+ 		int psize, pmask, level;
+@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
+ 		snp_set_memory_private(vaddr, npages);
+ 
+ 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+-		enc_dec_hypercall(vaddr, npages, enc);
++		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
+ 
+ 	return true;
+ }
+@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
+ 
+ 	ret = 0;
+ 
+-	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
++	early_set_mem_enc_dec_hypercall(start, size, enc);
+ out:
+ 	__flush_tlb_all();
+ 	return ret;
+@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
+ 	return early_set_memory_enc_dec(vaddr, size, true);
+ }
+ 
+-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
++void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
+ {
+-	enc_dec_hypercall(vaddr, npages, enc);
++	enc_dec_hypercall(vaddr, size, enc);
+ }
+ 
+ void __init sme_early_init(void)
+diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
+index 0e1bb6f019d6b..3f5ffae89b580 100644
+--- a/arch/xtensa/include/asm/core.h
++++ b/arch/xtensa/include/asm/core.h
+@@ -52,4 +52,13 @@
+ #define XTENSA_STACK_ALIGNMENT	16
+ #endif
+ 
++#ifndef XCHAL_HW_MIN_VERSION
++#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
++#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
++			      XCHAL_HW_MIN_VERSION_MINOR)
++#else
++#define XCHAL_HW_MIN_VERSION 0
++#endif
++#endif
++
+ #endif
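
The fallback above packs the two version components into one monotonically
comparable number (major * 100 + minor); the perf_event change below compares
the result against 260000 for RG-2015.0 cores, whose major version is 2600.
A trivial illustration:

    #include <stdio.h>

    #define HW_VERSION(major, minor) ((major) * 100 + (minor))

    int main(void)
    {
            printf("RG-2015.0 -> %d\n", HW_VERSION(2600, 0));  /* 260000 */
            return 0;
    }
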
+diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
+index a0d05c8598d0f..183618090d05b 100644
+--- a/arch/xtensa/kernel/perf_event.c
++++ b/arch/xtensa/kernel/perf_event.c
+@@ -13,17 +13,26 @@
+ #include <linux/perf_event.h>
+ #include <linux/platform_device.h>
+ 
++#include <asm/core.h>
+ #include <asm/processor.h>
+ #include <asm/stacktrace.h>
+ 
++#define XTENSA_HWVERSION_RG_2015_0	260000
++
++#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
++#define XTENSA_PMU_ERI_BASE		0x00101000
++#else
++#define XTENSA_PMU_ERI_BASE		0x00001000
++#endif
++
+ /* Global control/status for all perf counters */
+-#define XTENSA_PMU_PMG			0x1000
++#define XTENSA_PMU_PMG			XTENSA_PMU_ERI_BASE
+ /* Perf counter values */
+-#define XTENSA_PMU_PM(i)		(0x1080 + (i) * 4)
++#define XTENSA_PMU_PM(i)		(XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
+ /* Perf counter control registers */
+-#define XTENSA_PMU_PMCTRL(i)		(0x1100 + (i) * 4)
++#define XTENSA_PMU_PMCTRL(i)		(XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
+ /* Perf counter status registers */
+-#define XTENSA_PMU_PMSTAT(i)		(0x1180 + (i) * 4)
++#define XTENSA_PMU_PMSTAT(i)		(XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
+ 
+ #define XTENSA_PMU_PMG_PMEN		0x1
+ 
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 4533eb4916610..6f81c10757fb9 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -123,17 +123,34 @@ void bio_integrity_free(struct bio *bio)
+ int bio_integrity_add_page(struct bio *bio, struct page *page,
+ 			   unsigned int len, unsigned int offset)
+ {
++	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ 	struct bio_integrity_payload *bip = bio_integrity(bio);
+ 
+-	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
+-		printk(KERN_ERR "%s: bip_vec full\n", __func__);
++	if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
++	    queue_max_hw_sectors(q))
+ 		return 0;
+-	}
+ 
+-	if (bip->bip_vcnt &&
+-	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
+-			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+-		return 0;
++	if (bip->bip_vcnt > 0) {
++		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
++		bool same_page = false;
++
++		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
++					   &same_page)) {
++			bip->bip_iter.bi_size += len;
++			return len;
++		}
++
++		if (bip->bip_vcnt >=
++		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
++			return 0;
++
++		/*
++		 * If the queue doesn't support SG gaps and adding this segment
++		 * would create a gap, disallow it.
++		 */
++		if (bvec_gap_to_prev(&q->limits, bv, offset))
++			return 0;
++	}
+ 
+ 	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
+ 	bip->bip_vcnt++;
+diff --git a/block/bio.c b/block/bio.c
+index 8672179213b93..00ac4c233e3aa 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -903,9 +903,8 @@ static inline bool bio_full(struct bio *bio, unsigned len)
+ 	return false;
+ }
+ 
+-static inline bool page_is_mergeable(const struct bio_vec *bv,
+-		struct page *page, unsigned int len, unsigned int off,
+-		bool *same_page)
++static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
++		unsigned int len, unsigned int off, bool *same_page)
+ {
+ 	size_t bv_end = bv->bv_offset + bv->bv_len;
+ 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
+@@ -919,49 +918,15 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
+ 		return false;
+ 
+ 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+-	if (*same_page)
+-		return true;
+-	else if (IS_ENABLED(CONFIG_KMSAN))
+-		return false;
+-	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
+-}
+-
+-/**
+- * __bio_try_merge_page - try appending data to an existing bvec.
+- * @bio: destination bio
+- * @page: start page to add
+- * @len: length of the data to add
+- * @off: offset of the data relative to @page
+- * @same_page: return if the segment has been merged inside the same page
+- *
+- * Try to add the data at @page + @off to the last bvec of @bio.  This is a
+- * useful optimisation for file systems with a block size smaller than the
+- * page size.
+- *
+- * Warn if (@len, @off) crosses pages in case that @same_page is true.
+- *
+- * Return %true on success or %false on failure.
+- */
+-static bool __bio_try_merge_page(struct bio *bio, struct page *page,
+-		unsigned int len, unsigned int off, bool *same_page)
+-{
+-	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+-		return false;
+-
+-	if (bio->bi_vcnt > 0) {
+-		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+-
+-		if (page_is_mergeable(bv, page, len, off, same_page)) {
+-			if (bio->bi_iter.bi_size > UINT_MAX - len) {
+-				*same_page = false;
+-				return false;
+-			}
+-			bv->bv_len += len;
+-			bio->bi_iter.bi_size += len;
+-			return true;
+-		}
++	if (!*same_page) {
++		if (IS_ENABLED(CONFIG_KMSAN))
++			return false;
++		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
++			return false;
+ 	}
+-	return false;
++
++	bv->bv_len += len;
++	return true;
+ }
+ 
+ /*
+@@ -969,11 +934,10 @@ static bool __bio_try_merge_page(struct bio *bio, struct page *page,
+  * size limit.  This is not for normal read/write bios, but for passthrough
+  * or Zone Append operations that we can't split.
+  */
+-static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
+-				 struct page *page, unsigned len,
+-				 unsigned offset, bool *same_page)
++bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
++		struct page *page, unsigned len, unsigned offset,
++		bool *same_page)
+ {
+-	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ 	unsigned long mask = queue_segment_boundary(q);
+ 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+@@ -982,7 +946,7 @@ static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
+ 		return false;
+ 	if (bv->bv_len + len > queue_max_segment_size(q))
+ 		return false;
+-	return __bio_try_merge_page(bio, page, len, offset, same_page);
++	return bvec_try_merge_page(bv, page, len, offset, same_page);
+ }
+ 
+ /**
+@@ -1002,8 +966,6 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ 		struct page *page, unsigned int len, unsigned int offset,
+ 		unsigned int max_sectors, bool *same_page)
+ {
+-	struct bio_vec *bvec;
+-
+ 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+ 		return 0;
+ 
+@@ -1011,15 +973,19 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ 		return 0;
+ 
+ 	if (bio->bi_vcnt > 0) {
+-		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
++		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
++
++		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
++				same_page)) {
++			bio->bi_iter.bi_size += len;
+ 			return len;
++		}
+ 
+ 		/*
+ 		 * If the queue doesn't support SG gaps and adding this segment
+ 		 * would create a gap, disallow it.
+ 		 */
+-		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+-		if (bvec_gap_to_prev(&q->limits, bvec, offset))
++		if (bvec_gap_to_prev(&q->limits, bv, offset))
+ 			return 0;
+ 	}
+ 
+@@ -1129,11 +1095,21 @@ int bio_add_page(struct bio *bio, struct page *page,
+ {
+ 	bool same_page = false;
+ 
+-	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+-		if (bio_full(bio, len))
+-			return 0;
+-		__bio_add_page(bio, page, len, offset);
++	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
++		return 0;
++	if (bio->bi_iter.bi_size > UINT_MAX - len)
++		return 0;
++
++	if (bio->bi_vcnt > 0 &&
++	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
++				page, len, offset, &same_page)) {
++		bio->bi_iter.bi_size += len;
++		return len;
+ 	}
++
++	if (bio_full(bio, len))
++		return 0;
++	__bio_add_page(bio, page, len, offset);
+ 	return len;
+ }
+ EXPORT_SYMBOL(bio_add_page);
+@@ -1207,13 +1183,18 @@ static int bio_iov_add_page(struct bio *bio, struct page *page,
+ {
+ 	bool same_page = false;
+ 
+-	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+-		__bio_add_page(bio, page, len, offset);
++	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
++		return -EIO;
++
++	if (bio->bi_vcnt > 0 &&
++	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
++				page, len, offset, &same_page)) {
++		bio->bi_iter.bi_size += len;
++		if (same_page)
++			bio_release_page(bio, page);
+ 		return 0;
+ 	}
+-
+-	if (same_page)
+-		bio_release_page(bio, page);
++	__bio_add_page(bio, page, len, offset);
+ 	return 0;
+ }
+ 
+@@ -1337,6 +1318,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+ 	int ret = 0;
+ 
++	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
++		return -EIO;
++
+ 	if (iov_iter_is_bvec(iter)) {
+ 		bio_iov_bvec_set(bio, iter);
+ 		iov_iter_advance(iter, bio->bi_iter.bi_size);
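
The net effect of the bio.c rework above is that __bio_try_merge_page()
disappears and every caller spells out the same two-step rule: first try to
extend the last bio_vec, and only append a new one when that fails, with the
bi_size accounting hoisted into the caller. A generic, self-contained sketch
of that merge-or-append shape (all names illustrative):

    struct seg { unsigned long start; unsigned int len; };

    static int add_range(struct seg *v, unsigned int *cnt, unsigned int max,
                         unsigned long start, unsigned int len)
    {
            if (*cnt > 0) {
                    struct seg *last = &v[*cnt - 1];

                    if (last->start + last->len == start) {
                            last->len += len;  /* merged into the last segment */
                            return len;
                    }
            }
            if (*cnt == max)
                    return 0;                  /* no room, like bio_full() */
            v[*cnt].start = start;
            v[*cnt].len = len;
            (*cnt)++;
            return len;
    }
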
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 9faafcd10e177..4a42ea2972ad8 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1511,7 +1511,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
+ retry:
+ 	spin_lock_irq(&q->queue_lock);
+ 
+-	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
++	/* blkg_list is pushed at the head, reverse walk to initialize parents first */
+ 	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
+ 		struct blkg_policy_data *pd;
+ 
+@@ -1549,21 +1549,20 @@ retry:
+ 				goto enomem;
+ 		}
+ 
+-		blkg->pd[pol->plid] = pd;
++		spin_lock(&blkg->blkcg->lock);
++
+ 		pd->blkg = blkg;
+ 		pd->plid = pol->plid;
+-		pd->online = false;
+-	}
++		blkg->pd[pol->plid] = pd;
+ 
+-	/* all allocated, init in the same order */
+-	if (pol->pd_init_fn)
+-		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+-			pol->pd_init_fn(blkg->pd[pol->plid]);
++		if (pol->pd_init_fn)
++			pol->pd_init_fn(pd);
+ 
+-	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
+ 		if (pol->pd_online_fn)
+-			pol->pd_online_fn(blkg->pd[pol->plid]);
+-		blkg->pd[pol->plid]->online = true;
++			pol->pd_online_fn(pd);
++		pd->online = true;
++
++		spin_unlock(&blkg->blkcg->lock);
+ 	}
+ 
+ 	__set_bit(pol->plid, q->blkcg_pols);
+@@ -1580,14 +1579,19 @@ out:
+ 	return ret;
+ 
+ enomem:
+-	/* alloc failed, nothing's initialized yet, free everything */
++	/* alloc failed, take down everything */
+ 	spin_lock_irq(&q->queue_lock);
+ 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ 		struct blkcg *blkcg = blkg->blkcg;
++		struct blkg_policy_data *pd;
+ 
+ 		spin_lock(&blkcg->lock);
+-		if (blkg->pd[pol->plid]) {
+-			pol->pd_free_fn(blkg->pd[pol->plid]);
++		pd = blkg->pd[pol->plid];
++		if (pd) {
++			if (pd->online && pol->pd_offline_fn)
++				pol->pd_offline_fn(pd);
++			pd->online = false;
++			pol->pd_free_fn(pd);
+ 			blkg->pd[pol->plid] = NULL;
+ 		}
+ 		spin_unlock(&blkcg->lock);
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 8220517c2d67d..fdc489e0ea162 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -443,7 +443,7 @@ bool blk_insert_flush(struct request *rq)
+ 		 * the post flush, and then just pass the command on.
+ 		 */
+ 		blk_rq_init_flush(rq);
+-		rq->flush.seq |= REQ_FSEQ_POSTFLUSH;
++		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
+ 		spin_lock_irq(&fq->mq_flush_lock);
+ 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
+ 		spin_unlock_irq(&fq->mq_flush_lock);
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 44d74a30ddac0..8584babf3ea0c 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -315,12 +315,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+ 					n = bytes;
+ 
+ 				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
+-						     max_sectors, &same_page)) {
+-					if (same_page)
+-						bio_release_page(bio, page);
++						     max_sectors, &same_page))
+ 					break;
+-				}
+ 
++				if (same_page)
++					bio_release_page(bio, page);
+ 				bytes -= n;
+ 				offs = 0;
+ 			}
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 4dd59059b788e..0046b447268f9 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -830,10 +830,13 @@ EXPORT_SYMBOL(blk_set_queue_depth);
+  */
+ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+ {
+-	if (wc)
++	if (wc) {
++		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
+ 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+-	else
++	} else {
++		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
+ 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
++	}
+ 	if (fua)
+ 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
+ 	else
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index afc797fb0dfc4..63e4812623361 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -449,21 +449,16 @@ static ssize_t queue_wc_show(struct request_queue *q, char *page)
+ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+ 			      size_t count)
+ {
+-	int set = -1;
+-
+-	if (!strncmp(page, "write back", 10))
+-		set = 1;
+-	else if (!strncmp(page, "write through", 13) ||
+-		 !strncmp(page, "none", 4))
+-		set = 0;
+-
+-	if (set == -1)
+-		return -EINVAL;
+-
+-	if (set)
++	if (!strncmp(page, "write back", 10)) {
++		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
++			return -EINVAL;
+ 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+-	else
++	} else if (!strncmp(page, "write through", 13) ||
++		 !strncmp(page, "none", 4)) {
+ 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
++	} else {
++		return -EINVAL;
++	}
+ 
+ 	return count;
+ }
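
Together with the QUEUE_FLAG_HW_WC addition in blk-settings.c above, the sysfs
store now separates what the hardware reported from what the administrator
requested: "write back" can only be re-enabled when the device actually
advertised a write cache. A tiny two-flag sketch of that policy (names
illustrative):

    #include <errno.h>
    #include <stdbool.h>

    struct queue_flags {
            bool hw_wc;  /* set at probe time, never from sysfs */
            bool wc;     /* currently applied policy */
    };

    static int store_wc(struct queue_flags *q, bool enable)
    {
            if (enable && !q->hw_wc)
                    return -EINVAL;  /* no write cache to enable */
            q->wc = enable;
            return 0;
    }
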
+diff --git a/block/blk.h b/block/blk.h
+index 608c5dcc516b5..b0dbbc4055966 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -76,6 +76,10 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
+ 		gfp_t gfp_mask);
+ void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
+ 
++bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
++		struct page *page, unsigned len, unsigned offset,
++		bool *same_page);
++
+ static inline bool biovec_phys_mergeable(struct request_queue *q,
+ 		struct bio_vec *vec1, struct bio_vec *vec2)
+ {
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 3be11941fb2dd..9fcddd847937e 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -20,6 +20,8 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	struct blkpg_partition p;
+ 	long long start, length;
+ 
++	if (disk->flags & GENHD_FL_NO_PART)
++		return -EINVAL;
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
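
With this check in place, partition ioctls on a disk flagged GENHD_FL_NO_PART
fail up front with -EINVAL instead of proceeding into the handler. A hedged
userspace sketch of the interface the check guards; the device path is
illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/blkpg.h>

    int main(void)
    {
            struct blkpg_partition part = {
                    .start = 1 << 20, .length = 1 << 20, .pno = 1,
            };
            struct blkpg_ioctl_arg arg = {
                    .op = BLKPG_ADD_PARTITION,
                    .datalen = sizeof(part), .data = &part,
            };
            int fd = open("/dev/loop0", O_RDWR);  /* illustrative path */

            if (fd < 0 || ioctl(fd, BLKPG, &arg) != 0)
                    perror("BLKPG");
            if (fd >= 0)
                    close(fd);
            return 0;
    }
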
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 02a916ba62ee7..f958e79277b8b 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -646,8 +646,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ 	struct request_queue *q = hctx->queue;
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct blk_mq_tags *tags = hctx->sched_tags;
++	unsigned int shift = tags->bitmap_tags.sb.shift;
+ 
+-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
++	dd->async_depth = max(1U, 3 * (1U << shift)  / 4);
+ 
+ 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ }
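
The depth above is now derived from the sbitmap word shift rather than from
nr_requests: each sbitmap word holds 1 << shift bits, and async requests are
capped at three quarters of that, which keeps the value compatible with
sbitmap_queue_min_shallow_depth(). The arithmetic, spelled out:

    #include <stdio.h>

    int main(void)
    {
            unsigned int shift = 6;  /* e.g. 64 tags per sbitmap word */
            unsigned int depth = 3 * (1U << shift) / 4;

            printf("async_depth = %u\n", depth ? depth : 1);  /* 48 */
            return 0;
    }
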
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 10efb56d8b481..ea6fb8e89d065 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
+ 
+ 	if (IS_ERR(ret)) {
+ 		up_read(&key->sem);
++		key_put(key);
+ 		return PTR_ERR(ret);
+ 	}
+ 
+ 	key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
+ 	if (!key_data) {
+ 		up_read(&key->sem);
++		key_put(key);
+ 		return -ENOMEM;
+ 	}
+ 
+ 	memcpy(key_data, ret, key_datalen);
+ 
+ 	up_read(&key->sem);
++	key_put(key);
+ 
+ 	err = type->setkey(ask->private, key_data, key_datalen);
+ 
+@@ -1192,6 +1195,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ 
+ 	areq->areqlen = areqlen;
+ 	areq->sk = sk;
++	areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
+ 	areq->last_rsgl = NULL;
+ 	INIT_LIST_HEAD(&areq->rsgl_list);
+ 	areq->tsgl = NULL;
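
The two af_alg hunks above fix independent bugs: the first adds the key_put()
that must balance the reference taken by the key lookup on every exit path,
and the second initializes the first rsgl's scatterlist table pointer. The
refcount rule generalizes; a compact self-contained sketch with stub helpers
(names illustrative):

    #include <errno.h>
    #include <stdlib.h>

    struct key { int refs; };

    static struct key *lookup_key(int serial)  /* takes a reference */
    {
            struct key *k = calloc(1, sizeof(*k));

            (void)serial;
            if (k)
                    k->refs = 1;
            return k;
    }

    static void key_put(struct key *k)  /* drops it */
    {
            if (k && --k->refs == 0)
                    free(k);
    }

    static int use_key(const struct key *k)
    {
            return k->refs > 0 ? 0 : -EINVAL;
    }

    int setkey_by_serial(int serial)
    {
            struct key *key = lookup_key(serial);
            int err;

            if (!key)
                    return -ENOENT;
            err = use_key(key);  /* success or failure... */
            key_put(key);        /* ...the reference is always dropped */
            return err;
    }
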
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 5e7cd603d489c..4fe95c4480473 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -17,6 +17,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/workqueue.h>
+ 
+ #include "internal.h"
+ 
+@@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
+ 	inst->alg.cra_type->free(inst);
+ }
+ 
+-static void crypto_destroy_instance(struct crypto_alg *alg)
++static void crypto_destroy_instance_workfn(struct work_struct *w)
+ {
+-	struct crypto_instance *inst = (void *)alg;
++	struct crypto_instance *inst = container_of(w, struct crypto_instance,
++						    free_work);
+ 	struct crypto_template *tmpl = inst->tmpl;
+ 
+ 	crypto_free_instance(inst);
+ 	crypto_tmpl_put(tmpl);
+ }
+ 
++static void crypto_destroy_instance(struct crypto_alg *alg)
++{
++	struct crypto_instance *inst = container_of(alg,
++						    struct crypto_instance,
++						    alg);
++
++	INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
++	schedule_work(&inst->free_work);
++}
++
+ /*
+  * This function adds a spawn to the list secondary_spawns which
+  * will be used at the end of crypto_remove_spawns to unregister
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 6fdfc82e23a8a..7c71db3ac23d4 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
+ 			goto out;
+ 	}
+ 
++	if (cert->unsupported_sig) {
++		ret = 0;
++		goto out;
++	}
++
+ 	ret = public_key_verify_signature(cert->pub, cert->sig);
+ 	if (ret < 0) {
+ 		if (ret == -ENOPKG) {
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index ce62e61a9605e..60cc4605169c5 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -123,17 +123,16 @@ static void lpi_device_get_constraints_amd(void)
+ 			acpi_handle_debug(lps0_device_handle,
+ 					  "LPI: constraints list begin:\n");
+ 
+-			for (j = 0; j < package->package.count; ++j) {
++			for (j = 0; j < package->package.count; j++) {
+ 				union acpi_object *info_obj = &package->package.elements[j];
+ 				struct lpi_device_constraint_amd dev_info = {};
+ 				struct lpi_constraints *list;
+ 				acpi_status status;
+ 
+-				for (k = 0; k < info_obj->package.count; ++k) {
+-					union acpi_object *obj = &info_obj->package.elements[k];
++				list = &lpi_constraints_table[lpi_constraints_table_size];
+ 
+-					list = &lpi_constraints_table[lpi_constraints_table_size];
+-					list->min_dstate = -1;
++				for (k = 0; k < info_obj->package.count; k++) {
++					union acpi_object *obj = &info_obj->package.elements[k];
+ 
+ 					switch (k) {
+ 					case 0:
+@@ -149,27 +148,21 @@ static void lpi_device_get_constraints_amd(void)
+ 						dev_info.min_dstate = obj->integer.value;
+ 						break;
+ 					}
++				}
+ 
+-					if (!dev_info.enabled || !dev_info.name ||
+-					    !dev_info.min_dstate)
+-						continue;
++				if (!dev_info.enabled || !dev_info.name ||
++				    !dev_info.min_dstate)
++					continue;
+ 
+-					status = acpi_get_handle(NULL, dev_info.name,
+-								 &list->handle);
+-					if (ACPI_FAILURE(status))
+-						continue;
++				status = acpi_get_handle(NULL, dev_info.name, &list->handle);
++				if (ACPI_FAILURE(status))
++					continue;
+ 
+-					acpi_handle_debug(lps0_device_handle,
+-							  "Name:%s\n", dev_info.name);
++				acpi_handle_debug(lps0_device_handle,
++						  "Name:%s\n", dev_info.name);
+ 
+-					list->min_dstate = dev_info.min_dstate;
++				list->min_dstate = dev_info.min_dstate;
+ 
+-					if (list->min_dstate < 0) {
+-						acpi_handle_debug(lps0_device_handle,
+-								  "Incomplete constraint defined\n");
+-						continue;
+-					}
+-				}
+ 				lpi_constraints_table_size++;
+ 			}
+ 		}
+@@ -214,7 +207,7 @@ static void lpi_device_get_constraints(void)
+ 		if (!package)
+ 			continue;
+ 
+-		for (j = 0; j < package->package.count; ++j) {
++		for (j = 0; j < package->package.count; j++) {
+ 			union acpi_object *element =
+ 					&(package->package.elements[j]);
+ 
+@@ -246,7 +239,7 @@ static void lpi_device_get_constraints(void)
+ 
+ 		constraint->min_dstate = -1;
+ 
+-		for (j = 0; j < package_count; ++j) {
++		for (j = 0; j < package_count; j++) {
+ 			union acpi_object *info_obj = &info.package[j];
+ 			union acpi_object *cnstr_pkg;
+ 			union acpi_object *obj;
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index ce88af9eb562f..09e72967b8abf 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -528,6 +528,7 @@ static void amba_device_release(struct device *dev)
+ {
+ 	struct amba_device *d = to_amba_device(dev);
+ 
++	of_node_put(d->dev.of_node);
+ 	if (d->res.parent)
+ 		release_resource(&d->res);
+ 	mutex_destroy(&d->periphid_lock);
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 3dff5037943e0..6ceaf50f5a671 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -3817,6 +3817,17 @@ void device_del(struct device *dev)
+ 	device_platform_notify_remove(dev);
+ 	device_links_purge(dev);
+ 
++	/*
++	 * If a device does not have a driver attached, we need to clean
++	 * up any managed resources. We do this in device_release(), but
++	 * it's never called (and we leak the device) if a managed
++	 * resource holds a reference to the device. So release all
++	 * managed resources here, like we do in driver_detach(). We
++	 * still need to do so again in device_release() in case someone
++	 * adds a new resource after this point, though.
++	 */
++	devres_release_all(dev);
++
+ 	bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
+ 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ 	glue_dir = get_glue_dir(dev);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 878aa7646b37e..a528cec24264a 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -693,6 +693,8 @@ re_probe:
+ 
+ 		device_remove(dev);
+ 		driver_sysfs_remove(dev);
++		if (dev->bus && dev->bus->dma_cleanup)
++			dev->bus->dma_cleanup(dev);
+ 		device_unbind_cleanup(dev);
+ 
+ 		goto re_probe;
+diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
+index 283c2e02a2985..41edd6a430eb4 100644
+--- a/drivers/base/regmap/regcache-maple.c
++++ b/drivers/base/regmap/regcache-maple.c
+@@ -74,7 +74,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
+ 	rcu_read_unlock();
+ 
+ 	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
+-			GFP_KERNEL);
++			map->alloc_flags);
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
+@@ -92,7 +92,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
+ 	mas_lock(&mas);
+ 
+ 	mas_set_range(&mas, index, last);
+-	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
++	ret = mas_store_gfp(&mas, entry, map->alloc_flags);
+ 
+ 	mas_unlock(&mas);
+ 
+@@ -134,7 +134,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ 
+ 			lower = kmemdup(entry, ((min - mas.index) *
+ 						sizeof(unsigned long)),
+-					GFP_KERNEL);
++					map->alloc_flags);
+ 			if (!lower) {
+ 				ret = -ENOMEM;
+ 				goto out_unlocked;
+@@ -148,7 +148,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ 			upper = kmemdup(&entry[max + 1],
+ 					((mas.last - max) *
+ 					 sizeof(unsigned long)),
+-					GFP_KERNEL);
++					map->alloc_flags);
+ 			if (!upper) {
+ 				ret = -ENOMEM;
+ 				goto out_unlocked;
+@@ -162,7 +162,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ 		/* Insert new nodes with the saved data */
+ 		if (lower) {
+ 			mas_set_range(&mas, lower_index, lower_last);
+-			ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
++			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
+ 			if (ret != 0)
+ 				goto out;
+ 			lower = NULL;
+@@ -170,7 +170,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ 
+ 		if (upper) {
+ 			mas_set_range(&mas, upper_index, upper_last);
+-			ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
++			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
+ 			if (ret != 0)
+ 				goto out;
+ 			upper = NULL;
+@@ -320,7 +320,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
+ 	unsigned long *entry;
+ 	int i, ret;
+ 
+-	entry = kcalloc(last - first + 1, sizeof(unsigned long), GFP_KERNEL);
++	entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
+@@ -331,7 +331,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
+ 
+ 	mas_set_range(&mas, map->reg_defaults[first].reg,
+ 		      map->reg_defaults[last].reg);
+-	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
++	ret = mas_store_gfp(&mas, entry, map->alloc_flags);
+ 
+ 	mas_unlock(&mas);
+ 
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 584bcc55f56e3..06788965aa293 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -277,7 +277,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 
+ 	blk = krealloc(rbnode->block,
+ 		       blklen * map->cache_word_size,
+-		       GFP_KERNEL);
++		       map->alloc_flags);
+ 	if (!blk)
+ 		return -ENOMEM;
+ 
+@@ -286,7 +286,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ 		present = krealloc(rbnode->cache_present,
+ 				   BITS_TO_LONGS(blklen) * sizeof(*present),
+-				   GFP_KERNEL);
++				   map->alloc_flags);
+ 		if (!present)
+ 			return -ENOMEM;
+ 
+@@ -320,7 +320,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+ 	const struct regmap_range *range;
+ 	int i;
+ 
+-	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
++	rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
+ 	if (!rbnode)
+ 		return NULL;
+ 
+@@ -346,13 +346,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+ 	}
+ 
+ 	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
+-				      GFP_KERNEL);
++				      map->alloc_flags);
+ 	if (!rbnode->block)
+ 		goto err_free;
+ 
+ 	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
+ 					sizeof(*rbnode->cache_present),
+-					GFP_KERNEL);
++					map->alloc_flags);
+ 	if (!rbnode->cache_present)
+ 		goto err_free_block;
+ 
+diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
+index 929410d0dd6fe..3465800baa6c8 100644
+--- a/drivers/base/test/test_async_driver_probe.c
++++ b/drivers/base/test/test_async_driver_probe.c
+@@ -84,7 +84,7 @@ test_platform_device_register_node(char *name, int id, int nid)
+ 
+ 	pdev = platform_device_alloc(name, id);
+ 	if (!pdev)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	if (nid != NUMA_NO_NODE)
+ 		set_dev_node(&pdev->dev, nid);
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 79ab532aabafb..6bc86106c7b2a 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1557,7 +1557,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
+ 	do {
+ 		int sent;
+ 
+-		bvec_set_page(&bvec, page, offset, len);
++		bvec_set_page(&bvec, page, len, offset);
+ 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+ 
+ 		sent = sock_sendmsg(socket, &msg);
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index d9349ba48281e..7ba60151a16a6 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2658,6 +2658,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ 			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+ 				&hdev->quirks);
+ 
++			/* These variants don't seem to support LE Coded PHY */
++			set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
++
+ 			/* Setup MSFT Extension support */
+ 			btintel_set_msft_opcode(hdev, ver.hw_variant);
+ 
+@@ -2729,6 +2732,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ 		 */
+ 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ 
++		/* These variants don't seem to support LE Coded PHY */
++		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
++
+ 		/* Set Valid LE States quirk */
+ 		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ 
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index d978e7cea8731..c06a04080cd75 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -101,21 +101,21 @@ static const struct id_table ic_id_table[] = {
+ 	{ IC_INFO(RTL_ROM_LMP_8723A, 0xb, 0x6, HCI_USB),
+ 	  .config_needed = false,
+ 	  .has_rom_version = false,
+-	  .fw_name = "rtl_bt/rtl8723a_fw.bin",
++	  .fw_name = "rtl_bt/rtl8723a_fw",
+ 	  .cfg_name = NULL },
+ 
+ 	/* 8723BS */
+ 	{ IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_UART),
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723bs_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723bs_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723bs_config" },
+ 
+ 	/* 8723B */
+ 	{ IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_USB),
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723b_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723b_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723b_config" },
+ 
+ 	/* 8723CS-CG */
+@@ -126,7 +126,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .hci_bus = HCI_UART,
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723cs_cg_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723cs_cg_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723cs_cg_config" },
+ 
+ 	/* 8723CS-VF */
+@@ -137,7 +137,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .hci_bus = HCI_UART,
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723cs_vf_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723cs_vf_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723cs_vf_config" },
+ 
+ 	/* 8723CS-XX */
+@@ -148,28 +148,28 @@ static const struct id_table ic_id_table[] = {
+ 	  .hci_bus = HCI_UART,
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723cs_xx_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723cs_xx_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723cs_xx_config" },
+ 
+ 	/* 8723D */
+ 	{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB),
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723d_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723d_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723d_config" },
+ 
+ 	/* 8723DS */
+ 	{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_UART),
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8723ds_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8723ds_fw",
+ 	  .cfg_name = "rtl_bt/rtl8723ds_config" },
+ 
+ 	/* 8821A */
+ 	{ IC_INFO(RTL_ROM_LMP_8821A, 0xa, 0x6, HCI_USB),
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8821a_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8821a_fw",
+ 	  .cfg_name = "rtl_bt/rtl8821a_config" },
+ 
+ 	/* 8821C */
+@@ -177,7 +177,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8821c_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8821c_fw",
+ 	  .cfg_name = "rtl_bt/rtl8821c_config" },
+ 
+ 	/* 8821CS */
+@@ -185,14 +185,14 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8821cs_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8821cs_fw",
+ 	  .cfg_name = "rtl_bt/rtl8821cs_config" },
+ 
+ 	/* 8761A */
+ 	{ IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB),
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8761a_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8761a_fw",
+ 	  .cfg_name = "rtl_bt/rtl8761a_config" },
+ 
+ 	/* 8761B */
+@@ -200,14 +200,14 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8761b_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8761b_fw",
+ 	  .cfg_name = "rtl_bt/rtl8761b_config" },
+ 
+ 	/* 8761BU */
+ 	{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB),
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+-	  .fw_name  = "rtl_bt/rtl8761bu_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8761bu_fw",
+ 	  .cfg_name = "rtl_bt/rtl8761bu_config" },
+ 
+ 	/* 8822C with UART interface */
+@@ -215,7 +215,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8822cs_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8822cs_fw",
+ 	  .cfg_name = "rtl_bt/rtl8822cs_config" },
+ 
+ 	/* 8822C with UART interface */
+@@ -223,7 +223,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8822cs_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8822cs_fw",
+ 	  .cfg_name = "rtl_bt/rtl8822cs_config" },
+ 
+ 	/* 8822C with USB interface */
+@@ -231,7 +231,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8822cu_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8822cu_fw",
+ 	  .cfg_name = "rtl_bt/rtl8822cu_config" },
+ 
+ 	/* 8822B */
+@@ -239,7 +239,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8822b_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8822b_fw",
+ 	  .cfg_name = "rtl_bt/rtl8822b_config" },
+ 
+ 	/* 8852A */
+@@ -247,7 +247,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8852au_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8852au_fw",
+ 	  .cfg_name = "rtl_bt/rtl8852au_config" },
+ 
+ 	/* 8852B with UART interface */
+@@ -255,7 +255,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = true,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8852bs_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8852bs_fw",
+ 	  .cfg_name = "rtl_bt/rtl8852bs_config" },
+ 
+ 	/* 8852B */
+@@ -263,7 +263,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8852bu_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8852bu_fw",
+ 	  .cfg_name = "rtl_bt/rtl8852bu_config" },
+ 
+ 	/* 8852C */
+@@ -271,7 +271,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = true,
+-	  .fw_name  = "rtl_bt/rtl8852cu_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8852cu_fw",
+ 	  .cfg_name = "rtl_bt/rtl8852cu_config" },
+ 
+ 	/* 8851B */
+@@ -279,7 +279,7 @@ static const struct id_table ic_id_table[] = {
+ 	  .config_needed = false,
+ 	  .has_rom_version = true,
+ 	  .has_msft_ext = false,
+-	  .fw_name  = "rtl_bt/rtl8851bu_fw.bin",
++	  .fw_name  = "rtl_bt/rtl8851bu_fw",
+ 	  .cfg_name = "rtl_bt/rtl8851bu_config" },
+ 	};
+ 
+@@ -967,6 +967,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 	struct btrtl_device_info *btrtl_dev;
+ 	struct sk_buff *skb;
+ 	struct hci_rp_read_local_version *resp;
++	char fw_name[40];
+ 	char cfg_name[40];
+ 	u16 hci_rev, lmp_subver;
+ 	u8 hci_ver, lmp_ver, chip_type = 0;
+@@ -1079,8 +1080,26 @@ next:
+ 			goto err_free;
+ 	}
+ 
+-	btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
+-					  &btrtl_dev->fw_data);
++	if (!btrtl_dev->ic_info->fw_name) {
++		ret = -ENOMEM;
++		goto err_free;
++	}
++
++	btrtl_dev->fw_len = -EIO;
++	if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
++		snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
++				btrtl_dev->ic_info->fw_name);
++		btrtl_dev->fw_len = rtl_load_file(hdev, fw_name,
++				&btrtl_dev->fw_data);
++	}
++
++	if (btrtl_dev->fw_len < 0) {
++		snprintf(fw_name, sizeof(fw_name), "%s.bin",
++				btrtl_dev->ic_info->fw_name);
++		btrtl_dev->fw_len = rtl_load_file(hdev, fw_name,
++				&btrtl_dev->fw_data);
++	}
++
+ 	if (btrtl_dev->fw_len < 0) {
+ 		rtl_dev_err(hdev, "firmware file %s not found",
+ 			    btrtl_dev->ic_info->fw_name);
+@@ -1180,6 +1199,10 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
+ 		if (btrtl_dev->project_id == CHIP_ID_8852C)
+ 			btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP);
+ 
++		if (btrtl_dev->project_id == CHIP_ID_8852A ||
++		    btrtl_dev->project_id == CHIP_ID_8852C)
++			set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
++
+ 		hci_set_aosp_capable(hdev);
+ 		break;
+ 	default:
+@@ -1398,4 +1421,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 764d176e97351..e685acc5cacd9 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2079,7 +2079,7 @@ static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts)
+ 		 * alternate setting.
+ 		 */
+ 		spin_lock_irqsave(&data->rxlock, flags);
+-		kfree_skb(data->sco_skb);
++		dev_kfree_skb_irq(data->sco_skb);
+ 		data->sco_skb = NULL;
+ 		spin_unlock_irqrestore(&data->rxlock, flags);
+ 
+diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
+index 05f7f6de6863d..97da0b2bfd17e 100644
+--- a/drivers/bluetooth/hci_nokia.c
++++ b/drivers/bluetooth/hci_nokia.c
+@@ -734,7 +734,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
+ 		return err;
+ 	}
+ 
+-	clk_prepare_enable(sysclk);
++	err = clk_prepare_enable(sysclk);
++	if (err) {
++		dev_err(dev, "could not enable sysclk: %d", err);
++		return err;
++	}
+ 	btdev->sysclk_speed = clk_get_rate(sysclk);
+ 	clk_disable_unprepare(sysclk);
+ 
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 4cb23b9e06ea4..c95fa4335fee2 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -3106,7 +3106,7 @@ static int sysc_init_static_data(struct sysc *ddata)
+ 
+ 	match = soc_device_match(sysc_soc_match);
+ 	if (match && match->data)
+-		sysc_soc->soc = (int)match->data;
++		sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
+ 
+ 	/*
+ 	 * Check and warn about possible old incomplete dtb. We now want to see
+diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
+index 06bc060534d81..c0df053cbe4b2 100644
+--- a/drivers/char/hw_random/iproc-rng200.c
++++ b/drivers/char/hw_random/iproc-rng200.c
+@@ -182,6 +182,8 @@ static int iproc_rng200_probe(struct platform_device *pdev)
+ 		return PTR_ERR(priv->base);
+ 	}
+ 
++	dev_set_drvdata(dev, priv);
++
+ 	priv->rng.name = "iproc-rng200";
+ 	priv->rng.read = iproc_rng200_read;
+ 	priv->rng.init = iproc_rng200_init;
+@@ -199,6 +201,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused iproc_rng200_suspend(struct device *dev)
++{
++	struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
++
++	iproc_rng200_cleanup(&priv->rng);
++
++	return 0;
++}
++
++static int __maybe_unused iproc_rng200_resume(struct device *dev)
++{
++	struct iproc_rng200_dev *priv =  dev_get_drvdata(dev);
++
++	iproc_rng200_init(&priv->rng);
++
++	return 0;
++}
++
++static const struct dev_pm_ops iproc_rng200_pm_ops = {
++	SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
++};
++
+ static const struct of_device_id iproc_rng200_of_match[] = {
+ 	{ .compatible = "brcm,bcm2711-rng200", },
+ 	{ .compatible = "brcm,bcm7211-rng200", },
+@@ -212,6 +236,7 @@ static struct platform_driver iproc_rng200_driver = {
+ 	.driver = {
+ 		.name		= "iproc-rng200",
+ 		.of_match_table = iproc_rng200_of_match,
++		.pm		= &iproc_rng200_pm_ops,
+ 	},
+ 	.probe		= iproc_rng200_probe,
+ };
+diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
+index e8f9621e79541..3774adf903a83 100644
+--- a/drivers/char/hw_random/nomadik-rng.c
++++ b/drivers/char/hw_random/nomadik-rng.c
+@@ -13,8 +13,6 @@
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ 
+-static struct clk *rng_clk;
+-
+ static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	void __iomem *base = (void __iomem *)rng->priv;
+@@ -36,21 +34,20 @@ static struct hwrng nmk_rng = {
+ 
+ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+ {
++	struct clk *rng_clk;
+ 	void __iomem *base;
+ 	int ret;
+ 
+-	rng_clk = devm_clk_get(&dev->dev, NULL);
++	rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
+ 	if (IS_ERR(rng_clk)) {
+ 		dev_err(&dev->dev, "could not get rng clock\n");
+ 		ret = PTR_ERR(rng_clk);
+ 		return ret;
+ 	}
+ 
+-	clk_prepare_enable(rng_clk);
+-
+ 	ret = amba_request_regions(dev, dev->dev.init_name);
+ 	if (ret)
+-		goto out_clk;
++		return ret;
+ 	ret = -ENOMEM;
+ 	base = devm_ioremap(&dev->dev, dev->res.start,
+ 			    resource_size(&dev->res));
+@@ -64,15 +61,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+ 
+ out_release:
+ 	amba_release_regions(dev);
+-out_clk:
+-	clk_disable_unprepare(rng_clk);
+ 	return ret;
+ }
+ 
+ static void nmk_rng_remove(struct amba_device *dev)
+ {
+ 	amba_release_regions(dev);
+-	clk_disable_unprepare(rng_clk);
+ }
+ 
+ static const struct amba_id nmk_rng_ids[] = {
+diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
+index 99c8bd0859a14..e04a054e89307 100644
+--- a/drivers/char/hw_random/pic32-rng.c
++++ b/drivers/char/hw_random/pic32-rng.c
+@@ -36,7 +36,6 @@
+ struct pic32_rng {
+ 	void __iomem	*base;
+ 	struct hwrng	rng;
+-	struct clk	*clk;
+ };
+ 
+ /*
+@@ -70,6 +69,7 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+ static int pic32_rng_probe(struct platform_device *pdev)
+ {
+ 	struct pic32_rng *priv;
++	struct clk *clk;
+ 	u32 v;
+ 	int ret;
+ 
+@@ -81,13 +81,9 @@ static int pic32_rng_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->base))
+ 		return PTR_ERR(priv->base);
+ 
+-	priv->clk = devm_clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(priv->clk))
+-		return PTR_ERR(priv->clk);
+-
+-	ret = clk_prepare_enable(priv->clk);
+-	if (ret)
+-		return ret;
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
++	if (IS_ERR(clk))
++		return PTR_ERR(clk);
+ 
+ 	/* enable TRNG in enhanced mode */
+ 	v = TRNGEN | TRNGMOD;
+@@ -98,15 +94,11 @@ static int pic32_rng_probe(struct platform_device *pdev)
+ 
+ 	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ 	if (ret)
+-		goto err_register;
++		return ret;
+ 
+ 	platform_set_drvdata(pdev, priv);
+ 
+ 	return 0;
+-
+-err_register:
+-	clk_disable_unprepare(priv->clk);
+-	return ret;
+ }
+ 
+ static int pic32_rng_remove(struct platform_device *pdev)
+@@ -114,7 +106,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
+ 	struct pic32_rng *rng = platform_get_drvdata(pdev);
+ 
+ 	writel(0, rng->base + RNGCON);
+-	clk_disable_unprepare(rng->clk);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index abddd7e43a9a6..5cd031f3fc970 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2082,6 +2082,11 @@ static int try_smi_init(struct smi_info *new_smi)
+ 		new_smi->io.io_cleanup = NULL;
+ 	}
+ 
++	if (rv && new_smi->si_sm) {
++		kfree(new_smi->si_sm);
++		new_smi->si_sm = NULL;
++	}
++
+ 	return rv;
+ }
+ 
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 3b921c78ba083..faf1f2ad584bf 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1400,7 +1400,7 @@ static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ restart:
+ 	list_for_each_entry(info, &ssif_infos, link) {
+ 		if (info->binfo.addr == addr) {
+-			if (info->addr_src == SI_SMBIOS)
++			if (info->addr_src == SI_SMBIOS && !info->adapter_name)
+ 				info->adapter_name = kstrdup(adapter_name,
+ 							     GFP_KERNEL);
+ 
+@@ -1600,6 +1600,11 @@ static int ssif_add_infos(struct i2c_client *client)
+ 	info->addr_src = SI_ACPI;
+ 	info->client = client;
+ 	info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL);
++	if (!info->adapter_name) {
++		kfree(info);
++		return -ENOMEM;
++	}
++
+ 	info->binfo.addr = client->addr;
+ 	list_add_tail(&info->link, &ssif_infos);
+ 	return 0;
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 9eb1a18590123..a5dbebb1acfcf 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+ 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+ 
+-static int crb_check_flags(struct tpm_chip *chip)
+-{
+-	u32 val;
+-	int ret;
+-
+-	ret = crb_request_locality(chip, 0);
+-	if (ret)
+-		return ret;
+-
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+-	if (ret)
+-		goto release;
+-
+-	if (val == 0x414D4400U /* AMD */)
+-		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+-
+-release:
+-	crb_relinquish_locality(chip, 0);
+-
+-	return ret;
+-}
+-
+ static const struct tpm_class_ops tpm_crb = {
+ 	.flags = TPM_OPS_AUTO_STARTUP,
+ 	.status = crb_status,
+@@ -826,9 +804,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	if (rc)
+ 		goto out;
+ 
+-	rc = crb_check_flags(chip);
+-	if (rc)
+-		goto out;
++#ifdef CONFIG_X86
++	/* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		dev_info(dev, "Disabling hwrng\n");
++		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++	}
++#endif /* CONFIG_X86 */
+ 
+ 	rc = tpm_chip_register(chip);
+ 
+diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
+index 7a6e3ce97133b..27a08c50ac1d8 100644
+--- a/drivers/clk/imx/clk-composite-8m.c
++++ b/drivers/clk/imx/clk-composite-8m.c
+@@ -97,7 +97,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
+ 	int prediv_value;
+ 	int div_value;
+ 	int ret;
+-	u32 val;
++	u32 orig, val;
+ 
+ 	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
+ 						&prediv_value, &div_value);
+@@ -106,13 +106,15 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
+ 
+ 	spin_lock_irqsave(divider->lock, flags);
+ 
+-	val = readl(divider->reg);
+-	val &= ~((clk_div_mask(divider->width) << divider->shift) |
+-			(clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
++	orig = readl(divider->reg);
++	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
++		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
+ 
+ 	val |= (u32)(prediv_value  - 1) << divider->shift;
+ 	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;
+-	writel(val, divider->reg);
++
++	if (val != orig)
++		writel(val, divider->reg);
+ 
+ 	spin_unlock_irqrestore(divider->lock, flags);
+ 
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 1469249386dd8..670aa2bab3017 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -178,10 +178,6 @@ static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "au
+ 						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ 						"clk_ext3", "clk_ext4", };
+ 
+-static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+-						"clk_ext1", "clk_ext2", };
+-
+ static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ 						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ 						"clk_ext2", "clk_ext3", };
+@@ -567,7 +563,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580);
+ 	hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600);
+ 	hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680);
+-	hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700);
+ 	hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780);
+ 	hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800);
+ 	hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880);
+diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
+index e308c88cb801c..1b04e2fc78ad5 100644
+--- a/drivers/clk/imx/clk-imx8ulp.c
++++ b/drivers/clk/imx/clk-imx8ulp.c
+@@ -167,7 +167,7 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+ 	clks[IMX8ULP_CLK_SPLL2_PRE_SEL]	= imx_clk_hw_mux_flags("spll2_pre_sel", base + 0x510, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ 	clks[IMX8ULP_CLK_SPLL3_PRE_SEL]	= imx_clk_hw_mux_flags("spll3_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ 
+-	clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll2", "spll2_pre_sel", base + 0x500);
++	clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP_1GHZ, "spll2", "spll2_pre_sel", base + 0x500);
+ 	clks[IMX8ULP_CLK_SPLL3] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll3", "spll3_pre_sel", base + 0x600);
+ 	clks[IMX8ULP_CLK_SPLL3_VCODIV] = imx_clk_hw_divider("spll3_vcodiv", "spll3", base + 0x604, 0, 6);
+ 
+diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
+index 6e7e34571fc8d..9b136c951762c 100644
+--- a/drivers/clk/imx/clk-pllv4.c
++++ b/drivers/clk/imx/clk-pllv4.c
+@@ -44,11 +44,15 @@ struct clk_pllv4 {
+ 	u32		cfg_offset;
+ 	u32		num_offset;
+ 	u32		denom_offset;
++	bool		use_mult_range;
+ };
+ 
+ /* Valid PLL MULT Table */
+ static const int pllv4_mult_table[] = {33, 27, 22, 20, 17, 16};
+ 
++/* Valid PLL MULT range, (max, min) */
++static const int pllv4_mult_range[] = {54, 27};
++
+ #define to_clk_pllv4(__hw) container_of(__hw, struct clk_pllv4, hw)
+ 
+ #define LOCK_TIMEOUT_US		USEC_PER_MSEC
+@@ -94,17 +98,30 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
+ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
+ 				 unsigned long *prate)
+ {
++	struct clk_pllv4 *pll = to_clk_pllv4(hw);
+ 	unsigned long parent_rate = *prate;
+ 	unsigned long round_rate, i;
+ 	u32 mfn, mfd = DEFAULT_MFD;
+ 	bool found = false;
+ 	u64 temp64;
+-
+-	for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+-		round_rate = parent_rate * pllv4_mult_table[i];
+-		if (rate >= round_rate) {
++	u32 mult;
++
++	if (pll->use_mult_range) {
++		temp64 = (u64)rate;
++		do_div(temp64, parent_rate);
++		mult = temp64;
++		if (mult >= pllv4_mult_range[1] &&
++		    mult <= pllv4_mult_range[0]) {
++			round_rate = parent_rate * mult;
+ 			found = true;
+-			break;
++		}
++	} else {
++		for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
++			round_rate = parent_rate * pllv4_mult_table[i];
++			if (rate >= round_rate) {
++				found = true;
++				break;
++			}
+ 		}
+ 	}
+ 
+@@ -138,14 +155,20 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	return round_rate + (u32)temp64;
+ }
+ 
+-static bool clk_pllv4_is_valid_mult(unsigned int mult)
++static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
+ {
+ 	int i;
+ 
+ 	/* check if mult is in valid MULT table */
+-	for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+-		if (pllv4_mult_table[i] == mult)
++	if (pll->use_mult_range) {
++		if (mult >= pllv4_mult_range[1] &&
++		    mult <= pllv4_mult_range[0])
+ 			return true;
++	} else {
++		for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
++			if (pllv4_mult_table[i] == mult)
++				return true;
++		}
+ 	}
+ 
+ 	return false;
+@@ -160,7 +183,7 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ 	mult = rate / parent_rate;
+ 
+-	if (!clk_pllv4_is_valid_mult(mult))
++	if (!clk_pllv4_is_valid_mult(pll, mult))
+ 		return -EINVAL;
+ 
+ 	if (parent_rate <= MAX_MFD)
+@@ -227,10 +250,13 @@ struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ 
+ 	pll->base = base;
+ 
+-	if (type == IMX_PLLV4_IMX8ULP) {
++	if (type == IMX_PLLV4_IMX8ULP ||
++	    type == IMX_PLLV4_IMX8ULP_1GHZ) {
+ 		pll->cfg_offset = IMX8ULP_PLL_CFG_OFFSET;
+ 		pll->num_offset = IMX8ULP_PLL_NUM_OFFSET;
+ 		pll->denom_offset = IMX8ULP_PLL_DENOM_OFFSET;
++		if (type == IMX_PLLV4_IMX8ULP_1GHZ)
++			pll->use_mult_range = true;
+ 	} else {
+ 		pll->cfg_offset = PLL_CFG_OFFSET;
+ 		pll->num_offset = PLL_NUM_OFFSET;
+diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
+index af19d9f6aed09..adb7ad649a0d2 100644
+--- a/drivers/clk/imx/clk.h
++++ b/drivers/clk/imx/clk.h
+@@ -45,6 +45,7 @@ enum imx_pll14xx_type {
+ enum imx_pllv4_type {
+ 	IMX_PLLV4_IMX7ULP,
+ 	IMX_PLLV4_IMX8ULP,
++	IMX_PLLV4_IMX8ULP_1GHZ,
+ };
+ 
+ enum imx_pfdv2_type {
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index d59a7621bb204..ee5c72369334f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -209,7 +209,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
+ 	}
+ 
+ 	clk = clk_register_pll(NULL, node->name, parent_name, pll_data);
+-	if (clk) {
++	if (!IS_ERR_OR_NULL(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 		return;
+ 	}
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 263e55d75e3f5..92ef5314b59ce 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -987,6 +987,7 @@ config SM_GPUCC_8350
+ 
+ config SM_GPUCC_8450
+ 	tristate "SM8450 Graphics Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8450
+ 	help
+ 	  Support for the graphics clock controller on SM8450 devices.
+@@ -995,6 +996,7 @@ config SM_GPUCC_8450
+ 
+ config SM_GPUCC_8550
+ 	tristate "SM8550 Graphics Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8550
+ 	help
+ 	  Support for the graphics clock controller on SM8550 devices.
+@@ -1031,6 +1033,7 @@ config SM_VIDEOCC_8250
+ 
+ config SM_VIDEOCC_8350
+ 	tristate "SM8350 Video Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8350
+ 	select QCOM_GDSC
+ 	help
+@@ -1040,6 +1043,7 @@ config SM_VIDEOCC_8350
+ 
+ config SM_VIDEOCC_8550
+ 	tristate "SM8550 Video Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8550
+ 	select QCOM_GDSC
+ 	help
+@@ -1088,6 +1092,7 @@ config CLK_GFM_LPASS_SM8250
+ 
+ config SM_VIDEOCC_8450
+ 	tristate "SM8450 Video Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8450
+ 	select QCOM_GDSC
+ 	help
+diff --git a/drivers/clk/qcom/dispcc-sc8280xp.c b/drivers/clk/qcom/dispcc-sc8280xp.c
+index 167470beb3691..30f636b9f0ec8 100644
+--- a/drivers/clk/qcom/dispcc-sc8280xp.c
++++ b/drivers/clk/qcom/dispcc-sc8280xp.c
+@@ -3057,7 +3057,7 @@ static struct gdsc disp0_mdss_gdsc = {
+ 		.name = "disp0_mdss_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL,
++	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc disp1_mdss_gdsc = {
+@@ -3069,7 +3069,7 @@ static struct gdsc disp1_mdss_gdsc = {
+ 		.name = "disp1_mdss_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL,
++	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc disp0_mdss_int2_gdsc = {
+@@ -3081,7 +3081,7 @@ static struct gdsc disp0_mdss_int2_gdsc = {
+ 		.name = "disp0_mdss_int2_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL,
++	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc disp1_mdss_int2_gdsc = {
+@@ -3093,7 +3093,7 @@ static struct gdsc disp1_mdss_int2_gdsc = {
+ 		.name = "disp1_mdss_int2_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL,
++	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc *disp0_cc_sc8280xp_gdscs[] = {
+diff --git a/drivers/clk/qcom/gcc-qdu1000.c b/drivers/clk/qcom/gcc-qdu1000.c
+index 5051769ad90c7..8df7b79839680 100644
+--- a/drivers/clk/qcom/gcc-qdu1000.c
++++ b/drivers/clk/qcom/gcc-qdu1000.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/clk-provider.h>
+@@ -370,16 +370,6 @@ static const struct clk_parent_data gcc_parent_data_6[] = {
+ 	{ .index = DT_TCXO_IDX },
+ };
+ 
+-static const struct parent_map gcc_parent_map_7[] = {
+-	{ P_PCIE_0_PIPE_CLK, 0 },
+-	{ P_BI_TCXO, 2 },
+-};
+-
+-static const struct clk_parent_data gcc_parent_data_7[] = {
+-	{ .index = DT_PCIE_0_PIPE_CLK_IDX },
+-	{ .index = DT_TCXO_IDX },
+-};
+-
+ static const struct parent_map gcc_parent_map_8[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_GCC_GPLL0_OUT_MAIN, 1 },
+@@ -439,16 +429,15 @@ static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = {
+ 	},
+ };
+ 
+-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
++static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ 	.reg = 0x9d064,
+-	.shift = 0,
+-	.width = 2,
+-	.parent_map = gcc_parent_map_7,
+ 	.clkr = {
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_pcie_0_pipe_clk_src",
+-			.parent_data = gcc_parent_data_7,
+-			.num_parents = ARRAY_SIZE(gcc_parent_data_7),
++			.parent_data = &(const struct clk_parent_data){
++				.index = DT_PCIE_0_PIPE_CLK_IDX,
++			},
++			.num_parents = 1,
+ 			.ops = &clk_regmap_phy_mux_ops,
+ 		},
+ 	},
+@@ -1458,14 +1447,13 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ 
+ static struct clk_branch gcc_pcie_0_clkref_en = {
+ 	.halt_reg = 0x9c004,
+-	.halt_bit = 31,
+-	.halt_check = BRANCH_HALT_ENABLE,
++	.halt_check = BRANCH_HALT,
+ 	.clkr = {
+ 		.enable_reg = 0x9c004,
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_pcie_0_clkref_en",
+-			.ops = &clk_branch_ops,
++			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+ };
+@@ -2285,14 +2273,13 @@ static struct clk_branch gcc_tsc_etu_clk = {
+ 
+ static struct clk_branch gcc_usb2_clkref_en = {
+ 	.halt_reg = 0x9c008,
+-	.halt_bit = 31,
+-	.halt_check = BRANCH_HALT_ENABLE,
++	.halt_check = BRANCH_HALT,
+ 	.clkr = {
+ 		.enable_reg = 0x9c008,
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_usb2_clkref_en",
+-			.ops = &clk_branch_ops,
++			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index cef3c77564cfd..49f36e1df4fa8 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -651,6 +651,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_5),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
+index b90c71637814b..4d1133406ae05 100644
+--- a/drivers/clk/qcom/gcc-sc8280xp.c
++++ b/drivers/clk/qcom/gcc-sc8280xp.c
+@@ -6761,7 +6761,7 @@ static struct gdsc pcie_0_tunnel_gdsc = {
+ 		.name = "pcie_0_tunnel_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE,
++	.flags = VOTABLE | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc pcie_1_tunnel_gdsc = {
+@@ -6772,7 +6772,7 @@ static struct gdsc pcie_1_tunnel_gdsc = {
+ 		.name = "pcie_1_tunnel_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE,
++	.flags = VOTABLE | RETAIN_FF_ENABLE,
+ };
+ 
+ /*
+@@ -6787,7 +6787,7 @@ static struct gdsc pcie_2a_gdsc = {
+ 		.name = "pcie_2a_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_2b_gdsc = {
+@@ -6798,7 +6798,7 @@ static struct gdsc pcie_2b_gdsc = {
+ 		.name = "pcie_2b_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_3a_gdsc = {
+@@ -6809,7 +6809,7 @@ static struct gdsc pcie_3a_gdsc = {
+ 		.name = "pcie_3a_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_3b_gdsc = {
+@@ -6820,7 +6820,7 @@ static struct gdsc pcie_3b_gdsc = {
+ 		.name = "pcie_3b_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_4_gdsc = {
+@@ -6831,7 +6831,7 @@ static struct gdsc pcie_4_gdsc = {
+ 		.name = "pcie_4_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc ufs_card_gdsc = {
+@@ -6840,6 +6840,7 @@ static struct gdsc ufs_card_gdsc = {
+ 		.name = "ufs_card_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc ufs_phy_gdsc = {
+@@ -6848,6 +6849,7 @@ static struct gdsc ufs_phy_gdsc = {
+ 		.name = "ufs_phy_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_mp_gdsc = {
+@@ -6856,6 +6858,7 @@ static struct gdsc usb30_mp_gdsc = {
+ 		.name = "usb30_mp_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_prim_gdsc = {
+@@ -6864,6 +6867,7 @@ static struct gdsc usb30_prim_gdsc = {
+ 		.name = "usb30_prim_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_sec_gdsc = {
+@@ -6872,6 +6876,7 @@ static struct gdsc usb30_sec_gdsc = {
+ 		.name = "usb30_sec_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc emac_0_gdsc = {
+@@ -6880,6 +6885,7 @@ static struct gdsc emac_0_gdsc = {
+ 		.name = "emac_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc emac_1_gdsc = {
+@@ -6888,6 +6894,97 @@ static struct gdsc emac_1_gdsc = {
+ 		.name = "emac_1_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc usb4_1_gdsc = {
++	.gdscr = 0xb8004,
++	.pd = {
++		.name = "usb4_1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc usb4_gdsc = {
++	.gdscr = 0x2a004,
++	.pd = {
++		.name = "usb4_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
++	.gdscr = 0x7d050,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
++	.gdscr = 0x7d058,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
++	.gdscr = 0x7d054,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
++	.gdscr = 0x7d06c,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
++	.gdscr = 0x7d05c,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
++	.gdscr = 0x7d060,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu2_gdsc = {
++	.gdscr = 0x7d0a0,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu2_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu3_gdsc = {
++	.gdscr = 0x7d0a4,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu3_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
+ };
+ 
+ static struct clk_regmap *gcc_sc8280xp_clocks[] = {
+@@ -7370,6 +7467,16 @@ static struct gdsc *gcc_sc8280xp_gdscs[] = {
+ 	[USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ 	[EMAC_0_GDSC] = &emac_0_gdsc,
+ 	[EMAC_1_GDSC] = &emac_1_gdsc,
++	[USB4_1_GDSC] = &usb4_1_gdsc,
++	[USB4_GDSC] = &usb4_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU2_GDSC] = &hlos1_vote_turing_mmu_tbu2_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU3_GDSC] = &hlos1_vote_turing_mmu_tbu3_gdsc,
+ };
+ 
+ static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+@@ -7432,8 +7539,8 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
+ 
+ 	regmap = qcom_cc_map(pdev, &gcc_sc8280xp_desc);
+ 	if (IS_ERR(regmap)) {
+-		pm_runtime_put(&pdev->dev);
+-		return PTR_ERR(regmap);
++		ret = PTR_ERR(regmap);
++		goto err_put_rpm;
+ 	}
+ 
+ 	/*
+@@ -7454,11 +7561,19 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
+ 
+ 	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ 	if (ret)
+-		return ret;
++		goto err_put_rpm;
+ 
+ 	ret = qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
++	if (ret)
++		goto err_put_rpm;
++
+ 	pm_runtime_put(&pdev->dev);
+ 
++	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index 9b4e4bb059635..cf4a7b6e0b23a 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -641,6 +641,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_8),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm7150.c b/drivers/clk/qcom/gcc-sm7150.c
+index 6b628178f62c4..6da87f0436d0c 100644
+--- a/drivers/clk/qcom/gcc-sm7150.c
++++ b/drivers/clk/qcom/gcc-sm7150.c
+@@ -739,6 +739,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.parent_data = gcc_parent_data_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ 		.ops = &clk_rcg2_floor_ops,
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index b6cf4bc88d4d4..d3c75bb55946a 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -721,6 +721,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_4),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
+index 75635d40a12d3..9f4f72553ecf2 100644
+--- a/drivers/clk/qcom/gcc-sm8450.c
++++ b/drivers/clk/qcom/gcc-sm8450.c
+@@ -935,7 +935,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.parent_data = gcc_parent_data_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -958,7 +958,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ 		.parent_data = gcc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
+index ef15185a99c31..0bcbba2a29436 100644
+--- a/drivers/clk/qcom/gpucc-sm6350.c
++++ b/drivers/clk/qcom/gpucc-sm6350.c
+@@ -24,6 +24,12 @@
+ #define CX_GMU_CBCR_WAKE_MASK		0xF
+ #define CX_GMU_CBCR_WAKE_SHIFT		8
+ 
++enum {
++	DT_BI_TCXO,
++	DT_GPLL0_OUT_MAIN,
++	DT_GPLL0_OUT_MAIN_DIV,
++};
++
+ enum {
+ 	P_BI_TCXO,
+ 	P_GPLL0_OUT_MAIN,
+@@ -61,6 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpu_cc_pll0",
+ 			.parent_data =  &(const struct clk_parent_data){
++				.index = DT_BI_TCXO,
+ 				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+@@ -104,6 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpu_cc_pll1",
+ 			.parent_data =  &(const struct clk_parent_data){
++				.index = DT_BI_TCXO,
+ 				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+@@ -121,11 +129,11 @@ static const struct parent_map gpu_cc_parent_map_0[] = {
+ };
+ 
+ static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+-	{ .fw_name = "bi_tcxo" },
++	{ .index = DT_BI_TCXO, .fw_name = "bi_tcxo" },
+ 	{ .hw = &gpu_cc_pll0.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+-	{ .fw_name = "gcc_gpu_gpll0_clk" },
+-	{ .fw_name = "gcc_gpu_gpll0_div_clk" },
++	{ .index = DT_GPLL0_OUT_MAIN, .fw_name = "gcc_gpu_gpll0_clk_src" },
++	{ .index = DT_GPLL0_OUT_MAIN_DIV, .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+ };
+ 
+ static const struct parent_map gpu_cc_parent_map_1[] = {
+@@ -138,12 +146,12 @@ static const struct parent_map gpu_cc_parent_map_1[] = {
+ };
+ 
+ static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+-	{ .fw_name = "bi_tcxo" },
++	{ .index = DT_BI_TCXO, .fw_name = "bi_tcxo" },
+ 	{ .hw = &crc_div.hw },
+ 	{ .hw = &gpu_cc_pll0.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+-	{ .fw_name = "gcc_gpu_gpll0_clk" },
++	{ .index = DT_GPLL0_OUT_MAIN, .fw_name = "gcc_gpu_gpll0_clk_src" },
+ };
+ 
+ static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
+index 0e914ec7aeae1..e45e32804d2c7 100644
+--- a/drivers/clk/qcom/reset.c
++++ b/drivers/clk/qcom/reset.c
+@@ -16,7 +16,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+ 	struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev);
+ 
+ 	rcdev->ops->assert(rcdev, id);
+-	udelay(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
++	fsleep(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
++
+ 	rcdev->ops->deassert(rcdev, id);
+ 	return 0;
+ }
+diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
+index f85902e2590c7..2f54f630c8b65 100644
+--- a/drivers/clk/rockchip/clk-rk3568.c
++++ b/drivers/clk/rockchip/clk-rk3568.c
+@@ -81,7 +81,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
+ 	RK3036_PLL_RATE(108000000, 2, 45, 5, 1, 1, 0),
+ 	RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
+ 	RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+-	RK3036_PLL_RATE(78750000, 1, 96, 6, 4, 1, 0),
++	RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
+ 	RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ 	{ /* sentinel */ },
+ };
+diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+index 23a8d44e2449b..78919d7843bec 100644
+--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
++++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+@@ -43,7 +43,7 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
+ EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
+ 
+ /**
+- * sunxi_ccu_set_mmc_timing_mode: Get the current MMC clock timing mode
++ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode
+  * @clk: clock to query
+  *
+  * Return: %0 if the clock is in old timing mode, > %0 if it is in
+diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
+index 62962ae84b77d..497bc05dca4df 100644
+--- a/drivers/counter/Kconfig
++++ b/drivers/counter/Kconfig
+@@ -92,7 +92,7 @@ config MICROCHIP_TCB_CAPTURE
+ 
+ config RZ_MTU3_CNT
+ 	tristate "Renesas RZ/G2L MTU3a counter driver"
+-	depends on RZ_MTU3 || COMPILE_TEST
++	depends on RZ_MTU3
+ 	help
+ 	  Enable support for MTU3a counter driver found on Renesas RZ/G2L alike
+ 	  SoCs. This IP supports both 16-bit and 32-bit phase counting mode
+diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
+index 7f3fe20489818..502d494499ae8 100644
+--- a/drivers/cpufreq/amd-pstate-ut.c
++++ b/drivers/cpufreq/amd-pstate-ut.c
+@@ -64,27 +64,9 @@ static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
+ static bool get_shared_mem(void)
+ {
+ 	bool result = false;
+-	char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
+-	char buf[5] = {0};
+-	struct file *filp = NULL;
+-	loff_t pos = 0;
+-	ssize_t ret;
+-
+-	if (!boot_cpu_has(X86_FEATURE_CPPC)) {
+-		filp = filp_open(path, O_RDONLY, 0);
+-		if (IS_ERR(filp))
+-			pr_err("%s unable to open %s file!\n", __func__, path);
+-		else {
+-			ret = kernel_read(filp, &buf, sizeof(buf), &pos);
+-			if (ret < 0)
+-				pr_err("%s read %s file fail ret=%ld!\n",
+-					__func__, path, (long)ret);
+-			filp_close(filp, NULL);
+-		}
+ 
+-		if ('Y' == *buf)
+-			result = true;
+-	}
++	if (!boot_cpu_has(X86_FEATURE_CPPC))
++		result = true;
+ 
+ 	return result;
+ }
+@@ -158,7 +140,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			if (ret) {
+ 				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
+-				return;
++				goto skip_test;
+ 			}
+ 
+ 			nominal_perf = cppc_perf.nominal_perf;
+@@ -169,7 +151,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			if (ret) {
+ 				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
+-				return;
++				goto skip_test;
+ 			}
+ 
+ 			nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
+@@ -187,7 +169,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 				nominal_perf, cpudata->nominal_perf,
+ 				lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
+ 				lowest_perf, cpudata->lowest_perf);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (!((highest_perf >= nominal_perf) &&
+@@ -198,11 +180,15 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
+ 				__func__, cpu, highest_perf, nominal_perf,
+ 				lowest_nonlinear_perf, lowest_perf);
+-			return;
++			goto skip_test;
+ 		}
++		cpufreq_cpu_put(policy);
+ 	}
+ 
+ 	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
++	return;
++skip_test:
++	cpufreq_cpu_put(policy);
+ }
+ 
+ /*
+@@ -230,14 +216,14 @@ static void amd_pstate_ut_check_freq(u32 index)
+ 			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
+ 				__func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ 				cpudata->lowest_nonlinear_freq, cpudata->min_freq);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (cpudata->min_freq != policy->min) {
+ 			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 			pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
+ 				__func__, cpu, cpudata->min_freq, policy->min);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (cpudata->boost_supported) {
+@@ -249,16 +235,20 @@ static void amd_pstate_ut_check_freq(u32 index)
+ 				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
+ 					__func__, cpu, policy->max, cpudata->max_freq,
+ 					cpudata->nominal_freq);
+-				return;
++				goto skip_test;
+ 			}
+ 		} else {
+ 			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
+-			return;
++			goto skip_test;
+ 		}
++		cpufreq_cpu_put(policy);
+ 	}
+ 
+ 	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
++	return;
++skip_test:
++	cpufreq_cpu_put(policy);
+ }
+ 
+ static int __init amd_pstate_ut_init(void)
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index ffea6402189d3..3052949aebbc7 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+-	table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
++	/*
++	 * We allocate space for the 5 different P-STATES AVS,
++	 * plus extra space for a terminating element.
++	 */
++	table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
+ 			     GFP_KERNEL);
+ 	if (!table)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 50bbc969ffe53..5c655d7b96d4f 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -455,8 +455,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ 			    policy->cur,
+ 			    policy->cpuinfo.max_freq);
+ 
++	spin_lock(&policy->transition_lock);
+ 	policy->transition_ongoing = false;
+ 	policy->transition_task = NULL;
++	spin_unlock(&policy->transition_lock);
+ 
+ 	wake_up(&policy->transition_wait);
+ }
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 8ca2bce4341a4..dc50c9fb488df 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2609,6 +2609,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ 			intel_pstate_clear_update_util_hook(policy->cpu);
+ 		intel_pstate_hwp_set(policy->cpu);
+ 	}
++	/*
++	 * policy->cur is never updated with the intel_pstate driver, but it
++	 * is used as a stale frequency value. So, keep it within limits.
++	 */
++	policy->cur = policy->min;
+ 
+ 	mutex_unlock(&intel_pstate_limits_lock);
+ 
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index d289036beff23..b10f7a1b77f11 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1101,7 +1101,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+ 
+ 	kfree(data->powernow_table);
+ 	kfree(data);
+-	for_each_cpu(cpu, pol->cpus)
++	/* pol->cpus will be empty here, use related_cpus instead. */
++	for_each_cpu(cpu, pol->related_cpus)
+ 		per_cpu(powernow_data, cpu) = NULL;
+ 
+ 	return 0;
+diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
+index 36dad5ea59475..75f1e611d0aab 100644
+--- a/drivers/cpufreq/tegra194-cpufreq.c
++++ b/drivers/cpufreq/tegra194-cpufreq.c
+@@ -508,6 +508,32 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
+ 	return 0;
+ }
+ 
++static int tegra194_cpufreq_online(struct cpufreq_policy *policy)
++{
++	/* We did light-weight tear down earlier, nothing to do here */
++	return 0;
++}
++
++static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
++{
++	/*
++	 * Preserve policy->driver_data and don't free resources on light-weight
++	 * tear down.
++	 */
++
++	return 0;
++}
++
++static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
++{
++	struct device *cpu_dev = get_cpu_device(policy->cpu);
++
++	dev_pm_opp_remove_all_dynamic(cpu_dev);
++	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
++
++	return 0;
++}
++
+ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
+ 				       unsigned int index)
+ {
+@@ -535,6 +561,9 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
+ 	.target_index = tegra194_cpufreq_set_target,
+ 	.get = tegra194_get_speed,
+ 	.init = tegra194_cpufreq_init,
++	.exit = tegra194_cpufreq_exit,
++	.online = tegra194_cpufreq_online,
++	.offline = tegra194_cpufreq_offline,
+ 	.attr = cpufreq_generic_attr,
+ };
+ 
+diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
+index a7d33f3ee01e7..14db9b7d985d1 100644
+--- a/drivers/cpuidle/cpuidle-pseries.c
++++ b/drivers/cpuidle/cpuidle-pseries.c
+@@ -414,13 +414,7 @@ static int __init pseries_idle_probe(void)
+ 		return -ENODEV;
+ 
+ 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+-		/*
+-		 * Use local_paca instead of get_lppaca() since
+-		 * preemption is not disabled, and it is not required in
+-		 * fact, since lppaca_ptr does not need to be the value
+-		 * associated to the current CPU, it can be from any CPU.
+-		 */
+-		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
++		if (lppaca_shared_proc()) {
+ 			cpuidle_state_table = shared_states;
+ 			max_idle_state = ARRAY_SIZE(shared_states);
+ 		} else {
+diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
+index 987fc5f3997dc..2cdc711679a5f 100644
+--- a/drivers/cpuidle/governors/teo.c
++++ b/drivers/cpuidle/governors/teo.c
+@@ -397,13 +397,23 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 	 * the shallowest non-polling state and exit.
+ 	 */
+ 	if (drv->state_count < 3 && cpu_data->utilized) {
+-		for (i = 0; i < drv->state_count; ++i) {
+-			if (!dev->states_usage[i].disable &&
+-			    !(drv->states[i].flags & CPUIDLE_FLAG_POLLING)) {
+-				idx = i;
+-				goto end;
+-			}
+-		}
++		/* The CPU is utilized, so assume a short idle duration. */
++		duration_ns = teo_middle_of_bin(0, drv);
++		/*
++		 * If state 0 is enabled and it is not a polling one, select it
++		 * right away unless the scheduler tick has been stopped, in
++		 * which case care needs to be taken to leave the CPU in a deep
++		 * enough state in case it is not woken up any time soon after
++		 * all.  If state 1 is disabled, though, state 0 must be used
++		 * anyway.
++		 */
++		if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
++		    teo_time_ok(duration_ns)) || dev->states_usage[1].disable)
++			idx = 0;
++		else /* Assume that state 1 is not a polling one and use it. */
++			idx = 1;
++
++		goto end;
+ 	}
+ 
+ 	/*
+@@ -539,10 +549,20 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 
+ 	/*
+ 	 * If the CPU is being utilized over the threshold, choose a shallower
+-	 * non-polling state to improve latency
++	 * non-polling state to improve latency, unless the scheduler tick has
++	 * been stopped already and the shallower state's target residency is
++	 * not sufficiently large.
+ 	 */
+-	if (cpu_data->utilized)
+-		idx = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
++	if (cpu_data->utilized) {
++		s64 span_ns;
++
++		i = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
++		span_ns = teo_middle_of_bin(i, drv);
++		if (teo_time_ok(span_ns)) {
++			idx = i;
++			duration_ns = span_ns;
++		}
++	}
+ 
+ end:
+ 	/*
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 72afc249d42fb..7e08af751e4ea 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -225,7 +225,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+ 		if (len && *buff)
+ 			break;
+ 
+-		sg_miter_next(&miter);
++		if (!sg_miter_next(&miter))
++			break;
++
+ 		buff = miter.addr;
+ 		len = miter.length;
+ 
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+index e543a9e24a06f..3eda91aa7c112 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+@@ -223,6 +223,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ 			  ICP_ACCEL_CAPABILITIES_HKDF |
+ 			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ 			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
++			  ICP_ACCEL_CAPABILITIES_SM3 |
++			  ICP_ACCEL_CAPABILITIES_SM4 |
+ 			  ICP_ACCEL_CAPABILITIES_AES_V2;
+ 
+ 	/* A set bit in fusectl1 means the feature is OFF in this SKU */
+@@ -246,12 +248,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ 	}
+ 
++	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
++		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
++		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
++	}
++
+ 	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ 			  ICP_ACCEL_CAPABILITIES_CIPHER |
++			  ICP_ACCEL_CAPABILITIES_SM2 |
+ 			  ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ 
+ 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ 		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
++		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ 		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ 	}
+ 
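
The capability code follows a declare-then-mask pattern: start from the full set of features the IP block could expose, then clear bits for every slice the fuse register reports as disabled. Reduced to a sketch (the SM bit positions echo the header below; the fuse mask value is made up):

#include <stdint.h>
#include <stdio.h>

#define CAP_SM2		(1u << 18)
#define CAP_SM3		(1u << 19)
#define CAP_SM4		(1u << 20)
#define FUSE_SMX_SLICE	(1u << 3)	/* a set bit means fused OFF */

int main(void)
{
	uint32_t fusectl1 = FUSE_SMX_SLICE;	/* pretend SMx is fused off */
	uint32_t caps = CAP_SM2 | CAP_SM3 | CAP_SM4;

	if (fusectl1 & FUSE_SMX_SLICE)
		caps &= ~(CAP_SM3 | CAP_SM4);	/* mask what the SKU lacks */

	printf("caps = %#x\n", caps);
	return 0;
}
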
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+index dd112923e006d..c2768762cca3b 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+@@ -35,7 +35,7 @@
+ #define ADF_GEN4_PM_MSG_PENDING			BIT(0)
+ #define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK	GENMASK(28, 1)
+ 
+-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER		(0x0)
++#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER		(0x6)
+ #define ADF_GEN4_PM_MAX_IDLE_FILTER		(0x7)
+ #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT	(0x1)
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+index a65059e56248a..0c8883e2ccc6d 100644
+--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
++++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+@@ -97,7 +97,10 @@ enum icp_qat_capabilities_mask {
+ 	ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+ 	ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+ 	ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+-	/* Bits 18-21 are currently reserved */
++	ICP_ACCEL_CAPABILITIES_SM2 = BIT(18),
++	ICP_ACCEL_CAPABILITIES_SM3 = BIT(19),
++	ICP_ACCEL_CAPABILITIES_SM4 = BIT(20),
++	/* Bit 21 is currently reserved */
+ 	ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+ 	ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+ 	ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
+index f0df32382719c..fabae6da627b9 100644
+--- a/drivers/crypto/stm32/stm32-hash.c
++++ b/drivers/crypto/stm32/stm32-hash.c
+@@ -492,7 +492,7 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
+ 
+ 	reg = stm32_hash_read(hdev, HASH_CR);
+ 
+-	if (!hdev->pdata->has_mdmat) {
++	if (hdev->pdata->has_mdmat) {
+ 		if (mdma)
+ 			reg |= HASH_CR_MDMAT;
+ 		else
+@@ -627,9 +627,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+ 	}
+ 
+ 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
++		sg[0] = *tsg;
+ 		len = sg->length;
+ 
+-		sg[0] = *tsg;
+ 		if (sg_is_last(sg)) {
+ 			if (hdev->dma_mode == 1) {
+ 				len = (ALIGN(sg->length, 16) - 16);
+@@ -1705,9 +1705,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	if (!hdev)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_resume_and_get(hdev->dev);
+-	if (ret < 0)
+-		return ret;
++	ret = pm_runtime_get_sync(hdev->dev);
+ 
+ 	stm32_hash_unregister_algs(hdev);
+ 
+@@ -1723,7 +1721,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	pm_runtime_disable(hdev->dev);
+ 	pm_runtime_put_noidle(hdev->dev);
+ 
+-	clk_disable_unprepare(hdev->clk);
++	if (ret >= 0)
++		clk_disable_unprepare(hdev->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index e36cbb920ec88..9464f8d3cb5b4 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -763,6 +763,7 @@ static void devfreq_dev_release(struct device *dev)
+ 		dev_pm_opp_put_opp_table(devfreq->opp_table);
+ 
+ 	mutex_destroy(&devfreq->lock);
++	srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
+ 	kfree(devfreq);
+ }
+ 
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 293739ac55969..a5c3eb4348325 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1095,8 +1095,8 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
+ 	if (wq->state != IDXD_WQ_DISABLED)
+ 		return -EPERM;
+ 
+-	if (!idxd->hw.wq_cap.wq_ats_support)
+-		return -EOPNOTSUPP;
++	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++		return -EPERM;
+ 
+ 	rc = kstrtobool(buf, &ats_dis);
+ 	if (rc < 0)
+@@ -1131,8 +1131,8 @@ static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute
+ 	if (wq->state != IDXD_WQ_DISABLED)
+ 		return -EPERM;
+ 
+-	if (!idxd->hw.wq_cap.wq_prs_support)
+-		return -EOPNOTSUPP;
++	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++		return -EPERM;
+ 
+ 	rc = kstrtobool(buf, &prs_dis);
+ 	if (rc < 0)
+@@ -1288,12 +1288,9 @@ static struct attribute *idxd_wq_attributes[] = {
+ 	NULL,
+ };
+ 
+-static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
+-					     struct idxd_device *idxd)
+-{
+-	return attr == &dev_attr_wq_op_config.attr &&
+-	       !idxd->hw.wq_cap.op_config;
+-}
++/* A WQ attr is invisible if the feature is not supported in WQCAP. */
++#define idxd_wq_attr_invisible(name, cap_field, a, idxd)		\
++	((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
+ 
+ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
+ 						  struct idxd_device *idxd)
+@@ -1303,13 +1300,6 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
+ 	       idxd->data->type == IDXD_TYPE_IAX;
+ }
+ 
+-static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
+-						  struct idxd_device *idxd)
+-{
+-	return attr == &dev_attr_wq_prs_disable.attr &&
+-	       !idxd->hw.wq_cap.wq_prs_support;
+-}
+-
+ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ 				    struct attribute *attr, int n)
+ {
+@@ -1317,13 +1307,16 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ 	struct idxd_wq *wq = confdev_to_wq(dev);
+ 	struct idxd_device *idxd = wq->idxd;
+ 
+-	if (idxd_wq_attr_op_config_invisible(attr, idxd))
++	if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
+ 		return 0;
+ 
+ 	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
+ 		return 0;
+ 
+-	if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
++	if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
++		return 0;
++
++	if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
+ 		return 0;
+ 
+ 	return attr->mode;
+@@ -1480,7 +1473,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
+ {
+ 	struct idxd_device *idxd = confdev_to_idxd(dev);
+ 
+-	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
++	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
+ }
+ static DEVICE_ATTR_RO(pasid_enabled);
+ 
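
The idxd change above folds three near-identical visibility predicates into one token-pasting macro. How that style of deduplication works, in a self-contained sketch (structure and field names here are invented for illustration):

#include <stdio.h>

struct hw_caps { int op_config; int prs_support; };
struct attribute { const char *name; };

static struct attribute dev_attr_wq_op_config = { "op_config" };
static struct attribute dev_attr_wq_prs_disable = { "prs_disable" };

/* One macro replaces a family of near-identical predicate functions:
 * ##name selects the attribute, cap_field selects the capability bit. */
#define wq_attr_invisible(name, cap_field, a, caps) \
	((a) == &dev_attr_wq_##name && !(caps)->cap_field)

int main(void)
{
	struct hw_caps caps = { .op_config = 1, .prs_support = 0 };

	printf("%d\n", wq_attr_invisible(op_config, op_config,
					 &dev_attr_wq_op_config, &caps));
	printf("%d\n", wq_attr_invisible(prs_disable, prs_support,
					 &dev_attr_wq_prs_disable, &caps));
	return 0;
}
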
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 825001bde42c4..89e82508c1339 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3590,6 +3590,10 @@ static int __init d40_probe(struct platform_device *pdev)
+ 	spin_lock_init(&base->lcla_pool.lock);
+ 
+ 	base->irq = platform_get_irq(pdev, 0);
++	if (base->irq < 0) {
++		ret = base->irq;
++		goto destroy_cache;
++	}
+ 
+ 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+ 	if (ret) {
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index a897b6aff3686..349ff6cfb3796 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -658,13 +658,49 @@ static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsi
+ 	return mdev;
+ }
+ 
++/**
++ * i10nm_imc_absent() - Check whether the memory controller @imc is absent
++ *
++ * @imc    : The pointer to the structure of memory controller EDAC device.
++ *
++ * RETURNS : true if the memory controller EDAC device is absent, false otherwise.
++ */
++static bool i10nm_imc_absent(struct skx_imc *imc)
++{
++	u32 mcmtr;
++	int i;
++
++	switch (res_cfg->type) {
++	case SPR:
++		for (i = 0; i < res_cfg->ddr_chan_num; i++) {
++			mcmtr = I10NM_GET_MCMTR(imc, i);
++			edac_dbg(1, "ch%d mcmtr reg %x\n", i, mcmtr);
++			if (mcmtr != ~0)
++				return false;
++		}
++
++		/*
++		 * Some workstations' absent memory controllers still
++		 * appear as PCIe devices, misleading the EDAC driver.
++		 * The MMIO registers of these absent memory controllers
++		 * were observed to consistently hold the value of ~0.
++		 *
++		 * We identify a memory controller as absent by checking
++		 * if its MMIO register "mcmtr" == ~0 in all its channels.
++		 */
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static int i10nm_get_ddr_munits(void)
+ {
+ 	struct pci_dev *mdev;
+ 	void __iomem *mbase;
+ 	unsigned long size;
+ 	struct skx_dev *d;
+-	int i, j = 0;
++	int i, lmc, j = 0;
+ 	u32 reg, off;
+ 	u64 base;
+ 
+@@ -690,7 +726,7 @@ static int i10nm_get_ddr_munits(void)
+ 		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
+ 			 j++, base, reg);
+ 
+-		for (i = 0; i < res_cfg->ddr_imc_num; i++) {
++		for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) {
+ 			mdev = get_ddr_munit(d, i, &off, &size);
+ 
+ 			if (i == 0 && !mdev) {
+@@ -700,8 +736,6 @@ static int i10nm_get_ddr_munits(void)
+ 			if (!mdev)
+ 				continue;
+ 
+-			d->imc[i].mdev = mdev;
+-
+ 			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
+ 				 i, base + off, size, reg);
+ 
+@@ -712,7 +746,17 @@ static int i10nm_get_ddr_munits(void)
+ 				return -ENODEV;
+ 			}
+ 
+-			d->imc[i].mbase = mbase;
++			d->imc[lmc].mbase = mbase;
++			if (i10nm_imc_absent(&d->imc[lmc])) {
++				pci_dev_put(mdev);
++				iounmap(mbase);
++				d->imc[lmc].mbase = NULL;
++				edac_dbg(2, "Skip absent mc%d\n", i);
++				continue;
++			} else {
++				d->imc[lmc].mdev = mdev;
++				lmc++;
++			}
+ 		}
+ 	}
+ 
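
The enumeration rework above separates the physical controller index i from the logical index lmc, so skipping an absent controller (one whose channels all read ~0 over MMIO) leaves no hole in the imc[] array. The compaction idiom in isolation, with absent() as a stand-in for the mcmtr probe:

#include <stdbool.h>
#include <stdio.h>

#define NUM_MC 4

static bool absent(int phys)	/* stand-in for the mcmtr == ~0 check */
{
	return phys == 1;
}

int main(void)
{
	int imc[NUM_MC], lmc = 0;
	int i;

	for (i = 0; i < NUM_MC; i++) {
		if (absent(i))
			continue;	/* skip, keeping the array dense */
		imc[lmc++] = i;
	}
	for (i = 0; i < lmc; i++)
		printf("logical mc%d -> physical mc%d\n", i, imc[i]);
	return 0;
}
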
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index 544dd19072eab..1a18693294db4 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -27,7 +27,7 @@
+ #include "edac_mc.h"
+ #include "edac_module.h"
+ 
+-#define IGEN6_REVISION	"v2.5"
++#define IGEN6_REVISION	"v2.5.1"
+ 
+ #define EDAC_MOD_STR	"igen6_edac"
+ #define IGEN6_NMI_NAME	"igen6_ibecc"
+@@ -1216,9 +1216,6 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&ecclog_work, ecclog_work_cb);
+ 	init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);
+ 
+-	/* Check if any pending errors before registering the NMI handler */
+-	ecclog_handler();
+-
+ 	rc = register_err_handler();
+ 	if (rc)
+ 		goto fail3;
+@@ -1230,6 +1227,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto fail4;
+ 	}
+ 
+ 	/* Check for any pending errors before/during the registration of the error handler */
++	ecclog_handler();
++
+ 	igen6_debug_setup();
+ 	return 0;
+ fail4:
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 0ef1971d22bb0..8de9023c2a387 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -62,6 +62,7 @@ config EXTCON_INTEL_CHT_WC
+ 	tristate "Intel Cherrytrail Whiskey Cove PMIC extcon driver"
+ 	depends on INTEL_SOC_PMIC_CHTWC
+ 	depends on USB_SUPPORT
++	depends on POWER_SUPPLY
+ 	select USB_ROLE_SWITCH
+ 	help
+ 	  Say Y here to enable extcon support for charger detection / control
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index f9040bd610812..285fe7ad490d1 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -1095,3 +1095,22 @@ int sdei_event_handler(struct pt_regs *regs,
+ 	return err;
+ }
+ NOKPROBE_SYMBOL(sdei_event_handler);
++
++void sdei_handler_abort(void)
++{
++	/*
++	 * If the crash happened in an SDEI event handler then we need to
++	 * finish the handler with the firmware so that we can have working
++	 * interrupts in the crash kernel.
++	 */
++	if (__this_cpu_read(sdei_active_critical_event)) {
++	        pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
++	        __sdei_handler_abort();
++	        __this_cpu_write(sdei_active_critical_event, NULL);
++	}
++	if (__this_cpu_read(sdei_active_normal_event)) {
++	        pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
++	        __sdei_handler_abort();
++	        __this_cpu_write(sdei_active_normal_event, NULL);
++	}
++}
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 6a9aa97373d37..49b70c70dc696 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -978,7 +978,8 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
+ 		    ctl->alg_region.alg == alg_region->alg &&
+ 		    ctl->alg_region.type == alg_region->type) {
+ 			if ((!subname && !ctl->subname) ||
+-			    (subname && !strncmp(ctl->subname, subname, ctl->subname_len))) {
++			    (subname && (ctl->subname_len == subname_len) &&
++			     !strncmp(ctl->subname, subname, ctl->subname_len))) {
+ 				if (!ctl->enabled)
+ 					ctl->enabled = 1;
+ 				return 0;
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 220be75a5cdc1..146477da2b98c 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -72,7 +72,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+ 	rom->data.type	= SETUP_PCI;
+ 	rom->data.len	= size - sizeof(struct setup_data);
+ 	rom->data.next	= 0;
+-	rom->pcilen	= pci->romsize;
++	rom->pcilen	= romsize;
+ 	*__rom = rom;
+ 
+ 	status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index 798bcdb05d84e..9a2656d73600b 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -292,6 +292,8 @@ static int __init meson_sm_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	chip = of_match_device(meson_sm_ids, dev)->data;
++	if (!chip)
++		return -EINVAL;
+ 
+ 	if (chip->cmd_shmem_in_base) {
+ 		fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 039d92a595ec6..91aaa0ca9bde8 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -97,7 +97,6 @@ struct ti_sci_desc {
+  * @node:	list head
+  * @host_id:	Host ID
+  * @users:	Number of users of this instance
+- * @is_suspending: Flag set to indicate in suspend path.
+  */
+ struct ti_sci_info {
+ 	struct device *dev;
+@@ -116,7 +115,6 @@ struct ti_sci_info {
+ 	u8 host_id;
+ 	/* protected by ti_sci_list_mutex */
+ 	int users;
+-	bool is_suspending;
+ };
+ 
+ #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
+@@ -418,14 +416,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ 
+ 	ret = 0;
+ 
+-	if (!info->is_suspending) {
++	if (system_state <= SYSTEM_RUNNING) {
+ 		/* And we wait for the response. */
+ 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ 		if (!wait_for_completion_timeout(&xfer->done, timeout))
+ 			ret = -ETIMEDOUT;
+ 	} else {
+ 		/*
+-		 * If we are suspending, we cannot use wait_for_completion_timeout
++		 * If we are !running, we cannot use wait_for_completion_timeout
+ 		 * during noirq phase, so we must manually poll the completion.
+ 		 */
+ 		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+@@ -3281,35 +3279,6 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ 	return NOTIFY_BAD;
+ }
+ 
+-static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+-{
+-	info->is_suspending = is_suspending;
+-}
+-
+-static int ti_sci_suspend(struct device *dev)
+-{
+-	struct ti_sci_info *info = dev_get_drvdata(dev);
+-	/*
+-	 * We must switch operation to polled mode now as drivers and the genpd
+-	 * layer may make late TI SCI calls to change clock and device states
+-	 * from the noirq phase of suspend.
+-	 */
+-	ti_sci_set_is_suspending(info, true);
+-
+-	return 0;
+-}
+-
+-static int ti_sci_resume(struct device *dev)
+-{
+-	struct ti_sci_info *info = dev_get_drvdata(dev);
+-
+-	ti_sci_set_is_suspending(info, false);
+-
+-	return 0;
+-}
+-
+-static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+-
+ /* Description for K2G */
+ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ 	.default_host_id = 2,
+@@ -3516,7 +3485,6 @@ static struct platform_driver ti_sci_driver = {
+ 	.driver = {
+ 		   .name = "ti-sci",
+ 		   .of_match_table = of_match_ptr(ti_sci_of_match),
+-		   .pm = &ti_sci_pm_ops,
+ 	},
+ };
+ module_platform_driver(ti_sci_driver);
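
The ti_sci change replaces a driver-private is_suspending flag with the global system_state: any state past SYSTEM_RUNNING implies the noirq constraint, so the transport polls the completion instead of sleeping, and the dev_pm_ops bookkeeping disappears entirely. The decision in isolation (the enum below mirrors the kernel's ordering):

#include <stdio.h>

enum system_states { SYSTEM_BOOTING, SYSTEM_SCHEDULING,
		     SYSTEM_FREEING_INITMEM, SYSTEM_RUNNING, SYSTEM_HALT,
		     SYSTEM_POWER_OFF, SYSTEM_RESTART, SYSTEM_SUSPEND };

static const char *wait_strategy(enum system_states state)
{
	/* Sleeping waits are only safe while the system is fully running. */
	return state <= SYSTEM_RUNNING ? "wait_for_completion_timeout"
				       : "poll completion atomically";
}

int main(void)
{
	printf("%s\n", wait_strategy(SYSTEM_RUNNING));
	printf("%s\n", wait_strategy(SYSTEM_SUSPEND));
	return 0;
}
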
+diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
+index 7cec1772820d3..5eccab175e86b 100644
+--- a/drivers/fsi/fsi-master-aspeed.c
++++ b/drivers/fsi/fsi-master-aspeed.c
+@@ -454,6 +454,8 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
+ 	gpiod_set_value(aspeed->cfam_reset_gpio, 1);
+ 	usleep_range(900, 1000);
+ 	gpiod_set_value(aspeed->cfam_reset_gpio, 0);
++	usleep_range(900, 1000);
++	opb_writel(aspeed, ctrl_base + FSI_MRESP0, cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ 	mutex_unlock(&aspeed->lock);
+ 	trace_fsi_master_aspeed_cfam_reset(false);
+ 
+diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
+index 0a7264aabe488..324e942c0650b 100644
+--- a/drivers/gpio/gpio-zynq.c
++++ b/drivers/gpio/gpio-zynq.c
+@@ -575,6 +575,26 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
+ 	return 0;
+ }
+ 
++static int zynq_gpio_irq_reqres(struct irq_data *d)
++{
++	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
++	int ret;
++
++	ret = pm_runtime_resume_and_get(chip->parent);
++	if (ret < 0)
++		return ret;
++
++	return gpiochip_reqres_irq(chip, d->hwirq);
++}
++
++static void zynq_gpio_irq_relres(struct irq_data *d)
++{
++	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
++
++	gpiochip_relres_irq(chip, d->hwirq);
++	pm_runtime_put(chip->parent);
++}
++
+ /* irq chip descriptor */
+ static const struct irq_chip zynq_gpio_level_irqchip = {
+ 	.name		= DRIVER_NAME,
+@@ -584,9 +604,10 @@ static const struct irq_chip zynq_gpio_level_irqchip = {
+ 	.irq_unmask	= zynq_gpio_irq_unmask,
+ 	.irq_set_type	= zynq_gpio_set_irq_type,
+ 	.irq_set_wake	= zynq_gpio_set_wake,
++	.irq_request_resources = zynq_gpio_irq_reqres,
++	.irq_release_resources = zynq_gpio_irq_relres,
+ 	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ 			  IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+-	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+ 
+ static const struct irq_chip zynq_gpio_edge_irqchip = {
+@@ -597,8 +618,9 @@ static const struct irq_chip zynq_gpio_edge_irqchip = {
+ 	.irq_unmask	= zynq_gpio_irq_unmask,
+ 	.irq_set_type	= zynq_gpio_set_irq_type,
+ 	.irq_set_wake	= zynq_gpio_set_wake,
++	.irq_request_resources = zynq_gpio_irq_reqres,
++	.irq_release_resources = zynq_gpio_irq_relres,
+ 	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+-	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+ 
+ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
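
Replacing GPIOCHIP_IRQ_RESOURCE_HELPERS with custom hooks lets the zynq driver hold a runtime-PM reference for as long as any IRQ consumer has the line requested. The pairing discipline, stripped to a counter sketch (the function names echo the driver; the bodies are stand-ins):

#include <stdio.h>

static int pm_refs;	/* stand-in for the runtime-PM usage count */

static int irq_reqres(void)	/* cf. pm_runtime_resume_and_get() */
{
	pm_refs++;
	return 0;
}

static void irq_relres(void)	/* cf. pm_runtime_put() */
{
	pm_refs--;
}

int main(void)
{
	if (!irq_reqres()) {	/* each request is balanced by a release */
		printf("in use: pm refs = %d\n", pm_refs);
		irq_relres();
	}
	printf("idle: pm refs = %d\n", pm_refs);
	return 0;
}
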
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6238701cde237..6e5e4603a51a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1325,6 +1325,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ 	u16 cmd;
+ 	int r;
+ 
++	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
++		return 0;
++
+ 	/* Bypass for VF */
+ 	if (amdgpu_sriov_vf(adev))
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0593ef8fe0a63..e06009966428f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -26,30 +26,30 @@
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_gem.h>
+-#include <drm/drm_vblank.h>
+ #include <drm/drm_managed.h>
+-#include "amdgpu_drv.h"
+-
+ #include <drm/drm_pciids.h>
+-#include <linux/module.h>
+-#include <linux/pm_runtime.h>
+-#include <linux/vga_switcheroo.h>
+ #include <drm/drm_probe_helper.h>
+-#include <linux/mmu_notifier.h>
+-#include <linux/suspend.h>
++#include <drm/drm_vblank.h>
++
+ #include <linux/cc_platform.h>
+ #include <linux/dynamic_debug.h>
++#include <linux/module.h>
++#include <linux/mmu_notifier.h>
++#include <linux/pm_runtime.h>
++#include <linux/suspend.h>
++#include <linux/vga_switcheroo.h>
+ 
+ #include "amdgpu.h"
+-#include "amdgpu_irq.h"
++#include "amdgpu_amdkfd.h"
+ #include "amdgpu_dma_buf.h"
+-#include "amdgpu_sched.h"
++#include "amdgpu_drv.h"
+ #include "amdgpu_fdinfo.h"
+-#include "amdgpu_amdkfd.h"
+-
++#include "amdgpu_irq.h"
++#include "amdgpu_psp.h"
+ #include "amdgpu_ras.h"
+-#include "amdgpu_xgmi.h"
+ #include "amdgpu_reset.h"
++#include "amdgpu_sched.h"
++#include "amdgpu_xgmi.h"
+ #include "../amdxcp/amdgpu_xcp_drv.h"
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index ebeddc9a37e9b..6aa3b1d845abe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -62,7 +62,7 @@
+  * Returns 0 on success, error on failure.
+  */
+ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+-		  unsigned size, enum amdgpu_ib_pool_type pool_type,
++		  unsigned int size, enum amdgpu_ib_pool_type pool_type,
+ 		  struct amdgpu_ib *ib)
+ {
+ 	int r;
+@@ -123,7 +123,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+  * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+  * to SI there was just a DE IB.
+  */
+-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
++int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
+ 		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
+ 		       struct dma_fence **f)
+ {
+@@ -131,16 +131,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ 	struct amdgpu_ib *ib = &ibs[0];
+ 	struct dma_fence *tmp = NULL;
+ 	bool need_ctx_switch;
+-	unsigned patch_offset = ~0;
++	unsigned int patch_offset = ~0;
+ 	struct amdgpu_vm *vm;
+ 	uint64_t fence_ctx;
+ 	uint32_t status = 0, alloc_size;
+-	unsigned fence_flags = 0;
++	unsigned int fence_flags = 0;
+ 	bool secure, init_shadow;
+ 	u64 shadow_va, csa_va, gds_va;
+ 	int vmid = AMDGPU_JOB_GET_VMID(job);
+ 
+-	unsigned i;
++	unsigned int i;
+ 	int r = 0;
+ 	bool need_pipe_sync = false;
+ 
+@@ -282,7 +282,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ 		amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
+ 
+ 		if (ring->funcs->init_cond_exec) {
+-			unsigned ce_offset = ~0;
++			unsigned int ce_offset = ~0;
+ 
+ 			ce_offset = amdgpu_ring_init_cond_exec(ring);
+ 			if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
+@@ -385,7 +385,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ {
+ 	long tmo_gfx, tmo_mm;
+ 	int r, ret = 0;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
+ 	if (amdgpu_sriov_vf(adev)) {
+@@ -402,7 +402,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ 		/* for CP & SDMA engines since they are scheduled together so
+ 		 * need to make the timeout width enough to cover the time
+ 		 * cost waiting for it coming back under RUNTIME only
+-		*/
++		 */
+ 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ 	} else if (adev->gmc.xgmi.hive_id) {
+ 		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
+@@ -465,13 +465,13 @@ static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
+ {
+ 	struct amdgpu_device *adev = m->private;
+ 
+-	seq_printf(m, "--------------------- DELAYED --------------------- \n");
++	seq_puts(m, "--------------------- DELAYED ---------------------\n");
+ 	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ 				     m);
+-	seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
++	seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
+ 	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ 				     m);
+-	seq_printf(m, "--------------------- DIRECT ---------------------- \n");
++	seq_puts(m, "--------------------- DIRECT ----------------------\n");
+ 	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 12414a7132564..d4ca19ba5a289 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -557,6 +557,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			crtc = (struct drm_crtc *)minfo->crtcs[i];
+ 			if (crtc && crtc->base.id == info->mode_crtc.id) {
+ 				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++
+ 				ui32 = amdgpu_crtc->crtc_id;
+ 				found = 1;
+ 				break;
+@@ -575,7 +576,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
++		ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
+ 		return ret ? -EFAULT : 0;
+ 	}
+ 	case AMDGPU_INFO_HW_IP_COUNT: {
+@@ -721,17 +722,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 				    ? -EFAULT : 0;
+ 	}
+ 	case AMDGPU_INFO_READ_MMR_REG: {
+-		unsigned n, alloc_size;
++		unsigned int n, alloc_size;
+ 		uint32_t *regs;
+-		unsigned se_num = (info->read_mmr_reg.instance >>
++		unsigned int se_num = (info->read_mmr_reg.instance >>
+ 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
+ 				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
+-		unsigned sh_num = (info->read_mmr_reg.instance >>
++		unsigned int sh_num = (info->read_mmr_reg.instance >>
+ 				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
+ 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
+ 
+ 		/* set full masks if the userspace set all bits
+-		 * in the bitfields */
++		 * in the bitfields
++		 */
+ 		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ 			se_num = 0xffffffff;
+ 		else if (se_num >= AMDGPU_GFX_MAX_SE)
+@@ -896,7 +898,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		return ret;
+ 	}
+ 	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+-		unsigned i;
++		unsigned int i;
+ 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ 		struct amd_vce_state *vce_state;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index 2cae0b1a0b8ac..c162d018cf259 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -39,6 +39,8 @@
+ #define PSP_TMR_ALIGNMENT	0x100000
+ #define PSP_FW_NAME_LEN		0x24
+ 
++extern const struct attribute_group amdgpu_flash_attr_group;
++
+ enum psp_shared_mem_size {
+ 	PSP_ASD_SHARED_MEM_SIZE				= 0x0,
+ 	PSP_XGMI_SHARED_MEM_SIZE			= 0x4000,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 6d0d66e40db93..96732897f87a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -39,6 +39,9 @@
+ 
+ #define AMDGPU_POISON	0xd0bed0be
+ 
++extern const struct attribute_group amdgpu_vram_mgr_attr_group;
++extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
++
+ struct hmm_range;
+ 
+ struct amdgpu_gtt_mgr {
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 5641cf05d856b..e63abdf52b6c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -1574,17 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ 			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
+@@ -1637,21 +1628,14 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(adev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(adev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
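
pcie_capability_clear_and_set_word() condenses the read/modify/write triplet used before into a single call, with the concurrency handling kept inside the helper. The equivalence on a plain variable (PCI_EXP_LNKCTL_HAWD is bit 9 of Link Control):

#include <stdint.h>
#include <stdio.h>

#define LNKCTL_HAWD (1u << 9)	/* Hardware Autonomous Width Disable */

static void clear_and_set(uint16_t *reg, uint16_t clear, uint16_t set)
{
	*reg = (*reg & ~clear) | set;	/* one RMW instead of three calls */
}

int main(void)
{
	uint16_t lnkctl = 0x0240;	/* pretend HAWD was set before */
	uint16_t saved = lnkctl & LNKCTL_HAWD;

	lnkctl = 0x0040;		/* retraining clobbered the bit */
	clear_and_set(&lnkctl, LNKCTL_HAWD, saved);	/* restore HAWD */
	printf("lnkctl = %#06x\n", lnkctl);
	return 0;
}
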
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index f64b87b11b1b5..4b81f29e5fd5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -2276,17 +2276,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -2331,21 +2322,14 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ 
+ 				mdelay(100);
+ 
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(adev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(adev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ 							  &tmp16);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e5554a36e8c8b..3a7e7d2ce847b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8074,10 +8074,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 		 * fast updates.
+ 		 */
+ 		if (crtc->state->async_flip &&
+-		    acrtc_state->update_type != UPDATE_TYPE_FAST)
++		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
++		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ 			drm_warn_once(state->dev,
+ 				      "[PLANE:%d:%s] async flip with non-fast update\n",
+ 				      plane->base.id, plane->name);
++
+ 		bundle->flip_addrs[planes_count].flip_immediate =
+ 			crtc->state->async_flip &&
+ 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
+@@ -10040,6 +10042,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 
+ 	/* Remove existing planes if they are modified */
+ 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
++		if (old_plane_state->fb && new_plane_state->fb &&
++		    get_mem_type(old_plane_state->fb) !=
++		    get_mem_type(new_plane_state->fb))
++			lock_and_validation_needed = true;
++
+ 		ret = dm_update_plane_state(dc, state, plane,
+ 					    old_plane_state,
+ 					    new_plane_state,
+@@ -10287,9 +10294,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		struct dm_crtc_state *dm_new_crtc_state =
+ 			to_dm_crtc_state(new_crtc_state);
+ 
++		/*
++		 * Only allow async flips for fast updates that don't change
++		 * the FB pitch, the DCC state, rotation, etc.
++		 */
++		if (new_crtc_state->async_flip && lock_and_validation_needed) {
++			drm_dbg_atomic(crtc->dev,
++				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
++				       crtc->base.id, crtc->name);
++			ret = -EINVAL;
++			goto fail;
++		}
++
+ 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
+-							 UPDATE_TYPE_FULL :
+-							 UPDATE_TYPE_FAST;
++			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ 	}
+ 
+ 	/* Must be success */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 30d4c6fd95f53..440fc0869a34b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -398,18 +398,6 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * Only allow async flips for fast updates that don't change the FB
+-	 * pitch, the DCC state, rotation, etc.
+-	 */
+-	if (crtc_state->async_flip &&
+-	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+-		drm_dbg_atomic(crtc->dev,
+-			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+-			       crtc->base.id, crtc->name);
+-		return -EINVAL;
+-	}
+-
+ 	/* In some use cases, like reset, no stream is attached */
+ 	if (!dm_crtc_state->stream)
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+index 925d6e13620ec..1bbf85defd611 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+@@ -32,6 +32,7 @@
+ 
+ #define MAX_INSTANCE                                        6
+ #define MAX_SEGMENT                                         6
++#define SMU_REGISTER_WRITE_RETRY_COUNT                      5
+ 
+ struct IP_BASE_INSTANCE
+ {
+@@ -134,6 +135,8 @@ static int dcn315_smu_send_msg_with_param(
+ 		unsigned int msg_id, unsigned int param)
+ {
+ 	uint32_t result;
++	uint32_t i = 0;
++	uint32_t read_back_data;
+ 
+ 	result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
+ 
+@@ -150,10 +153,19 @@ static int dcn315_smu_send_msg_with_param(
+ 	/* Set the parameter register for the SMU message, unit is Mhz */
+ 	REG_WRITE(MP1_SMN_C2PMSG_37, param);
+ 
+-	/* Trigger the message transaction by writing the message ID */
+-	generic_write_indirect_reg(CTX,
+-		REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+-		mmMP1_C2PMSG_3, msg_id);
++	for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
++		/* Trigger the message transaction by writing the message ID */
++		generic_write_indirect_reg(CTX,
++			REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
++			mmMP1_C2PMSG_3, msg_id);
++		read_back_data = generic_read_indirect_reg(CTX,
++			REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
++			mmMP1_C2PMSG_3);
++		if (read_back_data == msg_id)
++			break;
++		udelay(2);
++		smu_print("SMU msg id write failed %x times\n", i + 1);
++	}
+ 
+ 	result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
+ 
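
The retry loop above is a classic write/read-back verification: a write through the indirect RSMU window can occasionally be dropped, so the driver re-reads the message register and retries a bounded number of times. In outline (flaky_write() simulates the dropped writes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RETRIES 5

static uint32_t reg;	/* stand-in for the indirect SMU register */

static bool flaky_write(uint32_t val)	/* pretend writes may be dropped */
{
	static int calls;

	if (++calls < 3)
		return false;	/* first two writes are lost */
	reg = val;
	return true;
}

int main(void)
{
	uint32_t msg_id = 0x42;

	for (int i = 0; i < RETRIES; i++) {
		flaky_write(msg_id);
		if (reg == msg_id)	/* read back and verify */
			break;
		fprintf(stderr, "msg id write failed %d time(s)\n", i + 1);
	}
	printf("reg = %#x\n", reg);
	return 0;
}
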
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 4492bc2392b63..5cfa37804d7c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -2123,6 +2123,15 @@ void dcn20_optimize_bandwidth(
+ 	if (hubbub->funcs->program_compbuf_size)
+ 		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+ 
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
++		dc_dmub_srv_p_state_delegate(dc,
++			true, context);
++		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
++		dc->clk_mgr->clks.fw_based_mclk_switching = true;
++	} else {
++		dc->clk_mgr->clks.fw_based_mclk_switching = false;
++	}
++
+ 	dc->clk_mgr->funcs->update_clocks(
+ 			dc->clk_mgr,
+ 			context,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index bf8864bc8a99e..4cd4ae07d73dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -949,13 +949,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
+ }
+ 
+ void dcn30_prepare_bandwidth(struct dc *dc,
+-			     struct dc_state *context)
++	struct dc_state *context)
+ {
++	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
++	/* Any transition into an FPO config should disable MCLK switching first to avoid
++	 * driver and FW P-State synchronization issues.
++	 */
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
++		dc->optimized_required = true;
++		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
++	}
++
+ 	if (dc->clk_mgr->dc_mode_softmax_enabled)
+ 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ 
+ 	dcn20_prepare_bandwidth(dc, context);
++	/*
++	 * enabled -> enabled: do not disable
++	 * enabled -> disabled: disable
++	 * disabled -> enabled: don't care
++	 * disabled -> disabled: don't care
++	 */
++	if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
++		dc_dmub_srv_p_state_delegate(dc, false, context);
++
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
++		/* After disabling P-State, restore the original value to
++		 * ensure we get the correct P-State on the next optimize. */
++		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
++	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+index 257df8660b4ca..61205cdbe2d5a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
+ 	.get_hw_state = dcn10_get_hw_state,
+ 	.clear_status_bits = dcn10_clear_status_bits,
+ 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
++	.edp_backlight_control = dce110_edp_backlight_control,
+ 	.edp_power_control = dce110_edp_power_control,
+ 	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ 	.set_cursor_position = dcn10_set_cursor_position,
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 9ef88a0b1b57e..d68fe5474676b 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2172,15 +2172,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+ 				     uint32_t mask, struct list_head *attr_list)
+ {
+ 	int ret = 0;
+-	struct device_attribute *dev_attr = &attr->dev_attr;
+-	const char *name = dev_attr->attr.name;
+ 	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+ 	struct amdgpu_device_attr_entry *attr_entry;
++	struct device_attribute *dev_attr;
++	const char *name;
+ 
+ 	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ 			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+ 
+-	BUG_ON(!attr);
++	if (!attr)
++		return -EINVAL;
++
++	dev_attr = &attr->dev_attr;
++	name = dev_attr->attr.name;
+ 
+ 	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
+ 
+diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
+index f21eb8fb76d87..3b9bd8ecda137 100644
+--- a/drivers/gpu/drm/armada/armada_overlay.c
++++ b/drivers/gpu/drm/armada/armada_overlay.c
+@@ -4,6 +4,8 @@
+  *  Rewritten from the dovefb driver, and Armada510 manuals.
+  */
+ 
++#include <linux/bitfield.h>
++
+ #include <drm/armada_drm.h>
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -445,8 +447,8 @@ static int armada_overlay_get_property(struct drm_plane *plane,
+ 			     drm_to_overlay_state(state)->colorkey_ug,
+ 			     drm_to_overlay_state(state)->colorkey_vb, 0);
+ 	} else if (property == priv->colorkey_mode_prop) {
+-		*val = (drm_to_overlay_state(state)->colorkey_mode &
+-			CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
++		*val = FIELD_GET(CFG_CKMODE_MASK,
++				 drm_to_overlay_state(state)->colorkey_mode);
+ 	} else if (property == priv->brightness_prop) {
+ 		*val = drm_to_overlay_state(state)->brightness + 256;
+ 	} else if (property == priv->contrast_prop) {
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index 6dc1a09504e13..fdd9a493aa9c0 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -7,6 +7,17 @@
+ #include <drm/drm_print.h>
+ #include "ast_drv.h"
+ 
++bool ast_astdp_is_connected(struct ast_device *ast)
++{
++	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
++		return false;
++	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
++		return false;
++	if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
++		return false;
++	return true;
++}
++
+ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
+ {
+ 	struct ast_device *ast = to_ast_device(dev);
+diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
+index 1bc35a992369d..fa7442b0c2612 100644
+--- a/drivers/gpu/drm/ast/ast_dp501.c
++++ b/drivers/gpu/drm/ast/ast_dp501.c
+@@ -272,11 +272,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
+ 	return true;
+ }
+ 
+-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
++bool ast_dp501_is_connected(struct ast_device *ast)
+ {
+-	struct ast_device *ast = to_ast_device(dev);
+-	u32 i, boot_address, offset, data;
+-	u32 *pEDIDidx;
++	u32 boot_address, offset, data;
+ 
+ 	if (ast->config_mode == ast_use_p2a) {
+ 		boot_address = get_fw_base(ast);
+@@ -292,14 +290,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+ 		data = ast_mindwm(ast, boot_address + offset);
+ 		if (!(data & AST_DP501_PNP_CONNECTED))
+ 			return false;
+-
+-		/* Read EDID */
+-		offset = AST_DP501_EDID_DATA;
+-		for (i = 0; i < 128; i += 4) {
+-			data = ast_mindwm(ast, boot_address + offset + i);
+-			pEDIDidx = (u32 *)(ediddata + i);
+-			*pEDIDidx = data;
+-		}
+ 	} else {
+ 		if (!ast->dp501_fw_buf)
+ 			return false;
+@@ -319,7 +309,30 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+ 		data = readl(ast->dp501_fw_buf + offset);
+ 		if (!(data & AST_DP501_PNP_CONNECTED))
+ 			return false;
++	}
++	return true;
++}
++
++bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
++{
++	struct ast_device *ast = to_ast_device(dev);
++	u32 i, boot_address, offset, data;
++	u32 *pEDIDidx;
++
++	if (!ast_dp501_is_connected(ast))
++		return false;
++
++	if (ast->config_mode == ast_use_p2a) {
++		boot_address = get_fw_base(ast);
+ 
++		/* Read EDID */
++		offset = AST_DP501_EDID_DATA;
++		for (i = 0; i < 128; i += 4) {
++			data = ast_mindwm(ast, boot_address + offset + i);
++			pEDIDidx = (u32 *)(ediddata + i);
++			*pEDIDidx = data;
++		}
++	} else {
+ 		/* Read EDID */
+ 		offset = AST_DP501_EDID_DATA;
+ 		for (i = 0; i < 128; i += 4) {
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 5498a6676f2e8..8a0ffa8b5939b 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -468,6 +468,7 @@ void ast_patch_ahb_2500(struct ast_device *ast);
+ /* ast dp501 */
+ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
+ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
++bool ast_dp501_is_connected(struct ast_device *ast);
+ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
+ u8 ast_get_dp501_max_clk(struct drm_device *dev);
+ void ast_init_3rdtx(struct drm_device *dev);
+@@ -476,6 +477,7 @@ void ast_init_3rdtx(struct drm_device *dev);
+ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+ 
+ /* aspeed DP */
++bool ast_astdp_is_connected(struct ast_device *ast);
+ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
+ void ast_dp_launch(struct drm_device *dev);
+ void ast_dp_power_on_off(struct drm_device *dev, bool no);
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index b3c670af6ef2b..0724516f29737 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1585,8 +1585,20 @@ err_drm_connector_update_edid_property:
+ 	return 0;
+ }
+ 
++static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
++						 struct drm_modeset_acquire_ctx *ctx,
++						 bool force)
++{
++	struct ast_device *ast = to_ast_device(connector->dev);
++
++	if (ast_dp501_is_connected(ast))
++		return connector_status_connected;
++	return connector_status_disconnected;
++}
++
+ static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+ 	.get_modes = ast_dp501_connector_helper_get_modes,
++	.detect_ctx = ast_dp501_connector_helper_detect_ctx,
+ };
+ 
+ static const struct drm_connector_funcs ast_dp501_connector_funcs = {
+@@ -1611,7 +1623,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
+ 	connector->interlace_allowed = 0;
+ 	connector->doublescan_allowed = 0;
+ 
+-	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ 
+ 	return 0;
+ }
+@@ -1683,8 +1695,20 @@ err_drm_connector_update_edid_property:
+ 	return 0;
+ }
+ 
++static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
++						 struct drm_modeset_acquire_ctx *ctx,
++						 bool force)
++{
++	struct ast_device *ast = to_ast_device(connector->dev);
++
++	if (ast_astdp_is_connected(ast))
++		return connector_status_connected;
++	return connector_status_disconnected;
++}
++
+ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
+ 	.get_modes = ast_astdp_connector_helper_get_modes,
++	.detect_ctx = ast_astdp_connector_helper_detect_ctx,
+ };
+ 
+ static const struct drm_connector_funcs ast_astdp_connector_funcs = {
+@@ -1709,7 +1733,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
+ 	connector->interlace_allowed = 0;
+ 	connector->doublescan_allowed = 0;
+ 
+-	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ 
+ 	return 0;
+ }
+@@ -1848,5 +1872,7 @@ int ast_mode_config_init(struct ast_device *ast)
+ 
+ 	drm_mode_config_reset(dev);
+ 
++	drm_kms_helper_poll_init(dev);
++
+ 	return 0;
+ }
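
With detect_ctx callbacks and DRM_CONNECTOR_POLL_DISCONNECT in place, drm_kms_helper_poll_init() can track both plug and unplug events without HPD interrupts. The detect logic itself is just a conjunction of status checks, roughly (the three booleans stand in for the scratch-register reads in ast_astdp_is_connected()):

#include <stdbool.h>
#include <stdio.h>

static bool fw_executing = true, hpd_asserted = true, link_trained = false;

static bool astdp_is_connected(void)
{
	/* All conditions must hold; any failure means "disconnected". */
	return fw_executing && hpd_asserted && link_trained;
}

int main(void)
{
	printf("status: %s\n", astdp_is_connected() ? "connected"
						    : "disconnected");
	return 0;
}
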
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index 2254457ab5d02..9aeeb63435cd9 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -786,8 +786,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
+ 	else
+ 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+ 
+-	regmap_update_bits(adv7511->regmap, 0xfb,
+-		0x6, low_refresh_rate << 1);
++	if (adv7511->type == ADV7511)
++		regmap_update_bits(adv7511->regmap, 0xfb,
++				   0x6, low_refresh_rate << 1);
++	else
++		regmap_update_bits(adv7511->regmap, 0x4a,
++				   0xc, low_refresh_rate << 2);
++
+ 	regmap_update_bits(adv7511->regmap, 0x17,
+ 		0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ 
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 8b985efdc086b..866d018f4bb11 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -872,11 +872,11 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
+ 	}
+ 
+ 	/* Read downstream capability */
+-	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
++	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (!(bcap & 0x01)) {
++	if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) {
+ 		pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
+ 		return 0;
+ 	}
+@@ -931,8 +931,8 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
+ 
+ 	dev_dbg(dev, "set downstream sink into normal\n");
+ 	/* Downstream sink enter into normal mode */
+-	data = 1;
+-	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
++	data = DP_SET_POWER_D0;
++	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
+ 	if (ret < 0)
+ 		dev_err(dev, "IO error : set sink into normal mode fail\n");
+ 
+@@ -971,8 +971,8 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
+ 
+ 	dev_dbg(dev, "notify downstream enter into standby\n");
+ 	/* Downstream monitor enter into standby mode */
+-	data = 2;
+-	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
++	data = DP_SET_POWER_D3;
++	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
+ 	if (ret < 0)
+ 		DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+ 
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+index b2efecf7d1603..4291798bd70f5 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+@@ -265,6 +265,7 @@ struct dw_mipi_dsi {
+ 	struct dw_mipi_dsi *master; /* dual-dsi master ptr */
+ 	struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
+ 
++	struct drm_display_mode mode;
+ 	const struct dw_mipi_dsi_plat_data *plat_data;
+ };
+ 
+@@ -332,6 +333,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
+ 	if (IS_ERR(bridge))
+ 		return PTR_ERR(bridge);
+ 
++	bridge->pre_enable_prev_first = true;
+ 	dsi->panel_bridge = bridge;
+ 
+ 	drm_bridge_add(&dsi->bridge);
+@@ -859,15 +861,6 @@ static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
+ 	 */
+ 	dw_mipi_dsi_set_mode(dsi, 0);
+ 
+-	/*
+-	 * TODO Only way found to call panel-bridge post_disable &
+-	 * panel unprepare before the dsi "final" disable...
+-	 * This needs to be fixed in the drm_bridge framework and the API
+-	 * needs to be updated to manage our own call chains...
+-	 */
+-	if (dsi->panel_bridge->funcs->post_disable)
+-		dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+-
+ 	if (phy_ops->power_off)
+ 		phy_ops->power_off(dsi->plat_data->priv_data);
+ 
+@@ -942,15 +935,25 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
+ 		phy_ops->power_on(dsi->plat_data->priv_data);
+ }
+ 
++static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
++						 struct drm_bridge_state *old_bridge_state)
++{
++	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
++
++	/* Power up the dsi ctl into a command mode */
++	dw_mipi_dsi_mode_set(dsi, &dsi->mode);
++	if (dsi->slave)
++		dw_mipi_dsi_mode_set(dsi->slave, &dsi->mode);
++}
++
+ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ 					const struct drm_display_mode *mode,
+ 					const struct drm_display_mode *adjusted_mode)
+ {
+ 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ 
+-	dw_mipi_dsi_mode_set(dsi, adjusted_mode);
+-	if (dsi->slave)
+-		dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
++	/* Store the display mode for later use in pre_enable callback */
++	drm_mode_copy(&dsi->mode, adjusted_mode);
+ }
+ 
+ static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+@@ -1004,6 +1007,7 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
+ 	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
+ 	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
+ 	.atomic_reset		= drm_atomic_helper_bridge_reset,
++	.atomic_pre_enable	= dw_mipi_dsi_bridge_atomic_pre_enable,
+ 	.atomic_enable		= dw_mipi_dsi_bridge_atomic_enable,
+ 	.atomic_post_disable	= dw_mipi_dsi_bridge_post_atomic_disable,
+ 	.mode_set		= dw_mipi_dsi_bridge_mode_set,
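
The dw-mipi-dsi rework caches the adjusted mode in mode_set and defers controller programming to atomic_pre_enable; together with pre_enable_prev_first on the panel bridge, this guarantees the DSI host is in command mode before the panel's own pre_enable runs. The cache-now/program-later split in miniature:

#include <stdio.h>

struct mode { int hdisplay, vdisplay; };
struct dsi { struct mode mode; };

static void bridge_mode_set(struct dsi *d, const struct mode *adj)
{
	d->mode = *adj;		/* only remember the mode here */
}

static void bridge_pre_enable(struct dsi *d)
{
	/* program the controller now, before the panel's pre_enable */
	printf("programming %dx%d\n", d->mode.hdisplay, d->mode.vdisplay);
}

int main(void)
{
	struct dsi d;
	struct mode m = { 1920, 1080 };

	bridge_mode_set(&d, &m);
	bridge_pre_enable(&d);
	return 0;
}
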
+diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
+index f85654f1b1045..8e938a7480f37 100644
+--- a/drivers/gpu/drm/bridge/tc358764.c
++++ b/drivers/gpu/drm/bridge/tc358764.c
+@@ -176,7 +176,7 @@ static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
+ 	if (ret >= 0)
+ 		le32_to_cpus(val);
+ 
+-	dev_dbg(ctx->dev, "read: %d, addr: %d\n", addr, *val);
++	dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val);
+ }
+ 
+ static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+index 44b5f3c35aabe..898f84a0fc30c 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+@@ -130,9 +130,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 		return;
+ 	etnaviv_dump_core = false;
+ 
+-	mutex_lock(&gpu->mmu_context->lock);
++	mutex_lock(&submit->mmu_context->lock);
+ 
+-	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
++	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
+ 
+ 	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ 	n_obj = 5;
+@@ -162,7 +162,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ 			__GFP_NORETRY);
+ 	if (!iter.start) {
+-		mutex_unlock(&gpu->mmu_context->lock);
++		mutex_unlock(&submit->mmu_context->lock);
+ 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+ 		return;
+ 	}
+@@ -174,18 +174,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 	memset(iter.hdr, 0, iter.data - iter.start);
+ 
+ 	etnaviv_core_dump_registers(&iter, gpu);
+-	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
++	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
+ 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ 			      gpu->buffer.size,
+ 			      etnaviv_cmdbuf_get_va(&gpu->buffer,
+-					&gpu->mmu_context->cmdbuf_mapping));
++					&submit->mmu_context->cmdbuf_mapping));
+ 
+ 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ 			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ 			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
+-					&gpu->mmu_context->cmdbuf_mapping));
++					&submit->mmu_context->cmdbuf_mapping));
+ 
+-	mutex_unlock(&gpu->mmu_context->lock);
++	mutex_unlock(&submit->mmu_context->lock);
+ 
+ 	/* Reserve space for the bomap */
+ 	if (n_bomap_pages) {
+diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+index a7d2c92d6c6a0..8026118c6e033 100644
+--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
++++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+@@ -7,6 +7,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/screen_info.h>
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_atomic_helper.h>
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+index c0a38f5217eee..f2f6a5c01a6d2 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+@@ -426,7 +426,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
+ 			continue;
+ 		}
+ 
+-		type = (enum mtk_ovl_adaptor_comp_type)of_id->data;
++		type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data;
+ 		id = ovl_adaptor_comp_get_id(dev, node, type);
+ 		if (id < 0) {
+ 			dev_warn(dev, "Skipping unknown component %pOF\n",
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 64eee77452c04..c58b775877a31 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1588,7 +1588,9 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 	u8 val;
+ 	ssize_t ret;
+ 
+-	drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
++	ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
++	if (ret < 0)
++		return ret;
+ 
+ 	if (drm_dp_tps4_supported(mtk_dp->rx_cap))
+ 		mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
+@@ -1615,10 +1617,13 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 			return ret == 0 ? -EIO : ret;
+ 		}
+ 
+-		if (val)
+-			drm_dp_dpcd_writeb(&mtk_dp->aux,
+-					   DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+-					   val);
++		if (val) {
++			ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
++						 DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
++						 val);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index d40142842f85c..8d44f3df116fa 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -116,10 +116,9 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
+ 	dma_addr_t dma_addr;
+ 
+ 	pkt->va_base = kzalloc(size, GFP_KERNEL);
+-	if (!pkt->va_base) {
+-		kfree(pkt);
++	if (!pkt->va_base)
+ 		return -ENOMEM;
+-	}
++
+ 	pkt->buf_size = size;
+ 	pkt->cl = (void *)client;
+ 
+@@ -129,7 +128,6 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
+ 	if (dma_mapping_error(dev, dma_addr)) {
+ 		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ 		kfree(pkt->va_base);
+-		kfree(pkt);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -145,7 +143,6 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+ 	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ 			 DMA_TO_DEVICE);
+ 	kfree(pkt->va_base);
+-	kfree(pkt);
+ }
+ #endif
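
[Note: the removed kfree(pkt) calls fix an invalid free. The cmdq_pkt here is
embedded in its parent structure rather than heap-allocated, so the create/destroy
helpers must only manage the buffer they allocate themselves. A sketch of the
ownership rule, with hypothetical names:]

    struct example_crtc {
            struct cmdq_pkt cmdq_handle;    /* embedded, never kmalloc'ed */
    };

    static int example_pkt_create(struct cmdq_pkt *pkt, size_t size)
    {
            pkt->va_base = kzalloc(size, GFP_KERNEL);
            if (!pkt->va_base)
                    return -ENOMEM;         /* no kfree(pkt): not ours to
                                               free, and not heap memory */
            pkt->buf_size = size;
            return 0;
    }
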
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+index f114da4d36a96..771f4e1733539 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+@@ -563,14 +563,15 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+ 	/* Not all drm components have a DTS device node, such as ovl_adaptor,
+ 	 * which is the drm bring up sub driver
+ 	 */
+-	if (node) {
+-		comp_pdev = of_find_device_by_node(node);
+-		if (!comp_pdev) {
+-			DRM_INFO("Waiting for device %s\n", node->full_name);
+-			return -EPROBE_DEFER;
+-		}
+-		comp->dev = &comp_pdev->dev;
++	if (!node)
++		return 0;
++
++	comp_pdev = of_find_device_by_node(node);
++	if (!comp_pdev) {
++		DRM_INFO("Waiting for device %s\n", node->full_name);
++		return -EPROBE_DEFER;
+ 	}
++	comp->dev = &comp_pdev->dev;
+ 
+ 	if (type == MTK_DISP_AAL ||
+ 	    type == MTK_DISP_BLS ||
+@@ -580,7 +581,6 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+ 	    type == MTK_DISP_MERGE ||
+ 	    type == MTK_DISP_OVL ||
+ 	    type == MTK_DISP_OVL_2L ||
+-	    type == MTK_DISP_OVL_ADAPTOR ||
+ 	    type == MTK_DISP_PWM ||
+ 	    type == MTK_DISP_RDMA ||
+ 	    type == MTK_DPI ||
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 6dcb4ba2466c0..30d10f21562f4 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -354,7 +354,7 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 	const struct of_device_id *of_id;
+ 	struct device_node *node;
+ 	struct device *drm_dev;
+-	int cnt = 0;
++	unsigned int cnt = 0;
+ 	int i, j;
+ 
+ 	for_each_child_of_node(phandle->parent, node) {
+@@ -375,6 +375,9 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 		all_drm_priv[cnt] = dev_get_drvdata(drm_dev);
+ 		if (all_drm_priv[cnt] && all_drm_priv[cnt]->mtk_drm_bound)
+ 			cnt++;
++
++		if (cnt == MAX_CRTC)
++			break;
+ 	}
+ 
+ 	if (drm_priv->data->mmsys_dev_num == cnt) {
+@@ -829,7 +832,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
+ 			continue;
+ 		}
+ 
+-		comp_type = (enum mtk_ddp_comp_type)of_id->data;
++		comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data;
+ 
+ 		if (comp_type == MTK_DISP_MUTEX) {
+ 			int id;
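
[Note: the MAX_CRTC guard above keeps the all_drm_priv[] fill loop from writing
one past the end of the array when more matching child nodes exist than slots.
Generic shape of the guard (a sketch; array and helper names hypothetical - note
that breaking out of for_each_child_of_node() requires dropping the node
reference the iterator holds):]

    unsigned int cnt = 0;

    for_each_child_of_node(parent, node) {
            slots[cnt] = example_lookup(node);
            if (slots[cnt])
                    cnt++;

            if (cnt == ARRAY_SIZE(slots)) {
                    of_node_put(node);      /* reference held by iterator */
                    break;                  /* stop before the next write */
            }
    }
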
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index a25b28d3ee902..9f364df52478d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -247,7 +247,11 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 
+ 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ 			       pgprot_writecombine(PAGE_KERNEL));
+-
++	if (!mtk_gem->kvaddr) {
++		kfree(sgt);
++		kfree(mtk_gem->pages);
++		return -ENOMEM;
++	}
+ out:
+ 	kfree(sgt);
+ 	iosys_map_set_vaddr(map, mtk_gem->kvaddr);
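
[Note: vmap() returns NULL on failure; the hunk above adds the missing check so
the function reports -ENOMEM and unwinds instead of handing a NULL kernel address
to iosys_map_set_vaddr(). The general shape of the pattern:]

    vaddr = vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
    if (!vaddr) {
            kfree(sgt);             /* release what was set up earlier */
            kfree(pages);
            return -ENOMEM;
    }
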
+diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+index c67089a7ebc10..ad4570d60abf2 100644
+--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+@@ -540,6 +540,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+ 	gpu->perfcntrs = perfcntrs;
+ 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+ 
++	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
++	if (ret)
++		goto fail;
++
+ 	if (adreno_is_a20x(adreno_gpu))
+ 		adreno_gpu->registers = a200_registers;
+ 	else if (adreno_is_a225(adreno_gpu))
+@@ -547,10 +551,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+ 	else
+ 		adreno_gpu->registers = a220_registers;
+ 
+-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+-	if (ret)
+-		goto fail;
+-
+ 	if (!gpu->aspace) {
+ 		dev_err(dev->dev, "No memory protection without MMU\n");
+ 		if (!allow_vram_carveout) {
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 5deb79924897a..63dde676f4339 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1435,8 +1435,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+ 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ 	struct platform_device *pdev = to_platform_device(gmu->dev);
+ 
+-	if (!gmu->initialized)
++	mutex_lock(&gmu->lock);
++	if (!gmu->initialized) {
++		mutex_unlock(&gmu->lock);
+ 		return;
++	}
++
++	gmu->initialized = false;
++
++	mutex_unlock(&gmu->lock);
+ 
+ 	pm_runtime_force_suspend(gmu->dev);
+ 
+@@ -1466,8 +1473,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+ 
+ 	/* Drop reference taken in of_find_device_by_node */
+ 	put_device(gmu->dev);
+-
+-	gmu->initialized = false;
+ }
+ 
+ static int cxpd_notifier_cb(struct notifier_block *nb,
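
[Note: together with the a6xx_gpu.c hunk below, this moves the lock inside
a6xx_gmu_remove() and clears gmu->initialized while holding it, making teardown
single-shot: a racing caller either sees the flag already cleared and returns,
or waits on the mutex. The generic pattern, sketched with hypothetical names:]

    static void example_teardown(struct example_dev *d)
    {
            mutex_lock(&d->lock);
            if (!d->initialized) {
                    mutex_unlock(&d->lock);
                    return;                 /* already torn down */
            }
            d->initialized = false;         /* claim teardown under the lock */
            mutex_unlock(&d->lock);

            /* release resources outside the lock, so it is not held
             * across sleeping calls */
            example_release_resources(d);
    }
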
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index b3ada1e7b598b..a2513f7168238 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -2091,9 +2091,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
+ 
+ 	a6xx_llc_slices_destroy(a6xx_gpu);
+ 
+-	mutex_lock(&a6xx_gpu->gmu.lock);
+ 	a6xx_gmu_remove(a6xx_gpu);
+-	mutex_unlock(&a6xx_gpu->gmu.lock);
+ 
+ 	adreno_gpu_cleanup(adreno_gpu);
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index ce8d0b2475bf1..6e3c1368c5e15 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -371,7 +371,7 @@ static const struct adreno_info gpulist[] = {
+ 		.rev = ADRENO_REV(6, 9, 0, ANY_ID),
+ 		.fw = {
+ 			[ADRENO_FW_SQE] = "a660_sqe.fw",
+-			[ADRENO_FW_GMU] = "a690_gmu.bin",
++			[ADRENO_FW_GMU] = "a660_gmu.bin",
+ 		},
+ 		.gmem = SZ_4M,
+ 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+index 7d0d0e74c3b08..be8e7e54df8af 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+@@ -127,8 +127,13 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg msm8998_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
+-	DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++	}, {
++		.name = "dsc_1", .id = DSC_1,
++		.base = 0x80400, .len = 0x140,
++	},
+ };
+ 
+ static const struct dpu_dspp_cfg msm8998_dspp[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+index b6098141bb9bb..bd450712e65cd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+@@ -111,13 +111,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sdm845_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sdm845_pp[] = {
+@@ -136,10 +136,19 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sdm845_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
+-	DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
+-	DSC_BLK("dsc_2", DSC_2, 0x80800, 0),
+-	DSC_BLK("dsc_3", DSC_3, 0x80c00, 0),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++	}, {
++		.name = "dsc_1", .id = DSC_1,
++		.base = 0x80400, .len = 0x140,
++	}, {
++		.name = "dsc_2", .id = DSC_2,
++		.base = 0x80800, .len = 0x140,
++	}, {
++		.name = "dsc_3", .id = DSC_3,
++		.base = 0x80c00, .len = 0x140,
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sdm845_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index b5f7513542678..4589b7a043990 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -118,13 +118,13 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm8150_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm8150_pp[] = {
+@@ -155,10 +155,23 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sm8150_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_1", .id = DSC_1,
++		.base = 0x80400, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_2", .id = DSC_2,
++		.base = 0x80800, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_3", .id = DSC_3,
++		.base = 0x80c00, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm8150_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index 8ed2b263c5ea3..8f5d5d44ccb3d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -117,13 +117,13 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sc8180x_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
+@@ -154,12 +154,31 @@ static const struct dpu_merge_3d_cfg sc8180x_merge_3d[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_4", DSC_4, 0x81000, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_5", DSC_5, 0x81400, BIT(DPU_DSC_OUTPUT_CTRL)),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_1", .id = DSC_1,
++		.base = 0x80400, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_2", .id = DSC_2,
++		.base = 0x80800, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_3", .id = DSC_3,
++		.base = 0x80c00, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_4", .id = DSC_4,
++		.base = 0x81000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_5", .id = DSC_5,
++		.base = 0x81400, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sc8180x_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+index daebd21700413..0e17be6ed94f2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+@@ -119,13 +119,13 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm8250_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm8250_pp[] = {
+@@ -156,10 +156,23 @@ static const struct dpu_merge_3d_cfg sm8250_merge_3d[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sm8250_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
+-	DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_1", .id = DSC_1,
++		.base = 0x80400, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_2", .id = DSC_2,
++		.base = 0x80800, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	}, {
++		.name = "dsc_3", .id = DSC_3,
++		.base = 0x80c00, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm8250_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+index 67566b07195a2..a3124661cb65f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+@@ -76,7 +76,7 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sc7180_pp[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+index 031fc8dae3c69..04a0dbf96e179 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+@@ -56,7 +56,7 @@ static const struct dpu_lm_cfg sm6115_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm6115_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm6115_pp[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+index 06eba23b02364..398ea3749f805 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+@@ -85,7 +85,7 @@ static const struct dpu_lm_cfg sm6350_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm6350_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		&sm8150_dspp_sblk),
++		&sdm845_dspp_sblk),
+ };
+ 
+ static struct dpu_pingpong_cfg sm6350_pp[] = {
+@@ -98,7 +98,11 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sm6350_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm6350_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+index f2808098af399..06cf48b55f989 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+@@ -53,7 +53,7 @@ static const struct dpu_lm_cfg qcm2290_lm[] = {
+ 
+ static const struct dpu_dspp_cfg qcm2290_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg qcm2290_pp[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+index 241fa6746674d..ec12602896f31 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+@@ -57,7 +57,7 @@ static const struct dpu_lm_cfg sm6375_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm6375_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		&sm8150_dspp_sblk),
++		&sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm6375_pp[] = {
+@@ -67,7 +67,11 @@ static const struct dpu_pingpong_cfg sm6375_pp[] = {
+ };
+ 
+ static const struct dpu_dsc_cfg sm6375_dsc[] = {
+-	DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
++	{
++		.name = "dsc_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x140,
++		.features = BIT(DPU_DSC_OUTPUT_CTRL),
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm6375_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+index 8da424eaee6a2..66b3d299ffcf7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+@@ -117,13 +117,13 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm8350_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm8350_pp[] = {
+@@ -159,10 +159,27 @@ static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
+  * its own different sub block address.
+  */
+ static const struct dpu_dsc_cfg sm8350_dsc[] = {
+-	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
+-	DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
+-	DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
+-	DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
++	{
++		.name = "dce_0_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_0_1", .id = DSC_1,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_1,
++	}, {
++		.name = "dce_1_0", .id = DSC_2,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_1_1", .id = DSC_3,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_1,
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm8350_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+index 900fee410e113..f06ed9a73b071 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+@@ -84,7 +84,7 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sc7280_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sc7280_pp[] = {
+@@ -104,7 +104,12 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
+ 
+ /* NOTE: sc7280 only has one DSC hard slice encoder */
+ static const struct dpu_dsc_cfg sc7280_dsc[] = {
+-	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
++	{
++		.name = "dce_0_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_0,
++	},
+ };
+ 
+ static const struct dpu_wb_cfg sc7280_wb[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+index f6ce6b090f718..ac71cc62f605a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+@@ -112,13 +112,13 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
+@@ -148,12 +148,37 @@ static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
+  * its own different sub block address.
+  */
+ static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
+-	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
+-	DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
+-	DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
+-	DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
+-	DSC_BLK_1_2("dce_2_0", DSC_4, 0x82000, 0x29c, 0, dsc_sblk_0),
+-	DSC_BLK_1_2("dce_2_1", DSC_5, 0x82000, 0x29c, 0, dsc_sblk_1),
++	{
++		.name = "dce_0_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_0_1", .id = DSC_1,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_1,
++	}, {
++		.name = "dce_1_0", .id = DSC_2,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_1_1", .id = DSC_3,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_1,
++	}, {
++		.name = "dce_2_0", .id = DSC_4,
++		.base = 0x82000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_2_1", .id = DSC_5,
++		.base = 0x82000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_1,
++	},
+ };
+ 
+ /* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+index 8d13c369213c0..d7407d471a31e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+@@ -118,13 +118,13 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm8450_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ 
+ static const struct dpu_pingpong_cfg sm8450_pp[] = {
+@@ -167,10 +167,27 @@ static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
+  * its own different sub block address.
+  */
+ static const struct dpu_dsc_cfg sm8450_dsc[] = {
+-	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
+-	DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
+-	DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
+-	DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
++	{
++		.name = "dce_0_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_0_1", .id = DSC_1,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_1,
++	}, {
++		.name = "dce_1_0", .id = DSC_2,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_1_1", .id = DSC_3,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_1,
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm8450_intf[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+index f17b9a7fee851..d51c2f8acba0a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+@@ -123,13 +123,13 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
+ 
+ static const struct dpu_dspp_cfg sm8550_dspp[] = {
+ 	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ 	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+-		 &sm8150_dspp_sblk),
++		 &sdm845_dspp_sblk),
+ };
+ static const struct dpu_pingpong_cfg sm8550_pp[] = {
+ 	PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+@@ -171,10 +171,27 @@ static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
+  * its own different sub block address.
+  */
+ static const struct dpu_dsc_cfg sm8550_dsc[] = {
+-	DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
+-	DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
+-	DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
+-	DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
++	{
++		.name = "dce_0_0", .id = DSC_0,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_0_1", .id = DSC_1,
++		.base = 0x80000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2),
++		.sblk = &dsc_sblk_1,
++	}, {
++		.name = "dce_1_0", .id = DSC_2,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_0,
++	}, {
++		.name = "dce_1_1", .id = DSC_3,
++		.base = 0x81000, .len = 0x4,
++		.features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
++		.sblk = &dsc_sblk_1,
++	},
+ };
+ 
+ static const struct dpu_intf_cfg sm8550_intf[] = {
+@@ -245,8 +262,8 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
+ 	.merge_3d = sm8550_merge_3d,
+ 	.intf_count = ARRAY_SIZE(sm8550_intf),
+ 	.intf = sm8550_intf,
+-	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+-	.vbif = sdm845_vbif,
++	.vbif_count = ARRAY_SIZE(sm8550_vbif),
++	.vbif = sm8550_vbif,
+ 	.perf = &sm8550_perf_data,
+ 	.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
+ 		     BIT(MDP_SSPP_TOP0_INTR2) | \
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index a466ff70a4d62..78037a697633b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -446,7 +446,8 @@ static int dpu_encoder_phys_wb_wait_for_commit_done(
+ 	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+ 
+-	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE,
++	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
++			phys_enc->irq[INTR_IDX_WB_DONE],
+ 			dpu_encoder_phys_wb_done_irq, &wait_info);
+ 	if (ret == -ETIMEDOUT)
+ 		_dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 0de507d4d7b7a..721c18cf9b1eb 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -444,12 +444,12 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
+  * DSPP sub blocks config
+  *************************************************************/
+ static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
+-	.pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
++	.pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ 		.len = 0x90, .version = 0x10007},
+ };
+ 
+-static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
+-	.pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
++static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
++	.pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ 		.len = 0x90, .version = 0x40000},
+ };
+ 
+@@ -465,19 +465,19 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
+  * PINGPONG sub blocks config
+  *************************************************************/
+ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+-	.te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
++	.te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ 		.version = 0x1},
+-	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
++	.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ 		.len = 0x20, .version = 0x10000},
+ };
+ 
+ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+-	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
++	.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ 		.len = 0x20, .version = 0x10000},
+ };
+ 
+ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
+-	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0xe0,
++	.dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ 	.len = 0x20, .version = 0x20000},
+ };
+ 
+@@ -517,30 +517,15 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
+  * DSC sub blocks config
+  *************************************************************/
+ static const struct dpu_dsc_sub_blks dsc_sblk_0 = {
+-	.enc = {.base = 0x100, .len = 0x100},
+-	.ctl = {.base = 0xF00, .len = 0x10},
++	.enc = {.name = "enc", .base = 0x100, .len = 0x9c},
++	.ctl = {.name = "ctl", .base = 0xF00, .len = 0x10},
+ };
+ 
+ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
+-	.enc = {.base = 0x200, .len = 0x100},
+-	.ctl = {.base = 0xF80, .len = 0x10},
++	.enc = {.name = "enc", .base = 0x200, .len = 0x9c},
++	.ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
+ };
+ 
+-#define DSC_BLK(_name, _id, _base, _features) \
+-	{\
+-	.name = _name, .id = _id, \
+-	.base = _base, .len = 0x140, \
+-	.features = _features, \
+-	}
+-
+-#define DSC_BLK_1_2(_name, _id, _base, _len, _features, _sblk) \
+-	{\
+-	.name = _name, .id = _id, \
+-	.base = _base, .len = _len, \
+-	.features = BIT(DPU_DSC_HW_REV_1_2) | _features, \
+-	.sblk = &_sblk, \
+-	}
+-
+ /*************************************************************
+  * INTF sub blocks config
+  *************************************************************/
+@@ -663,6 +648,26 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = {
+ 	},
+ };
+ 
++static const struct dpu_vbif_cfg sm8550_vbif[] = {
++	{
++	.name = "vbif_rt", .id = VBIF_RT,
++	.base = 0, .len = 0x1040,
++	.features = BIT(DPU_VBIF_QOS_REMAP),
++	.xin_halt_timeout = 0x4000,
++	.qos_rp_remap_size = 0x40,
++	.qos_rt_tbl = {
++		.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
++		.priority_lvl = sdm845_rt_pri_lvl,
++		},
++	.qos_nrt_tbl = {
++		.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
++		.priority_lvl = sdm845_nrt_pri_lvl,
++		},
++	.memtype_count = 16,
++	.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
++	},
++};
++
+ /*************************************************************
+  * PERF data config
+  *************************************************************/
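
[Note: the DPU catalog churn above all has one shape: the DSC_BLK()/DSC_BLK_1_2()
wrapper macros are deleted and every table entry is spelled out with designated
initializers, so per-SoC differences in .len, .features and .sblk no longer have
to be funneled through macro parameters. Side by side (the macro is copied from
the removed lines; the struct fields are as used in the hunks above):]

    /* removed helper: every entry was forced to .len = 0x140 */
    #define DSC_BLK(_name, _id, _base, _features) \
            { .name = _name, .id = _id, .base = _base, .len = 0x140, \
              .features = _features, }

    /* open-coded replacement: each entry can differ freely */
    static const struct dpu_dsc_cfg example_dsc[] = {
            {
                    .name = "dsc_0", .id = DSC_0,
                    .base = 0x80000, .len = 0x140,
                    .features = BIT(DPU_DSC_OUTPUT_CTRL),
            },
    };
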
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+index bd2c4ac456017..0d5ff03cb0910 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -130,8 +130,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ {
+ 	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ 
+-	if (state->fb)
+-		drm_framebuffer_put(state->fb);
++	__drm_atomic_helper_plane_destroy_state(state);
+ 
+ 	kfree(pstate);
+ }
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index acfe1b31e0792..add72bbc28b17 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -192,5 +192,5 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ 	new_blk->base_addr = base_addr;
+ 
+ 	msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
+-	list_add(&new_blk->node, &disp_state->blocks);
++	list_add_tail(&new_blk->node, &disp_state->blocks);
+ }
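
[Note: list_add() inserts at the head of a list, so iteration sees entries
newest-first; list_add_tail() appends, preserving insertion order. For a
snapshot that should read in capture order, the tail variant above is the
right one. Minimal sketch:]

    struct blk { struct list_head node; };

    static void example_order(struct blk *a, struct blk *b)
    {
            LIST_HEAD(blocks);

            list_add_tail(&a->node, &blocks);   /* captured first */
            list_add_tail(&b->node, &blocks);   /* captured second */

            /* list_for_each_entry() now visits a, then b; with
             * list_add() it would have visited b first */
    }
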
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index b38d0e95cd542..03196fbfa4d79 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1189,7 +1189,9 @@ static const struct panel_desc auo_t215hvn01 = {
+ 	.delay = {
+ 		.disable = 5,
+ 		.unprepare = 1000,
+-	}
++	},
++	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+ static const struct drm_display_mode avic_tm070ddh03_mode = {
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 5819737c21c67..a6f3c811ceb8e 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -9534,17 +9534,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -9591,21 +9582,14 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(rdev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(rdev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 8d5e4b25609d5..a91012447b56e 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -7131,17 +7131,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -7188,22 +7179,14 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(rdev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(rdev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
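
[Note: both radeon hunks replace an open-coded read/modify/write of the Link
Control register with the PCI core helpers. pcie_capability_set_word(dev, pos,
set) ORs bits in, and pcie_capability_clear_and_set_word(dev, pos, clear, set)
does, roughly, the following (simplified sketch of the core semantics; the real
helpers also special-case devices lacking the capability):]

    static int example_clear_and_set_word(struct pci_dev *dev, int pos,
                                          u16 clear, u16 set)
    {
            u16 val;
            int ret;

            ret = pcie_capability_read_word(dev, pos, &val);
            if (ret)
                    return ret;

            val &= ~clear;
            val |= set;
            return pcie_capability_write_word(dev, pos, val);
    }
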
+diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
+index 4d2677dcd8315..68ded2e34e1cf 100644
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -468,7 +468,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 
+ 	dpaux->irq = platform_get_irq(pdev, 0);
+ 	if (dpaux->irq < 0)
+-		return -ENXIO;
++		return dpaux->irq;
+ 
+ 	if (!pdev->dev.pm_domain) {
+ 		dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
+index c2677d081a7b6..13ae148f59b9b 100644
+--- a/drivers/gpu/drm/tiny/repaper.c
++++ b/drivers/gpu/drm/tiny/repaper.c
+@@ -533,7 +533,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
+ 	DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
+ 		  epd->factored_stage_time);
+ 
+-	buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
++	buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
+ 	if (!buf) {
+ 		ret = -ENOMEM;
+ 		goto out_exit;
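
[Note: the repaper panels are monochrome, one bit per pixel, so the staging
buffer needs width * height / 8 bytes; kmalloc_array(fb->width, fb->height, ...)
allocated one byte per pixel, eight times too much. As an arithmetic check
(assuming the 2.7" 264x176 panel):]

    /* 264 * 176 = 46464 pixels; at 1 bpp that is 46464 / 8 = 5808 bytes,
     * versus the 46464 bytes the old kmalloc_array() call requested. */
    buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
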
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index bab862484d429..068413be65275 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -227,7 +227,9 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ 	dpsub->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, dpsub);
+ 
+-	dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
++	ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
++	if (ret)
++		return ret;
+ 
+ 	/* Try the reserved memory. Proceed if there's none. */
+ 	of_reserved_mem_device_init(&pdev->dev);
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 851ee86eff32a..40a5645f8fe81 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -988,6 +988,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			return;
+ 
+ 		case 0x3c: /* Invert */
++			device->quirks &= ~HID_QUIRK_NOINVERT;
+ 			map_key_clear(BTN_TOOL_RUBBER);
+ 			break;
+ 
+@@ -1013,9 +1014,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x45: /* ERASER */
+ 			/*
+ 			 * This event is reported when eraser tip touches the surface.
+-			 * Actual eraser (BTN_TOOL_RUBBER) is set by Invert usage when
+-			 * tool gets in proximity.
++			 * Actual eraser (BTN_TOOL_RUBBER) is set and released either
++			 * by Invert if tool reports proximity or by Eraser directly.
+ 			 */
++			if (!test_bit(BTN_TOOL_RUBBER, input->keybit)) {
++				device->quirks |= HID_QUIRK_NOINVERT;
++				set_bit(BTN_TOOL_RUBBER, input->keybit);
++			}
+ 			map_key_clear(BTN_TOUCH);
+ 			break;
+ 
+@@ -1580,6 +1585,15 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		else if (report->tool != BTN_TOOL_RUBBER)
+ 			/* value is off, tool is not rubber, ignore */
+ 			return;
++		else if (*quirks & HID_QUIRK_NOINVERT &&
++			 !test_bit(BTN_TOUCH, input->key)) {
++			/*
++			 * There is no invert to release the tool, let hid_input
++			 * send BTN_TOUCH with scancode and release the tool after.
++			 */
++			hid_report_release_tool(report, input, BTN_TOOL_RUBBER);
++			return;
++		}
+ 
+ 		/* let hid-input set BTN_TOUCH */
+ 		break;
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 62180414efccd..e6a8b6d8eab70 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1285,6 +1285,9 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 		 * 50 msec should give the receiver enough time to be ready.
+ 		 */
+ 		msleep(50);
++
++		if (retval)
++			return retval;
+ 	}
+ 
+ 	/*
+@@ -1306,7 +1309,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 	buf[5] = 0x09;
+ 	buf[6] = 0x00;
+ 
+-	hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
++	retval = hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
+ 			HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
+ 			HID_REQ_SET_REPORT);
+ 
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 129b01be488d2..09ba2086c95ce 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -275,21 +275,22 @@ static int __hidpp_send_report(struct hid_device *hdev,
+ }
+ 
+ /*
+- * hidpp_send_message_sync() returns 0 in case of success, and something else
+- * in case of a failure.
+- * - If ' something else' is positive, that means that an error has been raised
+- *   by the protocol itself.
+- * - If ' something else' is negative, that means that we had a classic error
+- *   (-ENOMEM, -EPIPE, etc...)
++ * Effectively send the message to the device, waiting for its answer.
++ *
++ * Must be called with hidpp->send_mutex locked
++ *
++ * Same return protocol as hidpp_send_message_sync():
++ * - success on 0
++ * - negative error means transport error
++ * - positive value means protocol error
+  */
+-static int hidpp_send_message_sync(struct hidpp_device *hidpp,
++static int __do_hidpp_send_message_sync(struct hidpp_device *hidpp,
+ 	struct hidpp_report *message,
+ 	struct hidpp_report *response)
+ {
+-	int ret = -1;
+-	int max_retries = 3;
++	int ret;
+ 
+-	mutex_lock(&hidpp->send_mutex);
++	__must_hold(&hidpp->send_mutex);
+ 
+ 	hidpp->send_receive_buf = response;
+ 	hidpp->answer_available = false;
+@@ -300,47 +301,74 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
+ 	 */
+ 	*response = *message;
+ 
+-	for (; max_retries != 0 && ret; max_retries--) {
+-		ret = __hidpp_send_report(hidpp->hid_dev, message);
++	ret = __hidpp_send_report(hidpp->hid_dev, message);
++	if (ret) {
++		dbg_hid("__hidpp_send_report returned err: %d\n", ret);
++		memset(response, 0, sizeof(struct hidpp_report));
++		return ret;
++	}
+ 
+-		if (ret) {
+-			dbg_hid("__hidpp_send_report returned err: %d\n", ret);
+-			memset(response, 0, sizeof(struct hidpp_report));
+-			break;
+-		}
++	if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
++				5*HZ)) {
++		dbg_hid("%s:timeout waiting for response\n", __func__);
++		memset(response, 0, sizeof(struct hidpp_report));
++		return -ETIMEDOUT;
++	}
+ 
+-		if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
+-					5*HZ)) {
+-			dbg_hid("%s:timeout waiting for response\n", __func__);
+-			memset(response, 0, sizeof(struct hidpp_report));
+-			ret = -ETIMEDOUT;
+-			break;
+-		}
++	if (response->report_id == REPORT_ID_HIDPP_SHORT &&
++	    response->rap.sub_id == HIDPP_ERROR) {
++		ret = response->rap.params[1];
++		dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
++		return ret;
++	}
+ 
+-		if (response->report_id == REPORT_ID_HIDPP_SHORT &&
+-		    response->rap.sub_id == HIDPP_ERROR) {
+-			ret = response->rap.params[1];
+-			dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
++	if ((response->report_id == REPORT_ID_HIDPP_LONG ||
++	     response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
++	    response->fap.feature_index == HIDPP20_ERROR) {
++		ret = response->fap.params[1];
++		dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++/*
++ * hidpp_send_message_sync() returns 0 in case of success, and something else
++ * in case of a failure.
++ *
++ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
++ * value.
++ */
++static int hidpp_send_message_sync(struct hidpp_device *hidpp,
++	struct hidpp_report *message,
++	struct hidpp_report *response)
++{
++	int ret;
++	int max_retries = 3;
++
++	mutex_lock(&hidpp->send_mutex);
++
++	do {
++		ret = __do_hidpp_send_message_sync(hidpp, message, response);
++		if (ret != HIDPP20_ERROR_BUSY)
+ 			break;
+-		}
+ 
+-		if ((response->report_id == REPORT_ID_HIDPP_LONG ||
+-		     response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
+-		    response->fap.feature_index == HIDPP20_ERROR) {
+-			ret = response->fap.params[1];
+-			if (ret != HIDPP20_ERROR_BUSY) {
+-				dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
+-				break;
+-			}
+-			dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
+-		}
+-	}
++		dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
++	} while (--max_retries);
+ 
+ 	mutex_unlock(&hidpp->send_mutex);
+ 	return ret;
+ 
+ }
+ 
++/*
++ * hidpp_send_fap_command_sync() returns 0 in case of success, and something else
++ * in case of a failure.
++ *
++ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
++ * value.
++ */
+ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
+ 	u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count,
+ 	struct hidpp_report *response)
+@@ -373,6 +401,13 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
+ 	return ret;
+ }
+ 
++/*
++ * hidpp_send_rap_command_sync() returns 0 in case of success, and something else
++ * in case of a failure.
++ *
++ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
++ * value.
++ */
+ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
+ 	u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count,
+ 	struct hidpp_report *response)
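
[Note: the refactor above splits the one-shot transaction out into
__do_hidpp_send_message_sync() and leaves only the retry policy in the caller,
which now loops solely on the protocol's BUSY error. The skeleton of such a
bounded retry loop (a sketch; do_one_transaction() is hypothetical and -EBUSY
stands in for HIDPP20_ERROR_BUSY):]

    static int send_with_retry(void)
    {
            int max_retries = 3;
            int ret;

            do {
                    ret = do_one_transaction();
                    if (ret != -EBUSY)
                            break;          /* success or a hard error */
            } while (--max_retries);        /* at most three attempts */

            return ret;
    }
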
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e31be0cb8b850..521b2ffb42449 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1594,7 +1594,6 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app)
+ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ {
+ 	struct mt_device *td = hid_get_drvdata(hdev);
+-	char *name;
+ 	const char *suffix = NULL;
+ 	struct mt_report_data *rdata;
+ 	struct mt_application *mt_application = NULL;
+@@ -1645,15 +1644,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 		break;
+ 	}
+ 
+-	if (suffix) {
+-		name = devm_kzalloc(&hi->input->dev,
+-				    strlen(hdev->name) + strlen(suffix) + 2,
+-				    GFP_KERNEL);
+-		if (name) {
+-			sprintf(name, "%s %s", hdev->name, suffix);
+-			hi->input->name = name;
+-		}
+-	}
++	if (suffix)
++		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
++						 "%s %s", hdev->name, suffix);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
+index a928ad2be62db..9c44974135079 100644
+--- a/drivers/hid/hid-nvidia-shield.c
++++ b/drivers/hid/hid-nvidia-shield.c
+@@ -164,7 +164,7 @@ static struct input_dev *shield_allocate_input_dev(struct hid_device *hdev,
+ 	idev->id.product = hdev->product;
+ 	idev->id.version = hdev->version;
+ 	idev->uniq = hdev->uniq;
+-	idev->name = devm_kasprintf(&idev->dev, GFP_KERNEL, "%s %s", hdev->name,
++	idev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
+ 				    name_suffix);
+ 	if (!idev->name)
+ 		goto err_name;
+@@ -513,21 +513,22 @@ static struct shield_device *thunderstrike_create(struct hid_device *hdev)
+ 
+ 	hid_set_drvdata(hdev, shield_dev);
+ 
++	ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect);
++	if (IS_ERR(ts->haptics_dev))
++		return ERR_CAST(ts->haptics_dev);
++
+ 	ret = thunderstrike_led_create(ts);
+ 	if (ret) {
+ 		hid_err(hdev, "Failed to create Thunderstrike LED instance\n");
+-		return ERR_PTR(ret);
+-	}
+-
+-	ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect);
+-	if (IS_ERR(ts->haptics_dev))
+ 		goto err;
++	}
+ 
+ 	hid_info(hdev, "Registered Thunderstrike controller\n");
+ 	return shield_dev;
+ 
+ err:
+-	led_classdev_unregister(&ts->led_dev);
++	if (ts->haptics_dev)
++		input_unregister_device(ts->haptics_dev);
+ 	return ERR_CAST(ts->haptics_dev);
+ }
+ 
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index f67835f9ed4cc..ad74cbc9a0aa5 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -85,10 +85,8 @@ static int uclogic_input_configured(struct hid_device *hdev,
+ {
+ 	struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
+ 	struct uclogic_params *params = &drvdata->params;
+-	char *name;
+ 	const char *suffix = NULL;
+ 	struct hid_field *field;
+-	size_t len;
+ 	size_t i;
+ 	const struct uclogic_params_frame *frame;
+ 
+@@ -146,14 +144,9 @@ static int uclogic_input_configured(struct hid_device *hdev,
+ 		}
+ 	}
+ 
+-	if (suffix) {
+-		len = strlen(hdev->name) + 2 + strlen(suffix);
+-		name = devm_kzalloc(&hi->input->dev, len, GFP_KERNEL);
+-		if (name) {
+-			snprintf(name, len, "%s %s", hdev->name, suffix);
+-			hi->input->name = name;
+-		}
+-	}
++	if (suffix)
++		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
++						 "%s %s", hdev->name, suffix);
+ 
+ 	return 0;
+ }
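
[Note: the input-name hunks in hid-multitouch, hid-nvidia-shield and hid-uclogic
above all converge on devm_kasprintf(), which sizes, allocates device-managed
memory and formats in one call, and they tie the allocation's lifetime to the HID
device rather than the transient input device. The shape of the replacement; the
NULL check is an addition of this sketch, not of the patch, for callers that must
not continue with a missing name:]

    hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
                                     "%s %s", hdev->name, suffix);
    if (!hi->input->name)
            return -ENOMEM;         /* allocation failure */
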
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 67f95a29aeca5..edbb38f6956b9 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2287,7 +2287,8 @@ static int vmbus_acpi_add(struct platform_device *pdev)
+ 	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+ 	 * firmware) is the VMOD that has the mmio ranges. Get that.
+ 	 */
+-	for (ancestor = acpi_dev_parent(device); ancestor;
++	for (ancestor = acpi_dev_parent(device);
++	     ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
+ 	     ancestor = acpi_dev_parent(ancestor)) {
+ 		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+ 					     vmbus_walk_resources, NULL);
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index f52a539eb33e9..51f9c2db403e7 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -340,7 +340,7 @@ static const struct ec_board_info board_info_crosshair_x670e_hero = {
+ 	.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ 		SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ 		SENSOR_SET_TEMP_WATER,
+-	.mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
++	.mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ 	.family = family_amd_600_series,
+ };
+ 
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index bff10f4b56e19..13f0c08360638 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -434,7 +434,7 @@ static umode_t tmp51x_is_visible(const void *_data,
+ 
+ 	switch (type) {
+ 	case hwmon_temp:
+-		if (data->id == tmp512 && channel == 4)
++		if (data->id == tmp512 && channel == 3)
+ 			return 0;
+ 		switch (attr) {
+ 		case hwmon_temp_input:
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index 3e2e135cb8f6d..dbf508fdd8d16 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -494,19 +494,18 @@ static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
+ 
+ /* acpi_get_dsd_graph	- Find the _DSD Graph property for the given device. */
+ static const union acpi_object *
+-acpi_get_dsd_graph(struct acpi_device *adev)
++acpi_get_dsd_graph(struct acpi_device *adev, struct acpi_buffer *buf)
+ {
+ 	int i;
+-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ 	acpi_status status;
+ 	const union acpi_object *dsd;
+ 
+ 	status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL,
+-					    &buf, ACPI_TYPE_PACKAGE);
++					    buf, ACPI_TYPE_PACKAGE);
+ 	if (ACPI_FAILURE(status))
+ 		return NULL;
+ 
+-	dsd = buf.pointer;
++	dsd = buf->pointer;
+ 
+ 	/*
+ 	 * _DSD property consists of tuples { Prop_UUID, Package() }
+@@ -557,12 +556,12 @@ acpi_validate_coresight_graph(const union acpi_object *cs_graph)
+  * returns NULL.
+  */
+ static const union acpi_object *
+-acpi_get_coresight_graph(struct acpi_device *adev)
++acpi_get_coresight_graph(struct acpi_device *adev, struct acpi_buffer *buf)
+ {
+ 	const union acpi_object *graph_list, *graph;
+ 	int i, nr_graphs;
+ 
+-	graph_list = acpi_get_dsd_graph(adev);
++	graph_list = acpi_get_dsd_graph(adev, buf);
+ 	if (!graph_list)
+ 		return graph_list;
+ 
+@@ -663,18 +662,24 @@ static int acpi_coresight_parse_graph(struct device *dev,
+ 				      struct acpi_device *adev,
+ 				      struct coresight_platform_data *pdata)
+ {
++	int ret = 0;
+ 	int i, nlinks;
+ 	const union acpi_object *graph;
+ 	struct coresight_connection conn, zero_conn = {};
+ 	struct coresight_connection *new_conn;
++	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ 
+-	graph = acpi_get_coresight_graph(adev);
++	graph = acpi_get_coresight_graph(adev, &buf);
++	/*
++	 * There are no graph connections, which is fine for some components.
++	 * e.g., ETE
++	 */
+ 	if (!graph)
+-		return -ENOENT;
++		goto free;
+ 
+ 	nlinks = graph->package.elements[2].integer.value;
+ 	if (!nlinks)
+-		return 0;
++		goto free;
+ 
+ 	for (i = 0; i < nlinks; i++) {
+ 		const union acpi_object *link = &graph->package.elements[3 + i];
+@@ -682,17 +687,28 @@ static int acpi_coresight_parse_graph(struct device *dev,
+ 
+ 		conn = zero_conn;
+ 		dir = acpi_coresight_parse_link(adev, link, &conn);
+-		if (dir < 0)
+-			return dir;
++		if (dir < 0) {
++			ret = dir;
++			goto free;
++		}
+ 
+ 		if (dir == ACPI_CORESIGHT_LINK_MASTER) {
+ 			new_conn = coresight_add_out_conn(dev, pdata, &conn);
+-			if (IS_ERR(new_conn))
+-				return PTR_ERR(new_conn);
++			if (IS_ERR(new_conn)) {
++				ret = PTR_ERR(new_conn);
++				goto free;
++			}
+ 		}
+ 	}
+ 
+-	return 0;
++free:
++	/*
++	 * When ACPI fails to alloc a buffer, it will free the buffer
++	 * created via ACPI_ALLOCATE_BUFFER and set it to NULL.
++	 * ACPI_FREE can handle NULL pointers, so free it directly.
++	 */
++	ACPI_FREE(buf.pointer);
++	return ret;
+ }
+ 
+ /*
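
A minimal sketch of the caller-owned buffer pattern this hunk adopts
(illustrative code, not part of the patch; assumes a valid acpi_handle):

#include <linux/errno.h>
#include <acpi/acpi.h>

static int example_read_dsd(acpi_handle handle)
{
        /* ACPICA allocates into a caller-owned buffer */
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;

        status = acpi_evaluate_object_typed(handle, "_DSD", NULL,
                                            &buf, ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status))
                return -ENOENT;

        /* ... inspect buf.pointer (a union acpi_object *) here ... */

        /* ACPI_FREE() accepts NULL, so one unconditional free path suffices */
        ACPI_FREE(buf.pointer);
        return 0;
}
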
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index 79d8c64eac494..7406b65e2cdda 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -452,7 +452,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
+ 		return -EINVAL;
+ 
+ 	/* wrap head around to the amount of space we have */
+-	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
++	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);
+ 
+ 	/* find the page to write to */
+ 	buf->cur = head / PAGE_SIZE;
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 766325de0e29b..66dc5f97a0098 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -45,7 +45,8 @@ struct etr_perf_buffer {
+ };
+ 
+ /* Convert the perf index to an offset within the ETR buffer */
+-#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
++#define PERF_IDX2OFF(idx, buf)		\
++		((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
+ 
+ /* Lower limit for ETR hardware buffer */
+ #define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M
+@@ -1267,7 +1268,7 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ 	 * than the size requested via sysfs.
+ 	 */
+ 	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
+-		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
++		etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
+ 					    0, node, NULL);
+ 		if (!IS_ERR(etr_buf))
+ 			goto done;
+diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
+index b97da39652d26..0ee48c5ba764d 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc.h
++++ b/drivers/hwtracing/coresight/coresight-tmc.h
+@@ -325,7 +325,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ static inline unsigned long
+ tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+ {
+-	return sg_table->data_pages.nr_pages << PAGE_SHIFT;
++	return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT;
+ }
+ 
+ struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
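
The casts added in the tmc-etf/tmc-etr/tmc.h hunks above all guard the same
pitfall: nr_pages is 32-bit, so nr_pages << PAGE_SHIFT is evaluated in 32-bit
arithmetic and wraps for buffers of 4 GiB and up. A standalone sketch
(assumes 4 KiB pages and 64-bit long; not driver code):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
        unsigned int nr_pages = 1u << 20;       /* 1Mi pages == 4 GiB */

        unsigned long bad  = nr_pages << PAGE_SHIFT;    /* 32-bit shift wraps to 0 */
        unsigned long good = (unsigned long)nr_pages << PAGE_SHIFT;

        printf("bad=%lu good=%lu\n", bad, good);        /* bad=0 good=4294967296 */
        return 0;
}
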
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
+index 7720619909d65..e20c1c6acc731 100644
+--- a/drivers/hwtracing/coresight/coresight-trbe.c
++++ b/drivers/hwtracing/coresight/coresight-trbe.c
+@@ -1225,6 +1225,16 @@ static void arm_trbe_enable_cpu(void *info)
+ 	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+ }
+ 
++static void arm_trbe_disable_cpu(void *info)
++{
++	struct trbe_drvdata *drvdata = info;
++	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
++
++	disable_percpu_irq(drvdata->irq);
++	trbe_reset_local(cpudata);
++}
++
++
+ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+ {
+ 	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+@@ -1244,10 +1254,13 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
+ 	if (!desc.name)
+ 		goto cpu_clear;
+ 
++	desc.pdata = coresight_get_platform_data(dev);
++	if (IS_ERR(desc.pdata))
++		goto cpu_clear;
++
+ 	desc.type = CORESIGHT_DEV_TYPE_SINK;
+ 	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
+ 	desc.ops = &arm_trbe_cs_ops;
+-	desc.pdata = dev_get_platdata(dev);
+ 	desc.groups = arm_trbe_groups;
+ 	desc.dev = dev;
+ 	trbe_csdev = coresight_register(&desc);
+@@ -1326,18 +1339,12 @@ cpu_clear:
+ 	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+ }
+ 
+-static void arm_trbe_remove_coresight_cpu(void *info)
++static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+ {
+-	int cpu = smp_processor_id();
+-	struct trbe_drvdata *drvdata = info;
+-	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ 	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+ 
+-	disable_percpu_irq(drvdata->irq);
+-	trbe_reset_local(cpudata);
+ 	if (trbe_csdev) {
+ 		coresight_unregister(trbe_csdev);
+-		cpudata->drvdata = NULL;
+ 		coresight_set_percpu_sink(cpu, NULL);
+ 	}
+ }
+@@ -1366,8 +1373,10 @@ static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+ {
+ 	int cpu;
+ 
+-	for_each_cpu(cpu, &drvdata->supported_cpus)
+-		smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
++	for_each_cpu(cpu, &drvdata->supported_cpus) {
++		smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
++		arm_trbe_remove_coresight_cpu(drvdata, cpu);
++	}
+ 	free_percpu(drvdata->cpudata);
+ 	return 0;
+ }
+@@ -1406,12 +1415,8 @@ static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+ 
+-	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+-		struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+-
+-		disable_percpu_irq(drvdata->irq);
+-		trbe_reset_local(cpudata);
+-	}
++	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
++		arm_trbe_disable_cpu(drvdata);
+ 	return 0;
+ }
+ 
+@@ -1479,7 +1484,6 @@ static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
+ 
+ static int arm_trbe_device_probe(struct platform_device *pdev)
+ {
+-	struct coresight_platform_data *pdata;
+ 	struct trbe_drvdata *drvdata;
+ 	struct device *dev = &pdev->dev;
+ 	int ret;
+@@ -1494,12 +1498,7 @@ static int arm_trbe_device_probe(struct platform_device *pdev)
+ 	if (!drvdata)
+ 		return -ENOMEM;
+ 
+-	pdata = coresight_get_platform_data(dev);
+-	if (IS_ERR(pdata))
+-		return PTR_ERR(pdata);
+-
+ 	dev_set_drvdata(dev, drvdata);
+-	dev->platform_data = pdata;
+ 	drvdata->pdev = pdev;
+ 	ret = arm_trbe_probe_irq(pdev, drvdata);
+ 	if (ret)
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 0d63b732ef0c8..2fefbe55c1675 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -789,6 +789,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ 				 */
+ 				break;
+ 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
++				/* No I3C devices attached */
++				if (dev_nb == 0)
++					break;
++
+ 				/*
+ 				 * A slave device nacked the address, this is
+ 				 * allowed only once, DAA will be stopped and
+@@ -1263,11 +1267,17 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ {
+ 	struct svc_i3c_master *master = to_svc_i3c_master(m);
+ 	bool broadcast = cmd->id < 0x80;
++	int ret;
+ 
+ 	if (broadcast)
+-		return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
++		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ 	else
+-		return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++
++	if (ret)
++		cmd->err = I3C_ERROR_M2;
++
++	return ret;
+ }
+ 
+ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
+index 524327ea36631..e0a860ab9e58f 100644
+--- a/drivers/iio/accel/adxl313_i2c.c
++++ b/drivers/iio/accel/adxl313_i2c.c
+@@ -40,8 +40,8 @@ static const struct regmap_config adxl31x_i2c_regmap_config[] = {
+ 
+ static const struct i2c_device_id adxl313_i2c_id[] = {
+ 	{ .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+-	{ .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+-	{ .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
++	{ .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
++	{ .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
+ 	{ }
+ };
+ 
+diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
+index 999da9c798668..381aa57976417 100644
+--- a/drivers/infiniband/core/uverbs_std_types_counters.c
++++ b/drivers/infiniband/core/uverbs_std_types_counters.c
+@@ -107,6 +107,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
+ 		return ret;
+ 
+ 	uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
++	if (IS_ERR(uattr))
++		return PTR_ERR(uattr);
+ 	read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
+ 	read_attr.counters_buff = uverbs_zalloc(
+ 		attrs, array_size(read_attr.ncounters, sizeof(u64)));
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index ea81b2497511a..5b6d581eb5f41 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -112,12 +112,32 @@ struct bnxt_re_gsi_context {
+ #define BNXT_RE_NQ_IDX			1
+ #define BNXT_RE_GEN_P5_MAX_VF		64
+ 
++struct bnxt_re_pacing {
++	u64 dbr_db_fifo_reg_off;
++	void *dbr_page;
++	u64 dbr_bar_addr;
++	u32 pacing_algo_th;
++	u32 do_pacing_save;
++	u32 dbq_pacing_time; /* ms */
++	u32 dbr_def_do_pacing;
++	bool dbr_pacing;
++};
++
++#define BNXT_RE_DBR_PACING_TIME 5 /* ms */
++#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
++#define BNXT_RE_PACING_ALARM_TH_MULTIPLE 2 /* Multiple of pacing algo threshold */
++/* Default do_pacing value when there is no congestion */
++#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
++#define BNXT_RE_DB_FIFO_ROOM_MASK 0x1FFF8000
++#define BNXT_RE_MAX_FIFO_DEPTH 0x2c00
++#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
++#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
++
+ struct bnxt_re_dev {
+ 	struct ib_device		ibdev;
+ 	struct list_head		list;
+ 	unsigned long			flags;
+ #define BNXT_RE_FLAG_NETDEV_REGISTERED		0
+-#define BNXT_RE_FLAG_GOT_MSIX			2
+ #define BNXT_RE_FLAG_HAVE_L2_REF		3
+ #define BNXT_RE_FLAG_RCFW_CHANNEL_EN		4
+ #define BNXT_RE_FLAG_QOS_WORK_REG		5
+@@ -171,6 +191,7 @@ struct bnxt_re_dev {
+ 	atomic_t nq_alloc_cnt;
+ 	u32 is_virtfn;
+ 	u32 num_vfs;
++	struct bnxt_re_pacing pacing;
+ };
+ 
+ #define to_bnxt_re_dev(ptr, member)	\
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 63e98e2d35962..120e588fb13ba 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -432,9 +432,92 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
+ 		return rc;
+ 	cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
+ 
++	cctx->modes.dbr_pacing =
++		le32_to_cpu(resp.flags_ext2) & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ?
++		true : false;
+ 	return 0;
+ }
+ 
++static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
++{
++	struct hwrm_func_dbr_pacing_qcfg_output resp = {};
++	struct hwrm_func_dbr_pacing_qcfg_input req = {};
++	struct bnxt_en_dev *en_dev = rdev->en_dev;
++	struct bnxt_qplib_chip_ctx *cctx;
++	struct bnxt_fw_msg fw_msg = {};
++	int rc;
++
++	cctx = rdev->chip_ctx;
++	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
++	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
++			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
++	rc = bnxt_send_msg(en_dev, &fw_msg);
++	if (rc)
++		return rc;
++
++	if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
++	    FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
++		FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
++		cctx->dbr_stat_db_fifo =
++			le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
++			~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
++	return 0;
++}
++
++/* Update the pacing tunable parameters to the default values */
++static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
++{
++	struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
++
++	pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
++	pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
++	pacing_data->alarm_th =
++		pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
++}
++
++static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
++{
++	if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
++		return -EIO;
++
++	/* Allocate a page for app use */
++	rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
++	if (!rdev->pacing.dbr_page)
++		return -ENOMEM;
++
++	memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
++	rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
++
++	/* MAP HW window 2 for reading db fifo depth */
++	writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
++	       rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
++	rdev->pacing.dbr_db_fifo_reg_off =
++		(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
++		 BNXT_RE_GRC_FIFO_REG_BASE;
++	rdev->pacing.dbr_bar_addr =
++		pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
++
++	rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
++	rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
++	rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
++	rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
++	rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
++	rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
++	rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
++	rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
++	bnxt_re_set_default_pacing_data(rdev);
++	return 0;
++}
++
++static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
++{
++	if (rdev->pacing.dbr_page)
++		free_page((u64)rdev->pacing.dbr_page);
++
++	rdev->pacing.dbr_page = NULL;
++	rdev->pacing.dbr_pacing = false;
++}
++
+ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
+ 				 u16 fw_ring_id, int type)
+ {
+@@ -942,8 +1025,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ 
+ 	/* Configure and allocate resources for qplib */
+ 	rdev->qplib_res.rcfw = &rdev->rcfw;
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+-				     rdev->is_virtfn);
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ 	if (rc)
+ 		goto fail;
+ 
+@@ -1214,8 +1296,11 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
+ 		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
+ 		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
+ 	}
+-	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
+-		rdev->num_msix = 0;
++
++	rdev->num_msix = 0;
++
++	if (rdev->pacing.dbr_pacing)
++		bnxt_re_deinitialize_dbr_pacing(rdev);
+ 
+ 	bnxt_re_destroy_chip_ctx(rdev);
+ 	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+@@ -1271,7 +1356,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ 	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ 		  rdev->en_dev->ulp_tbl->msix_requested);
+ 	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
+-	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
+ 
+ 	bnxt_re_query_hwrm_intf_version(rdev);
+ 
+@@ -1311,8 +1395,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ 		goto free_ring;
+ 	}
+ 
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+-				     rdev->is_virtfn);
++	if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
++		rc = bnxt_re_initialize_dbr_pacing(rdev);
++		if (!rc) {
++			rdev->pacing.dbr_pacing = true;
++		} else {
++			ibdev_err(&rdev->ibdev,
++				  "DBR pacing disabled with error : %d\n", rc);
++			rdev->pacing.dbr_pacing = false;
++		}
++	}
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ 	if (rc)
+ 		goto disable_rcfw;
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index d850a553821e3..57161d303c257 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -48,6 +48,7 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
+ struct bnxt_qplib_drv_modes {
+ 	u8	wqe_mode;
+ 	bool db_push;
++	bool dbr_pacing;
+ };
+ 
+ struct bnxt_qplib_chip_ctx {
+@@ -58,6 +59,17 @@ struct bnxt_qplib_chip_ctx {
+ 	u16	hwrm_cmd_max_timeout;
+ 	struct bnxt_qplib_drv_modes modes;
+ 	u64	hwrm_intf_ver;
++	u32     dbr_stat_db_fifo;
++};
++
++struct bnxt_qplib_db_pacing_data {
++	u32 do_pacing;
++	u32 pacing_th;
++	u32 alarm_th;
++	u32 fifo_max_depth;
++	u32 fifo_room_mask;
++	u32 fifo_room_shift;
++	u32 grc_reg_offset;
+ };
+ 
+ #define BNXT_QPLIB_DBR_PF_DB_OFFSET     0x10000
+@@ -271,6 +283,7 @@ struct bnxt_qplib_res {
+ 	struct mutex                    dpi_tbl_lock;
+ 	bool				prio;
+ 	bool                            is_vf;
++	struct bnxt_qplib_db_pacing_data *pacing_data;
+ };
+ 
+ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+@@ -467,4 +480,10 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+ 	return dev_cap_flags &
+ 		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+ }
++
++static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
++{
++	return cctx->modes.dbr_pacing;
++}
++
+ #endif /* __BNXT_QPLIB_RES_H__ */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index ab45f9d4bb02f..7a244fd506e2a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -89,7 +89,7 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ }
+ 
+ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr, bool vf)
++			    struct bnxt_qplib_dev_attr *attr)
+ {
+ 	struct creq_query_func_resp resp = {};
+ 	struct bnxt_qplib_cmdqmsg msg = {};
+@@ -121,9 +121,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ 
+ 	/* Extract the context from the side buffer */
+ 	attr->max_qp = le32_to_cpu(sb->max_qp);
+-	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+-	if (!vf)
+-		attr->max_qp += 1;
++	/* max_qp value reported by FW doesn't include the QP1 */
++	attr->max_qp += 1;
+ 	attr->max_qp_rd_atom =
+ 		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 264ef3cedc45b..d33c78b96217a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -322,7 +322,7 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ 			   struct bnxt_qplib_gid *gid, u16 gid_idx,
+ 			   const u8 *smac);
+ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr, bool vf);
++			    struct bnxt_qplib_dev_attr *attr);
+ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ 				  struct bnxt_qplib_rcfw *rcfw,
+ 				  struct bnxt_qplib_ctx *ctx);
+diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
+index 2a195c4b0f17d..3538d59521e41 100644
+--- a/drivers/infiniband/hw/efa/efa_verbs.c
++++ b/drivers/infiniband/hw/efa/efa_verbs.c
+@@ -449,12 +449,12 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 
+ 	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+ 
+-	efa_qp_user_mmap_entries_remove(qp);
+-
+ 	err = efa_destroy_qp_handle(dev, qp->qp_handle);
+ 	if (err)
+ 		return err;
+ 
++	efa_qp_user_mmap_entries_remove(qp);
++
+ 	if (qp->rq_cpu_addr) {
+ 		ibdev_dbg(&dev->ibdev,
+ 			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
+@@ -1013,8 +1013,8 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
+ 		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
+ 
+-	efa_cq_user_mmap_entries_remove(cq);
+ 	efa_destroy_cq_idx(dev, cq->cq_idx);
++	efa_cq_user_mmap_entries_remove(cq);
+ 	if (cq->eq) {
+ 		xa_erase(&dev->cqs_xa, cq->cq_idx);
+ 		synchronize_irq(cq->eq->irq.irqn);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 84239b907de2a..bb94eb076858c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -97,6 +97,7 @@
+ #define HNS_ROCE_CQ_BANK_NUM 4
+ 
+ #define CQ_BANKID_SHIFT 2
++#define CQ_BANKID_MASK GENMASK(1, 0)
+ 
+ enum {
+ 	SERV_TYPE_RC,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 8f7eb11066b43..1d998298e28fc 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -750,7 +750,8 @@ out:
+ 		qp->sq.head += nreq;
+ 		qp->next_sge = sge_idx;
+ 
+-		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
++		if (nreq == 1 && !ret &&
++		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ 			write_dwqe(hr_dev, qp, wqe);
+ 		else
+ 			update_sq_db(hr_dev, qp);
+@@ -6722,14 +6723,14 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ 	ret = hns_roce_init(hr_dev);
+ 	if (ret) {
+ 		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+-		goto error_failed_cfg;
++		goto error_failed_roce_init;
+ 	}
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ 		ret = free_mr_init(hr_dev);
+ 		if (ret) {
+ 			dev_err(hr_dev->dev, "failed to init free mr!\n");
+-			goto error_failed_roce_init;
++			goto error_failed_free_mr_init;
+ 		}
+ 	}
+ 
+@@ -6737,10 +6738,10 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ 
+ 	return 0;
+ 
+-error_failed_roce_init:
++error_failed_free_mr_init:
+ 	hns_roce_exit(hr_dev);
+ 
+-error_failed_cfg:
++error_failed_roce_init:
+ 	kfree(hr_dev->priv);
+ 
+ error_failed_kzalloc:
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 485e110ca4333..9141eadf33d2a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -219,6 +219,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
+ 	unsigned long flags;
+ 	enum ib_mtu mtu;
+ 	u32 port;
++	int ret;
+ 
+ 	port = port_num - 1;
+ 
+@@ -231,8 +232,10 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
+ 				IB_PORT_BOOT_MGMT_SUP;
+ 	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+ 	props->pkey_tbl_len = 1;
+-	props->active_width = IB_WIDTH_4X;
+-	props->active_speed = 1;
++	ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
++			       &props->active_width);
++	if (ret)
++		ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
+ 
+ 	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index d855a917f4cfa..cdc1c6de43a17 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -170,14 +170,29 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
+ 	}
+ }
+ 
+-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
++static u8 get_affinity_cq_bank(u8 qp_bank)
+ {
+-	u32 least_load = bank[0].inuse;
++	return (qp_bank >> 1) & CQ_BANKID_MASK;
++}
++
++static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
++					struct hns_roce_bank *bank)
++{
++#define INVALID_LOAD_QPNUM 0xFFFFFFFF
++	struct ib_cq *scq = init_attr->send_cq;
++	u32 least_load = INVALID_LOAD_QPNUM;
++	unsigned long cqn = 0;
+ 	u8 bankid = 0;
+ 	u32 bankcnt;
+ 	u8 i;
+ 
+-	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
++	if (scq)
++		cqn = to_hr_cq(scq)->cqn;
++
++	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
++		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
++			continue;
++
+ 		bankcnt = bank[i].inuse;
+ 		if (bankcnt < least_load) {
+ 			least_load = bankcnt;
+@@ -209,7 +224,8 @@ static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
+ 
+ 	return 0;
+ }
+-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
++static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
++		     struct ib_qp_init_attr *init_attr)
+ {
+ 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ 	unsigned long num = 0;
+@@ -220,7 +236,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ 		num = 1;
+ 	} else {
+ 		mutex_lock(&qp_table->bank_mutex);
+-		bankid = get_least_load_bankid_for_qp(qp_table->bank);
++		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
+ 
+ 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+ 					    &num);
+@@ -1082,7 +1098,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ 		goto err_buf;
+ 	}
+ 
+-	ret = alloc_qpn(hr_dev, hr_qp);
++	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
+ 		goto err_qpn;
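
The QP bank choice above is constrained so that the bank's affine CQ bank
matches the send CQ's bank: dropping the low bit of the QP bank maps the
(assumed) eight QP banks onto the four CQ banks. A sketch of that arithmetic
(illustrative only):

#define CQ_BANKID_MASK 0x3u     /* GENMASK(1, 0): four CQ banks */

/* QP banks {0,1} -> CQ bank 0, {2,3} -> 1, {4,5} -> 2, {6,7} -> 3 */
static unsigned int affinity_cq_bank(unsigned int qp_bank)
{
        return (qp_bank >> 1) & CQ_BANKID_MASK;
}
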
+diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
+index 45e3344daa048..ef47ec271e19e 100644
+--- a/drivers/infiniband/hw/irdma/ctrl.c
++++ b/drivers/infiniband/hw/irdma/ctrl.c
+@@ -1061,6 +1061,9 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ 	u64 hdr;
+ 	enum irdma_page_size page_size;
+ 
++	if (!info->total_len && !info->all_memory)
++		return -EINVAL;
++
+ 	if (info->page_size == 0x40000000)
+ 		page_size = IRDMA_PAGE_SIZE_1G;
+ 	else if (info->page_size == 0x200000)
+@@ -1126,6 +1129,9 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ 	u8 addr_type;
+ 	enum irdma_page_size page_size;
+ 
++	if (!info->total_len && !info->all_memory)
++		return -EINVAL;
++
+ 	if (info->page_size == 0x40000000)
+ 		page_size = IRDMA_PAGE_SIZE_1G;
+ 	else if (info->page_size == 0x200000)
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index 2323962cdeacb..de2f4c0514118 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -239,7 +239,7 @@ struct irdma_qv_info {
+ 
+ struct irdma_qvlist_info {
+ 	u32 num_vectors;
+-	struct irdma_qv_info qv_info[1];
++	struct irdma_qv_info qv_info[];
+ };
+ 
+ struct irdma_gen_ops {
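
Turning qv_info[1] into qv_info[] is the standard flexible-array-member
replacement for the old one-element-array idiom; kernel callers then size the
allocation with struct_size(). A userspace sketch of the pattern (names are
illustrative):

#include <stdlib.h>

struct qv_ex { unsigned int v_idx; };

struct qvlist_ex {
        unsigned int num_vectors;
        struct qv_ex qv_info[];         /* flexible array member */
};

static struct qvlist_ex *qvlist_alloc(unsigned int n)
{
        /* kernel code would write struct_size(p, qv_info, n) */
        struct qvlist_ex *p = malloc(sizeof(*p) + n * sizeof(p->qv_info[0]));

        if (p)
                p->num_vectors = n;
        return p;
}
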
+diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
+index a20709577ab0a..3b1fa5bc0a585 100644
+--- a/drivers/infiniband/hw/irdma/type.h
++++ b/drivers/infiniband/hw/irdma/type.h
+@@ -971,6 +971,7 @@ struct irdma_allocate_stag_info {
+ 	bool remote_access:1;
+ 	bool use_hmc_fcn_index:1;
+ 	bool use_pf_rid:1;
++	bool all_memory:1;
+ 	u8 hmc_fcn_index;
+ };
+ 
+@@ -998,6 +999,7 @@ struct irdma_reg_ns_stag_info {
+ 	bool use_hmc_fcn_index:1;
+ 	u8 hmc_fcn_index;
+ 	bool use_pf_rid:1;
++	bool all_memory:1;
+ };
+ 
+ struct irdma_fast_reg_stag_info {
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 9c4fe4fa90018..377c5bab5f2e0 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2552,7 +2552,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ 			       struct irdma_mr *iwmr)
+ {
+ 	struct irdma_allocate_stag_info *info;
+-	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
++	struct ib_pd *pd = iwmr->ibmr.pd;
++	struct irdma_pd *iwpd = to_iwpd(pd);
+ 	int status;
+ 	struct irdma_cqp_request *cqp_request;
+ 	struct cqp_cmds_info *cqp_info;
+@@ -2568,6 +2569,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ 	info->pd_id = iwpd->sc_pd.pd_id;
+ 	info->total_len = iwmr->len;
++	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ 	info->remote_access = true;
+ 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
+ 	cqp_info->post_sq = 1;
+@@ -2615,6 +2617,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ 	palloc = &iwpbl->pble_alloc;
+ 	iwmr->page_cnt = max_num_sg;
++	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
++	iwmr->len = max_num_sg * PAGE_SIZE;
+ 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ 				  false);
+ 	if (err_code)
+@@ -2694,7 +2698,8 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ {
+ 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ 	struct irdma_reg_ns_stag_info *stag_info;
+-	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
++	struct ib_pd *pd = iwmr->ibmr.pd;
++	struct irdma_pd *iwpd = to_iwpd(pd);
+ 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ 	struct irdma_cqp_request *cqp_request;
+ 	struct cqp_cmds_info *cqp_info;
+@@ -2713,6 +2718,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ 	stag_info->total_len = iwmr->len;
+ 	stag_info->access_rights = irdma_get_mr_access(access);
+ 	stag_info->pd_id = iwpd->sc_pd.pd_id;
++	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
+ 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ 	else
+@@ -4424,7 +4430,6 @@ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+ 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
+ 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
+ 		ah_attr->grh.sgid_index = ah->sgid_index;
+-		ah_attr->grh.sgid_index = ah->sgid_index;
+ 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
+ 		       sizeof(ah_attr->grh.dgid));
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 5111735aafaed..d0bdc2d8adc82 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -597,6 +597,10 @@ static void flush_send_queue(struct rxe_qp *qp, bool notify)
+ 	struct rxe_queue *q = qp->sq.queue;
+ 	int err;
+ 
++	/* send queue never got created. nothing to do. */
++	if (!qp->sq.queue)
++		return;
++
+ 	while ((wqe = queue_head(q, q->type))) {
+ 		if (notify) {
+ 			err = flush_send_wqe(qp, wqe);
+diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
+index 666e06a82bc9e..4d2a8ef52c850 100644
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -136,12 +136,6 @@ static inline int qp_mtu(struct rxe_qp *qp)
+ 		return IB_MTU_4096;
+ }
+ 
+-static inline int rcv_wqe_size(int max_sge)
+-{
+-	return sizeof(struct rxe_recv_wqe) +
+-		max_sge * sizeof(struct ib_sge);
+-}
+-
+ void free_rd_atomic_resource(struct resp_res *res);
+ 
+ static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index a569b111a9d2a..28e379c108bce 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -183,13 +183,63 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	atomic_set(&qp->skb_out, 0);
+ }
+ 
++static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
++		       struct ib_udata *udata,
++		       struct rxe_create_qp_resp __user *uresp)
++{
++	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
++	int wqe_size;
++	int err;
++
++	qp->sq.max_wr = init->cap.max_send_wr;
++	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
++			 init->cap.max_inline_data);
++	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
++	qp->sq.max_inline = wqe_size;
++	wqe_size += sizeof(struct rxe_send_wqe);
++
++	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
++				      QUEUE_TYPE_FROM_CLIENT);
++	if (!qp->sq.queue) {
++		rxe_err_qp(qp, "Unable to allocate send queue");
++		err = -ENOMEM;
++		goto err_out;
++	}
++
++	/* prepare info for caller to mmap send queue if user space qp */
++	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
++			   qp->sq.queue->buf, qp->sq.queue->buf_size,
++			   &qp->sq.queue->ip);
++	if (err) {
++		rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
++		goto err_free;
++	}
++
++	/* return actual capabilities to caller which may be larger
++	 * than requested
++	 */
++	init->cap.max_send_wr = qp->sq.max_wr;
++	init->cap.max_send_sge = qp->sq.max_sge;
++	init->cap.max_inline_data = qp->sq.max_inline;
++
++	return 0;
++
++err_free:
++	vfree(qp->sq.queue->buf);
++	kfree(qp->sq.queue);
++	qp->sq.queue = NULL;
++err_out:
++	return err;
++}
++
+ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 			   struct ib_qp_init_attr *init, struct ib_udata *udata,
+ 			   struct rxe_create_qp_resp __user *uresp)
+ {
+ 	int err;
+-	int wqe_size;
+-	enum queue_type type;
++
++	/* if we don't finish qp create make sure queue is valid */
++	skb_queue_head_init(&qp->req_pkts);
+ 
+ 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
+ 	if (err < 0)
+@@ -204,32 +254,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	 * (0xc000 - 0xffff).
+ 	 */
+ 	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
+-	qp->sq.max_wr		= init->cap.max_send_wr;
+-
+-	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
+-	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+-			 init->cap.max_inline_data);
+-	qp->sq.max_sge = init->cap.max_send_sge =
+-		wqe_size / sizeof(struct ib_sge);
+-	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
+-	wqe_size += sizeof(struct rxe_send_wqe);
+ 
+-	type = QUEUE_TYPE_FROM_CLIENT;
+-	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
+-				wqe_size, type);
+-	if (!qp->sq.queue)
+-		return -ENOMEM;
+-
+-	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+-			   qp->sq.queue->buf, qp->sq.queue->buf_size,
+-			   &qp->sq.queue->ip);
+-
+-	if (err) {
+-		vfree(qp->sq.queue->buf);
+-		kfree(qp->sq.queue);
+-		qp->sq.queue = NULL;
++	err = rxe_init_sq(qp, init, udata, uresp);
++	if (err)
+ 		return err;
+-	}
+ 
+ 	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+ 					       QUEUE_TYPE_FROM_CLIENT);
+@@ -248,36 +276,65 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	return 0;
+ }
+ 
++static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
++		       struct ib_udata *udata,
++		       struct rxe_create_qp_resp __user *uresp)
++{
++	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
++	int wqe_size;
++	int err;
++
++	qp->rq.max_wr = init->cap.max_recv_wr;
++	qp->rq.max_sge = init->cap.max_recv_sge;
++	wqe_size = sizeof(struct rxe_recv_wqe) +
++				qp->rq.max_sge*sizeof(struct ib_sge);
++
++	qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
++				      QUEUE_TYPE_FROM_CLIENT);
++	if (!qp->rq.queue) {
++		rxe_err_qp(qp, "Unable to allocate recv queue");
++		err = -ENOMEM;
++		goto err_out;
++	}
++
++	/* prepare info for caller to mmap recv queue if user space qp */
++	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
++			   qp->rq.queue->buf, qp->rq.queue->buf_size,
++			   &qp->rq.queue->ip);
++	if (err) {
++		rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
++		goto err_free;
++	}
++
++	/* return actual capabilities to caller which may be larger
++	 * than requested
++	 */
++	init->cap.max_recv_wr = qp->rq.max_wr;
++
++	return 0;
++
++err_free:
++	vfree(qp->rq.queue->buf);
++	kfree(qp->rq.queue);
++	qp->rq.queue = NULL;
++err_out:
++	return err;
++}
++
+ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 			    struct ib_qp_init_attr *init,
+ 			    struct ib_udata *udata,
+ 			    struct rxe_create_qp_resp __user *uresp)
+ {
+ 	int err;
+-	int wqe_size;
+-	enum queue_type type;
++
++	/* if we don't finish qp create make sure queue is valid */
++	skb_queue_head_init(&qp->resp_pkts);
+ 
+ 	if (!qp->srq) {
+-		qp->rq.max_wr		= init->cap.max_recv_wr;
+-		qp->rq.max_sge		= init->cap.max_recv_sge;
+-
+-		wqe_size = rcv_wqe_size(qp->rq.max_sge);
+-
+-		type = QUEUE_TYPE_FROM_CLIENT;
+-		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
+-					wqe_size, type);
+-		if (!qp->rq.queue)
+-			return -ENOMEM;
+-
+-		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+-				   qp->rq.queue->buf, qp->rq.queue->buf_size,
+-				   &qp->rq.queue->ip);
+-		if (err) {
+-			vfree(qp->rq.queue->buf);
+-			kfree(qp->rq.queue);
+-			qp->rq.queue = NULL;
++		err = rxe_init_rq(qp, init, udata, uresp);
++		if (err)
+ 			return err;
+-		}
+ 	}
+ 
+ 	rxe_init_task(&qp->resp.task, qp, rxe_responder);
+@@ -307,10 +364,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ 	if (srq)
+ 		rxe_get(srq);
+ 
+-	qp->pd			= pd;
+-	qp->rcq			= rcq;
+-	qp->scq			= scq;
+-	qp->srq			= srq;
++	qp->pd = pd;
++	qp->rcq = rcq;
++	qp->scq = scq;
++	qp->srq = srq;
+ 
+ 	atomic_inc(&rcq->num_wq);
+ 	atomic_inc(&scq->num_wq);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 2171f19494bca..d8c41fd626a94 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -578,10 +578,11 @@ static void save_state(struct rxe_send_wqe *wqe,
+ 		       struct rxe_send_wqe *rollback_wqe,
+ 		       u32 *rollback_psn)
+ {
+-	rollback_wqe->state     = wqe->state;
++	rollback_wqe->state = wqe->state;
+ 	rollback_wqe->first_psn = wqe->first_psn;
+-	rollback_wqe->last_psn  = wqe->last_psn;
+-	*rollback_psn		= qp->req.psn;
++	rollback_wqe->last_psn = wqe->last_psn;
++	rollback_wqe->dma = wqe->dma;
++	*rollback_psn = qp->req.psn;
+ }
+ 
+ static void rollback_state(struct rxe_send_wqe *wqe,
+@@ -589,10 +590,11 @@ static void rollback_state(struct rxe_send_wqe *wqe,
+ 			   struct rxe_send_wqe *rollback_wqe,
+ 			   u32 rollback_psn)
+ {
+-	wqe->state     = rollback_wqe->state;
++	wqe->state = rollback_wqe->state;
+ 	wqe->first_psn = rollback_wqe->first_psn;
+-	wqe->last_psn  = rollback_wqe->last_psn;
+-	qp->req.psn    = rollback_psn;
++	wqe->last_psn = rollback_wqe->last_psn;
++	wqe->dma = rollback_wqe->dma;
++	qp->req.psn = rollback_psn;
+ }
+ 
+ static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+@@ -797,6 +799,9 @@ int rxe_requester(struct rxe_qp *qp)
+ 	pkt.mask = rxe_opcode[opcode].mask;
+ 	pkt.wqe = wqe;
+ 
++	/* save wqe state before we build and send packet */
++	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
++
+ 	av = rxe_get_av(&pkt, &ah);
+ 	if (unlikely(!av)) {
+ 		rxe_dbg_qp(qp, "Failed no address vector\n");
+@@ -829,29 +834,29 @@ int rxe_requester(struct rxe_qp *qp)
+ 	if (ah)
+ 		rxe_put(ah);
+ 
+-	/*
+-	 * To prevent a race on wqe access between requester and completer,
+-	 * wqe members state and psn need to be set before calling
+-	 * rxe_xmit_packet().
+-	 * Otherwise, completer might initiate an unjustified retry flow.
+-	 */
+-	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
++	/* update wqe state as though we had sent it */
+ 	update_wqe_state(qp, wqe, &pkt);
+ 	update_wqe_psn(qp, wqe, &pkt, payload);
+ 
+ 	err = rxe_xmit_packet(qp, &pkt, skb);
+ 	if (err) {
+-		qp->need_req_skb = 1;
++		if (err != -EAGAIN) {
++			wqe->status = IB_WC_LOC_QP_OP_ERR;
++			goto err;
++		}
+ 
++		/* the packet was dropped so reset wqe to the state
++		 * before we sent it so we can try to resend
++		 */
+ 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
+ 
+-		if (err == -EAGAIN) {
+-			rxe_sched_task(&qp->req.task);
+-			goto exit;
+-		}
++		/* force a delay until the dropped packet is freed and
++		 * the send queue is drained below the low water mark
++		 */
++		qp->need_req_skb = 1;
+ 
+-		wqe->status = IB_WC_LOC_QP_OP_ERR;
+-		goto err;
++		rxe_sched_task(&qp->req.task);
++		goto exit;
+ 	}
+ 
+ 	update_state(qp, &pkt);
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 64c64f5f36a81..da470a925efc7 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -1469,6 +1469,10 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
+ 		return;
+ 	}
+ 
++	/* recv queue not created. nothing to do. */
++	if (!qp->rq.queue)
++		return;
++
+ 	while ((wqe = queue_head(q, q->type))) {
+ 		if (notify) {
+ 			err = flush_recv_wqe(qp, wqe);
+diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
+index 27ca82ec0826b..3661cb627d28a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_srq.c
++++ b/drivers/infiniband/sw/rxe/rxe_srq.c
+@@ -45,40 +45,41 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
+ 		      struct rxe_create_srq_resp __user *uresp)
+ {
+-	int err;
+-	int srq_wqe_size;
+ 	struct rxe_queue *q;
+-	enum queue_type type;
++	int wqe_size;
++	int err;
+ 
+-	srq->ibsrq.event_handler	= init->event_handler;
+-	srq->ibsrq.srq_context		= init->srq_context;
+-	srq->limit		= init->attr.srq_limit;
+-	srq->srq_num		= srq->elem.index;
+-	srq->rq.max_wr		= init->attr.max_wr;
+-	srq->rq.max_sge		= init->attr.max_sge;
++	srq->ibsrq.event_handler = init->event_handler;
++	srq->ibsrq.srq_context = init->srq_context;
++	srq->limit = init->attr.srq_limit;
++	srq->srq_num = srq->elem.index;
++	srq->rq.max_wr = init->attr.max_wr;
++	srq->rq.max_sge = init->attr.max_sge;
+ 
+-	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);
++	wqe_size = sizeof(struct rxe_recv_wqe) +
++			srq->rq.max_sge*sizeof(struct ib_sge);
+ 
+ 	spin_lock_init(&srq->rq.producer_lock);
+ 	spin_lock_init(&srq->rq.consumer_lock);
+ 
+-	type = QUEUE_TYPE_FROM_CLIENT;
+-	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
++	q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
++			   QUEUE_TYPE_FROM_CLIENT);
+ 	if (!q) {
+ 		rxe_dbg_srq(srq, "Unable to allocate queue\n");
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto err_out;
+ 	}
+ 
+-	srq->rq.queue = q;
+-
+ 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
+ 			   q->buf_size, &q->ip);
+ 	if (err) {
+-		vfree(q->buf);
+-		kfree(q);
+-		return err;
++		rxe_dbg_srq(srq, "Unable to init mmap info for caller\n");
++		goto err_free;
+ 	}
+ 
++	srq->rq.queue = q;
++	init->attr.max_wr = srq->rq.max_wr;
++
+ 	if (uresp) {
+ 		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
+ 				 sizeof(uresp->srq_num))) {
+@@ -88,6 +89,12 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 	}
+ 
+ 	return 0;
++
++err_free:
++	vfree(q->buf);
++	kfree(q);
++err_out:
++	return err;
+ }
+ 
+ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+@@ -145,9 +152,10 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
+ {
+-	int err;
+ 	struct rxe_queue *q = srq->rq.queue;
+ 	struct mminfo __user *mi = NULL;
++	int wqe_size;
++	int err;
+ 
+ 	if (mask & IB_SRQ_MAX_WR) {
+ 		/*
+@@ -156,12 +164,16 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 		 */
+ 		mi = u64_to_user_ptr(ucmd->mmap_info_addr);
+ 
+-		err = rxe_queue_resize(q, &attr->max_wr,
+-				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
+-				       &srq->rq.producer_lock,
++		wqe_size = sizeof(struct rxe_recv_wqe) +
++				srq->rq.max_sge*sizeof(struct ib_sge);
++
++		err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
++				       udata, mi, &srq->rq.producer_lock,
+ 				       &srq->rq.consumer_lock);
+ 		if (err)
+-			goto err2;
++			goto err_free;
++
++		srq->rq.max_wr = attr->max_wr;
+ 	}
+ 
+ 	if (mask & IB_SRQ_LIMIT)
+@@ -169,7 +181,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 
+ 	return 0;
+ 
+-err2:
++err_free:
+ 	rxe_queue_cleanup(q);
+ 	srq->rq.queue = NULL;
+ 	return err;
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index 2f3a9cda3850f..8b4a710b82bc1 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -74,6 +74,7 @@ struct siw_device {
+ 
+ 	u32 vendor_part_id;
+ 	int numa_node;
++	char raw_gid[ETH_ALEN];
+ 
+ 	/* physical port state (only one port per device) */
+ 	enum ib_port_state state;
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index da530c0404da4..a2605178f4eda 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1501,7 +1501,6 @@ error:
+ 
+ 		cep->cm_id = NULL;
+ 		id->rem_ref(id);
+-		siw_cep_put(cep);
+ 
+ 		qp->cep = NULL;
+ 		siw_cep_put(cep);
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index 65b5cda5457ba..f45600d169ae7 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -75,8 +75,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
+ 		return rv;
+ 	}
+ 
+-	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
+-
++	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid);
+ 	return 0;
+ }
+ 
+@@ -313,24 +312,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ 		return NULL;
+ 
+ 	base_dev = &sdev->base_dev;
+-
+ 	sdev->netdev = netdev;
+ 
+-	if (netdev->type != ARPHRD_LOOPBACK && netdev->type != ARPHRD_NONE) {
+-		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+-				    netdev->dev_addr);
++	if (netdev->addr_len) {
++		memcpy(sdev->raw_gid, netdev->dev_addr,
++		       min_t(unsigned int, netdev->addr_len, ETH_ALEN));
+ 	} else {
+ 		/*
+-		 * This device does not have a HW address,
+-		 * but connection mangagement lib expects gid != 0
++		 * This device does not have a HW address, but
++		 * connection management requires a unique gid.
+ 		 */
+-		size_t len = min_t(size_t, strlen(base_dev->name), 6);
+-		char addr[6] = { };
+-
+-		memcpy(addr, base_dev->name, len);
+-		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+-				    addr);
++		eth_random_addr(sdev->raw_gid);
+ 	}
++	addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid);
+ 
+ 	base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
+ 
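
addrconf_addr_eui48() expands the six-byte raw_gid into the eight-byte node
GUID via the usual EUI-48 to modified EUI-64 mapping. A standalone sketch of
that mapping (believed equivalent to the helper; shown for illustration):

#include <stdint.h>
#include <string.h>

static void eui48_to_eui64(uint8_t eui[8], const uint8_t addr[6])
{
        memcpy(eui, addr, 3);
        eui[3] = 0xFF;                  /* insert FF:FE in the middle */
        eui[4] = 0xFE;
        memcpy(eui + 5, addr + 3, 3);
        eui[0] ^= 0x02;                 /* flip the universal/local bit */
}
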
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 398ec13db6248..10cabc792c68e 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -157,7 +157,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ 	attr->vendor_part_id = sdev->vendor_part_id;
+ 
+ 	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+-			    sdev->netdev->dev_addr);
++			    sdev->raw_gid);
+ 
+ 	return 0;
+ }
+@@ -218,7 +218,7 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
+ 
+ 	/* subnet_prefix == interface_id == 0; */
+ 	memset(gid, 0, sizeof(*gid));
+-	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
++	memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
+ 
+ 	return 0;
+ }
+@@ -1494,7 +1494,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
+ 
+ 	if (pbl->max_buf < num_sle) {
+ 		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
+-			    mem->pbl->max_buf, num_sle);
++			    num_sle, pbl->max_buf);
+ 		return -ENOMEM;
+ 	}
+ 	for_each_sg(sl, slp, num_sle, i) {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 92e1e7587af8b..00a7303c8cc60 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2570,6 +2570,8 @@ static void isert_wait_conn(struct iscsit_conn *conn)
+ 	isert_put_unsol_pending_cmds(conn);
+ 	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
++
++	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsit_conn *conn)
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 0e513a7e5ac80..1574218764e0a 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1979,12 +1979,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ 
+ 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
+ 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+-		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
+-			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
+ 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
+ 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
+-		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
+-			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
+ 
+ 		srp_free_req(ch, req, scmnd,
+ 			     be32_to_cpu(rsp->req_lim_delta));
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 028e45bd050bf..1724d6cb8649d 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1281,6 +1281,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index d5f2a6b5376bd..a2d437a05a11f 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -58,6 +58,36 @@ static u64 bcm_div(u64 num, u32 base)
+ 	return num;
+ }
+ 
++/* BCMs with enable_mask use one-hot-encoding for on/off signaling */
++static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm)
++{
++	struct qcom_icc_node *node;
++	int bucket, i;
++
++	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
++		bcm->vote_x[bucket] = 0;
++		bcm->vote_y[bucket] = 0;
++
++		for (i = 0; i < bcm->num_nodes; i++) {
++			node = bcm->nodes[i];
++
++			/* If any vote in this bucket exists, keep the BCM enabled */
++			if (node->sum_avg[bucket] || node->max_peak[bucket]) {
++				bcm->vote_x[bucket] = 0;
++				bcm->vote_y[bucket] = bcm->enable_mask;
++				break;
++			}
++		}
++	}
++
++	if (bcm->keepalive) {
++		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
++		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
++		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
++		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
++	}
++}
++
+ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+ {
+ 	struct qcom_icc_node *node;
+@@ -83,11 +113,6 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+ 
+ 		temp = agg_peak[bucket] * bcm->vote_scale;
+ 		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
+-
+-		if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
+-			bcm->vote_x[bucket] = 0;
+-			bcm->vote_y[bucket] = bcm->enable_mask;
+-		}
+ 	}
+ 
+ 	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+@@ -260,8 +285,12 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
+ 		return 0;
+ 
+ 	mutex_lock(&voter->lock);
+-	list_for_each_entry(bcm, &voter->commit_list, list)
+-		bcm_aggregate(bcm);
++	list_for_each_entry(bcm, &voter->commit_list, list) {
++		if (bcm->enable_mask)
++			bcm_aggregate_mask(bcm);
++		else
++			bcm_aggregate(bcm);
++	}
+ 
+ 	/*
+ 	 * Pre sort the BCMs based on VCD for ease of generating a command list
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index a29cdb4fac03f..82a2698ad66b1 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -1355,6 +1355,7 @@ static struct platform_driver qcm2290_noc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-qcm2290",
+ 		.of_match_table = qcm2290_noc_of_match,
++		.sync_state = icc_sync_state,
+ 	},
+ };
+ module_platform_driver(qcm2290_noc_driver);
+diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
+index e64c214b40209..d6e582a02e628 100644
+--- a/drivers/interconnect/qcom/sm8450.c
++++ b/drivers/interconnect/qcom/sm8450.c
+@@ -1886,6 +1886,7 @@ static struct platform_driver qnoc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-sm8450",
+ 		.of_match_table = qnoc_of_match,
++		.sync_state = icc_sync_state,
+ 	},
+ };
+ 
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
+index 261352a232716..65d78d7e04408 100644
+--- a/drivers/iommu/amd/iommu_v2.c
++++ b/drivers/iommu/amd/iommu_v2.c
+@@ -262,8 +262,8 @@ static void put_pasid_state(struct pasid_state *pasid_state)
+ 
+ static void put_pasid_state_wait(struct pasid_state *pasid_state)
+ {
+-	refcount_dec(&pasid_state->count);
+-	wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
++	if (!refcount_dec_and_test(&pasid_state->count))
++		wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
+ 	free_pasid_state(pasid_state);
+ }
+ 
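
refcount_dec() WARNs when a decrement hits zero ("decrement hit 0; leaking
memory"), so the final putter must use refcount_dec_and_test() and only wait
when another reference is still held, as the hunk above does. A minimal
userspace analogue of the final-put test (illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Analogue of refcount_dec_and_test(): true only for the final put. */
static bool put_ref(atomic_int *count)
{
        return atomic_fetch_sub_explicit(count, 1,
                                         memory_order_acq_rel) == 1;
}
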
+diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+index a503ed758ec30..3e551ca6afdb9 100644
+--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+@@ -273,6 +273,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
+ 			ctx->secure_init = true;
+ 		}
+ 
++		/* Disable context bank before programming */
++		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
++
++		/* Clear context bank fault address fault status registers */
++		iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
++		iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
++
+ 		/* TTBRs */
+ 		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
+ 				pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index c5d479770e12e..49fc5a038a145 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -129,7 +129,7 @@ int intel_pasid_alloc_table(struct device *dev)
+ 	info->pasid_table = pasid_table;
+ 
+ 	if (!ecap_coherent(info->iommu->ecap))
+-		clflush_cache_range(pasid_table->table, size);
++		clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
+ 
+ 	return 0;
+ }
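
The flush above now covers the whole allocation, (1 << order) pages, rather
than the possibly smaller requested size. For reference, the power-of-two
page rounding involved (assumes 4 KiB pages; get_order() is the kernel
equivalent):

#define PAGE_SHIFT 12

/* Smallest order such that (1 << order) pages hold `bytes'. */
static unsigned int order_for(unsigned long bytes)
{
        unsigned int order = 0;

        while ((1ul << (order + PAGE_SHIFT)) < bytes)
                order++;
        return order;
}
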
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index caaf563d38ae0..cabeb5bd3e41f 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -3203,7 +3203,7 @@ static void __iommu_release_dma_ownership(struct iommu_group *group)
+ 
+ /**
+  * iommu_group_release_dma_owner() - Release DMA ownership of a group
+- * @dev: The device
++ * @group: The group
+  *
+  * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+  */
+@@ -3217,7 +3217,7 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
+ 
+ /**
+  * iommu_device_release_dma_owner() - Release DMA ownership of a device
+- * @group: The device.
++ * @dev: The device.
+  *
+  * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
+  */
+diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
+index ed2937a4e196f..2e43ebf1a2b5c 100644
+--- a/drivers/iommu/iommufd/device.c
++++ b/drivers/iommu/iommufd/device.c
+@@ -298,8 +298,8 @@ static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
+ 	}
+ 	hwpt->auto_domain = true;
+ 
+-	mutex_unlock(&ioas->mutex);
+ 	iommufd_object_finalize(idev->ictx, &hwpt->obj);
++	mutex_unlock(&ioas->mutex);
+ 	return 0;
+ out_unlock:
+ 	mutex_unlock(&ioas->mutex);
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index e93906d6e112e..c2764891a779c 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -258,6 +258,8 @@ struct mtk_iommu_data {
+ 	struct device			*smicomm_dev;
+ 
+ 	struct mtk_iommu_bank_data	*bank;
++	struct mtk_iommu_domain		*share_dom; /* For 2 HWs share pgtable */
++
+ 	struct regmap			*pericfg;
+ 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */
+ 
+@@ -620,15 +622,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 				     struct mtk_iommu_data *data,
+ 				     unsigned int region_id)
+ {
++	struct mtk_iommu_domain	*share_dom = data->share_dom;
+ 	const struct mtk_iommu_iova_region *region;
+-	struct mtk_iommu_domain	*m4u_dom;
+-
+-	/* Always use bank0 in sharing pgtable case */
+-	m4u_dom = data->bank[0].m4u_dom;
+-	if (m4u_dom) {
+-		dom->iop = m4u_dom->iop;
+-		dom->cfg = m4u_dom->cfg;
+-		dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
++
++	/* Always use share domain in sharing pgtable case */
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
++		dom->iop = share_dom->iop;
++		dom->cfg = share_dom->cfg;
++		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+ 		goto update_iova_region;
+ 	}
+ 
+@@ -658,6 +659,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	/* Update our support page sizes bitmap */
+ 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ 
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
++		data->share_dom = dom;
++
+ update_iova_region:
+ 	/* Update the iova region for this domain */
+ 	region = data->plat_data->iova_region + region_id;
+@@ -708,7 +712,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ 		/* Data is in the frstdata in sharing pgtable case. */
+ 		frstdata = mtk_iommu_get_frst_data(hw_list);
+ 
++		mutex_lock(&frstdata->mutex);
+ 		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
++		mutex_unlock(&frstdata->mutex);
+ 		if (ret) {
+ 			mutex_unlock(&dom->mutex);
+ 			return ret;
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index 4054030c32379..ae42959bc4905 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -98,8 +98,6 @@ struct rk_iommu_ops {
+ 	phys_addr_t (*pt_address)(u32 dte);
+ 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
+ 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
+-	phys_addr_t (*dte_addr_phys)(u32 addr);
+-	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
+ 	u64 dma_bit_mask;
+ };
+ 
+@@ -278,8 +276,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
+ /*
+  * In v2:
+  * 31:12 - Page address bit 31:0
+- *  11:9 - Page address bit 34:32
+- *   8:4 - Page address bit 39:35
++ * 11: 8 - Page address bit 35:32
++ *  7: 4 - Page address bit 39:36
+  *     3 - Security
+  *     2 - Writable
+  *     1 - Readable
+@@ -506,7 +504,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
+ 
+ 	/*
+ 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+-	 * and verifying that upper 5 nybbles are read back.
++	 * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
+ 	 */
+ 	for (i = 0; i < iommu->num_mmu; i++) {
+ 		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
+@@ -531,33 +529,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
+ 	return 0;
+ }
+ 
+-static inline phys_addr_t rk_dte_addr_phys(u32 addr)
+-{
+-	return (phys_addr_t)addr;
+-}
+-
+-static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
+-{
+-	return dt_dma;
+-}
+-
+-#define DT_HI_MASK GENMASK_ULL(39, 32)
+-#define DTE_BASE_HI_MASK GENMASK(11, 4)
+-#define DT_SHIFT   28
+-
+-static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
+-{
+-	u64 addr64 = addr;
+-	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+-	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
+-}
+-
+-static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
+-{
+-	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
+-	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
+-}
+-
+ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
+ {
+ 	void __iomem *base = iommu->bases[index];
+@@ -577,7 +548,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
+ 	page_offset = rk_iova_page_offset(iova);
+ 
+ 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
+-	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
++	mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
+ 
+ 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ 	dte_addr = phys_to_virt(dte_addr_phys);
+@@ -967,7 +938,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
+ 
+ 	for (i = 0; i < iommu->num_mmu; i++) {
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+-			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
++			       rk_ops->mk_dtentries(rk_domain->dt_dma));
+ 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ 	}
+@@ -1405,8 +1376,6 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
+ 	.pt_address = &rk_dte_pt_address,
+ 	.mk_dtentries = &rk_mk_dte,
+ 	.mk_ptentries = &rk_mk_pte,
+-	.dte_addr_phys = &rk_dte_addr_phys,
+-	.dma_addr_dte = &rk_dma_addr_dte,
+ 	.dma_bit_mask = DMA_BIT_MASK(32),
+ };
+ 
+@@ -1414,8 +1383,6 @@ static struct rk_iommu_ops iommu_data_ops_v2 = {
+ 	.pt_address = &rk_dte_pt_address_v2,
+ 	.mk_dtentries = &rk_mk_dte_v2,
+ 	.mk_ptentries = &rk_mk_pte_v2,
+-	.dte_addr_phys = &rk_dte_addr_phys_v2,
+-	.dma_addr_dte = &rk_dma_addr_dte_v2,
+ 	.dma_bit_mask = DMA_BIT_MASK(40),
+ };
+ 
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index 39e34fdeccda7..eb684d8807cab 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -148,6 +148,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+ 
+ 	dom->domain.geometry.aperture_start = 0;
+ 	dom->domain.geometry.aperture_end = SZ_256M - 1;
++	dom->domain.geometry.force_aperture = true;
+ 
+ 	return &dom->domain;
+ }
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 92d8aa28bdf54..1623cd7791752 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -144,7 +144,7 @@ static int eiointc_router_init(unsigned int cpu)
+ 	int i, bit;
+ 	uint32_t data;
+ 	uint32_t node = cpu_to_eio_node(cpu);
+-	uint32_t index = eiointc_index(node);
++	int index = eiointc_index(node);
+ 
+ 	if (index < 0) {
+ 		pr_err("Error: invalid nodemap!\n");
+diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
+index e317408583df9..ec62a48116135 100644
+--- a/drivers/leds/led-class-multicolor.c
++++ b/drivers/leds/led-class-multicolor.c
+@@ -6,6 +6,7 @@
+ #include <linux/device.h>
+ #include <linux/init.h>
+ #include <linux/led-class-multicolor.h>
++#include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+@@ -19,9 +20,10 @@ int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ 	int i;
+ 
+ 	for (i = 0; i < mcled_cdev->num_colors; i++)
+-		mcled_cdev->subled_info[i].brightness = brightness *
+-					mcled_cdev->subled_info[i].intensity /
+-					led_cdev->max_brightness;
++		mcled_cdev->subled_info[i].brightness =
++			DIV_ROUND_CLOSEST(brightness *
++					  mcled_cdev->subled_info[i].intensity,
++					  led_cdev->max_brightness);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index b9b1295833c90..04f9ea675f2ce 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -474,15 +474,15 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ 	struct fwnode_handle *fwnode = init_data->fwnode;
+ 	const char *devicename = init_data->devicename;
+ 
+-	/* We want to label LEDs that can produce full range of colors
+-	 * as RGB, not multicolor */
+-	BUG_ON(props.color == LED_COLOR_ID_MULTI);
+-
+ 	if (!led_classdev_name)
+ 		return -EINVAL;
+ 
+ 	led_parse_fwnode_props(dev, fwnode, &props);
+ 
++	/* We want to label LEDs that can produce the full range of colors
++	 * as RGB, not multicolor */
++	BUG_ON(props.color == LED_COLOR_ID_MULTI);
++
+ 	if (props.label) {
+ 		/*
+ 		 * If init_data.devicename is NULL, then it indicates that
+diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
+index 96979b8e09b7d..7b996bc01c469 100644
+--- a/drivers/leds/leds-aw200xx.c
++++ b/drivers/leds/leds-aw200xx.c
+@@ -368,7 +368,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
+ 
+ 	if (!chip->display_rows ||
+ 	    chip->display_rows > chip->cdef->display_size_rows_max) {
+-		return dev_err_probe(dev, ret,
++		return dev_err_probe(dev, -EINVAL,
+ 				     "Invalid leds display size %u\n",
+ 				     chip->display_rows);
+ 	}
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 29194cc382afb..87c199242f3c8 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -146,7 +146,7 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
+ 			led.name = to_of_node(fwnode)->name;
+ 
+ 		if (!led.name) {
+-			ret = EINVAL;
++			ret = -EINVAL;
+ 			goto err_child_out;
+ 		}
+ 
+diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
+index 44fa0f93cb3b3..02443e745ff3b 100644
+--- a/drivers/leds/simple/Kconfig
++++ b/drivers/leds/simple/Kconfig
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config LEDS_SIEMENS_SIMATIC_IPC
+ 	tristate "LED driver for Siemens Simatic IPCs"
++	depends on LEDS_CLASS
+ 	depends on SIEMENS_SIMATIC_IPC
+ 	help
+ 	  This option enables support for the LEDs of several Industrial PCs
+diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
+index f62db7e520b52..8ae0d2d284aff 100644
+--- a/drivers/leds/trigger/ledtrig-tty.c
++++ b/drivers/leds/trigger/ledtrig-tty.c
+@@ -7,6 +7,8 @@
+ #include <linux/tty.h>
+ #include <uapi/linux/serial.h>
+ 
++#define LEDTRIG_TTY_INTERVAL	50
++
+ struct ledtrig_tty_data {
+ 	struct led_classdev *led_cdev;
+ 	struct delayed_work dwork;
+@@ -122,17 +124,19 @@ static void ledtrig_tty_work(struct work_struct *work)
+ 
+ 	if (icount.rx != trigger_data->rx ||
+ 	    icount.tx != trigger_data->tx) {
+-		led_set_brightness_sync(trigger_data->led_cdev, LED_ON);
++		unsigned long interval = LEDTRIG_TTY_INTERVAL;
++
++		led_blink_set_oneshot(trigger_data->led_cdev, &interval,
++				      &interval, 0);
+ 
+ 		trigger_data->rx = icount.rx;
+ 		trigger_data->tx = icount.tx;
+-	} else {
+-		led_set_brightness_sync(trigger_data->led_cdev, LED_OFF);
+ 	}
+ 
+ out:
+ 	mutex_unlock(&trigger_data->mutex);
+-	schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100));
++	schedule_delayed_work(&trigger_data->dwork,
++			      msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
+ }
+ 
+ static struct attribute *ledtrig_tty_attrs[] = {
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 1ff712889a3b3..a08bf6b9accb4 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2542,6 +2542,10 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (backlog > COUNTER_MAX)
+ 		return -EINVAL;
+ 
++	rv = mddev_lock(mddev);
++	if (rv)
++		return rv;
++
+ 	/*
+ 	 * Without write mostly device, it doesn't make sense to set
+ 	 * backlog for max_write_behind.
+@@ -2555,6 +2559,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (!has_write_mostly) {
+ 		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
+ 				    mdname(mddev));
++		mddev_unlock(mddev);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2565,13 +2570,13 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 			mddev_destroy_serial_pool(mddev, NULL, false);
+ 	} else if (backlog && !mddev->serial_info_pool) {
+ 		/* serial_info_pool is needed since backlog is not zero */
+-		struct md_rdev *rdev;
+-
+ 		rdev_for_each(rdev, mddev)
+ 			mddev_create_serial_pool(mddev, rdev, false);
+ 	}
+ 	if (old_mwb != backlog)
+ 		md_bitmap_update_sb(mddev->bitmap);
++
++	mddev_unlock(mddev);
+ 	return len;
+ }
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 78be7811a89f5..2a4a3d3039fae 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -465,11 +465,13 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
+ 
+ void mddev_resume(struct mddev *mddev)
+ {
+-	/* entred the memalloc scope from mddev_suspend() */
+-	memalloc_noio_restore(mddev->noio_flag);
+ 	lockdep_assert_held(&mddev->reconfig_mutex);
+ 	if (--mddev->suspended)
+ 		return;
++
++	/* entered the memalloc scope from mddev_suspend() */
++	memalloc_noio_restore(mddev->noio_flag);
++
+ 	percpu_ref_resurrect(&mddev->active_io);
+ 	wake_up(&mddev->sb_wait);
+ 	mddev->pers->quiesce(mddev, 0);
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index d1ac73fcd8529..7c6a0b4437d8f 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -557,54 +557,20 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	bio_endio(bio);
+ }
+ 
+-static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
++static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
+ {
+ 	struct r0conf *conf = mddev->private;
+ 	struct strip_zone *zone;
+ 	struct md_rdev *tmp_dev;
+-	sector_t bio_sector;
+-	sector_t sector;
+-	sector_t orig_sector;
+-	unsigned chunk_sects;
+-	unsigned sectors;
+-
+-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+-	    && md_flush_request(mddev, bio))
+-		return true;
++	sector_t bio_sector = bio->bi_iter.bi_sector;
++	sector_t sector = bio_sector;
+ 
+-	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
+-		raid0_handle_discard(mddev, bio);
+-		return true;
+-	}
++	md_account_bio(mddev, &bio);
+ 
+-	bio_sector = bio->bi_iter.bi_sector;
+-	sector = bio_sector;
+-	chunk_sects = mddev->chunk_sectors;
+-
+-	sectors = chunk_sects -
+-		(likely(is_power_of_2(chunk_sects))
+-		 ? (sector & (chunk_sects-1))
+-		 : sector_div(sector, chunk_sects));
+-
+-	/* Restore due to sector_div */
+-	sector = bio_sector;
+-
+-	if (sectors < bio_sectors(bio)) {
+-		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+-					      &mddev->bio_set);
+-		bio_chain(split, bio);
+-		submit_bio_noacct(bio);
+-		bio = split;
+-	}
+-
+-	if (bio->bi_pool != &mddev->bio_set)
+-		md_account_bio(mddev, &bio);
+-
+-	orig_sector = sector;
+ 	zone = find_zone(mddev->private, &sector);
+ 	switch (conf->layout) {
+ 	case RAID0_ORIG_LAYOUT:
+-		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
++		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
+ 		break;
+ 	case RAID0_ALT_MULTIZONE_LAYOUT:
+ 		tmp_dev = map_sector(mddev, zone, sector, &sector);
+@@ -612,13 +578,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 	default:
+ 		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
+ 		bio_io_error(bio);
+-		return true;
++		return;
+ 	}
+ 
+ 	if (unlikely(is_rdev_broken(tmp_dev))) {
+ 		bio_io_error(bio);
+ 		md_error(mddev, tmp_dev);
+-		return true;
++		return;
+ 	}
+ 
+ 	bio_set_dev(bio, tmp_dev->bdev);
+@@ -630,6 +596,40 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 				      bio_sector);
+ 	mddev_check_write_zeroes(mddev, bio);
+ 	submit_bio_noacct(bio);
++}
++
++static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
++{
++	sector_t sector;
++	unsigned chunk_sects;
++	unsigned sectors;
++
++	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
++	    && md_flush_request(mddev, bio))
++		return true;
++
++	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
++		raid0_handle_discard(mddev, bio);
++		return true;
++	}
++
++	sector = bio->bi_iter.bi_sector;
++	chunk_sects = mddev->chunk_sectors;
++
++	sectors = chunk_sects -
++		(likely(is_power_of_2(chunk_sects))
++		 ? (sector & (chunk_sects-1))
++		 : sector_div(sector, chunk_sects));
++
++	if (sectors < bio_sectors(bio)) {
++		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
++					      &mddev->bio_set);
++		bio_chain(split, bio);
++		raid0_map_submit_bio(mddev, bio);
++		bio = split;
++	}
++
++	raid0_map_submit_bio(mddev, bio);
+ 	return true;
+ }
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 5051149e27bbe..0578bcda7c6b7 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1322,6 +1322,25 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 	}
+ }
+ 
++static struct md_rdev *dereference_rdev_and_rrdev(struct raid10_info *mirror,
++						  struct md_rdev **prrdev)
++{
++	struct md_rdev *rdev, *rrdev;
++
++	rrdev = rcu_dereference(mirror->replacement);
++	/*
++	 * Read replacement first to prevent reading both rdev and
++	 * replacement as NULL while the replacement replaces rdev.
++	 */
++	smp_mb();
++	rdev = rcu_dereference(mirror->rdev);
++	if (rdev == rrdev)
++		rrdev = NULL;
++
++	*prrdev = rrdev;
++	return rdev;
++}
++
+ static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
+ {
+ 	int i;
+@@ -1332,11 +1351,9 @@ retry_wait:
+ 	blocked_rdev = NULL;
+ 	rcu_read_lock();
+ 	for (i = 0; i < conf->copies; i++) {
+-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+-		struct md_rdev *rrdev = rcu_dereference(
+-			conf->mirrors[i].replacement);
+-		if (rdev == rrdev)
+-			rrdev = NULL;
++		struct md_rdev *rdev, *rrdev;
++
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[i], &rrdev);
+ 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ 			atomic_inc(&rdev->nr_pending);
+ 			blocked_rdev = rdev;
+@@ -1465,15 +1482,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 		int d = r10_bio->devs[i].devnum;
+ 		struct md_rdev *rdev, *rrdev;
+ 
+-		rrdev = rcu_dereference(conf->mirrors[d].replacement);
+-		/*
+-		 * Read replacement first to prevent reading both rdev and
+-		 * replacement as NULL during replacement replace rdev.
+-		 */
+-		smp_mb();
+-		rdev = rcu_dereference(conf->mirrors[d].rdev);
+-		if (rdev == rrdev)
+-			rrdev = NULL;
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[d], &rrdev);
+ 		if (rdev && (test_bit(Faulty, &rdev->flags)))
+ 			rdev = NULL;
+ 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
+@@ -1780,10 +1789,9 @@ retry_discard:
+ 	 */
+ 	rcu_read_lock();
+ 	for (disk = 0; disk < geo->raid_disks; disk++) {
+-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
+-		struct md_rdev *rrdev = rcu_dereference(
+-			conf->mirrors[disk].replacement);
++		struct md_rdev *rdev, *rrdev;
+ 
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[disk], &rrdev);
+ 		r10_bio->devs[disk].bio = NULL;
+ 		r10_bio->devs[disk].repl_bio = NULL;
+ 
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index 47ba7d9e81e18..8b3fc484fd758 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -1260,14 +1260,13 @@ static void r5l_log_flush_endio(struct bio *bio)
+ 
+ 	if (bio->bi_status)
+ 		md_error(log->rdev->mddev, log->rdev);
++	bio_uninit(bio);
+ 
+ 	spin_lock_irqsave(&log->io_list_lock, flags);
+ 	list_for_each_entry(io, &log->flushing_ios, log_sibling)
+ 		r5l_io_run_stripes(io);
+ 	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
+ 	spin_unlock_irqrestore(&log->io_list_lock, flags);
+-
+-	bio_uninit(bio);
+ }
+ 
+ /*
+@@ -3168,12 +3167,15 @@ void r5l_exit_log(struct r5conf *conf)
+ {
+ 	struct r5l_log *log = conf->log;
+ 
+-	/* Ensure disable_writeback_work wakes up and exits */
+-	wake_up(&conf->mddev->sb_wait);
+-	flush_work(&log->disable_writeback_work);
+ 	md_unregister_thread(&log->reclaim_thread);
+ 
++	/*
++	 * 'reconfig_mutex' is held by the caller, set 'conf->log' to NULL to
++	 * ensure disable_writeback_work wakes up and exits.
++	 */
+ 	conf->log = NULL;
++	wake_up(&conf->mddev->sb_wait);
++	flush_work(&log->disable_writeback_work);
+ 
+ 	mempool_exit(&log->meta_pool);
+ 	bioset_exit(&log->bs);
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 241b1621b197c..09ca83c233299 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -385,8 +385,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status, u8 rx_status)
+ 	cec_queue_msg_monitor(adap, &data->msg, 1);
+ 
+ 	if (!data->blocking && data->msg.sequence)
+-		/* Allow drivers to process the message first */
+-		call_op(adap, received, &data->msg);
++		/* Allow drivers to react to a canceled transmit */
++		call_void_op(adap, adap_nb_transmit_canceled, &data->msg);
+ 
+ 	cec_data_completed(data);
+ }
+@@ -1348,7 +1348,7 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
+ 	cec_flush(adap);
+ 	wake_up_interruptible(&adap->kthread_waitq);
+ 	cec_post_state_event(adap);
+-	call_void_op(adap, adap_configured, false);
++	call_void_op(adap, adap_unconfigured);
+ }
+ 
+ /*
+@@ -1539,7 +1539,7 @@ configured:
+ 	adap->kthread_config = NULL;
+ 	complete(&adap->config_completion);
+ 	mutex_unlock(&adap->lock);
+-	call_void_op(adap, adap_configured, true);
++	call_void_op(adap, configured);
+ 	return 0;
+ 
+ unconfigure:
+diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
+index 9b00b56230b61..cf8e5f1bd1018 100644
+--- a/drivers/media/dvb-frontends/ascot2e.c
++++ b/drivers/media/dvb-frontends/ascot2e.c
+@@ -533,7 +533,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+ 		priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(ascot2e_attach);
++EXPORT_SYMBOL_GPL(ascot2e_attach);
+ 
+ MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
+ MODULE_AUTHOR("info@netup.ru");
+diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
+index bdd16b9c58244..778c865085bf9 100644
+--- a/drivers/media/dvb-frontends/atbm8830.c
++++ b/drivers/media/dvb-frontends/atbm8830.c
+@@ -489,7 +489,7 @@ error_out:
+ 	return NULL;
+ 
+ }
+-EXPORT_SYMBOL(atbm8830_attach);
++EXPORT_SYMBOL_GPL(atbm8830_attach);
+ 
+ MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
+index 78cafdf279618..230436bf6cbd9 100644
+--- a/drivers/media/dvb-frontends/au8522_dig.c
++++ b/drivers/media/dvb-frontends/au8522_dig.c
+@@ -879,7 +879,7 @@ error:
+ 	au8522_release_state(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(au8522_attach);
++EXPORT_SYMBOL_GPL(au8522_attach);
+ 
+ static const struct dvb_frontend_ops au8522_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index 68b92b4419cff..b3f5c49accafd 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -835,7 +835,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(bcm3510_attach);
++EXPORT_SYMBOL_GPL(bcm3510_attach);
+ 
+ static const struct dvb_frontend_ops bcm3510_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
+index b39ff516271b2..1d04c0a652b26 100644
+--- a/drivers/media/dvb-frontends/cx22700.c
++++ b/drivers/media/dvb-frontends/cx22700.c
+@@ -432,4 +432,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(cx22700_attach);
++EXPORT_SYMBOL_GPL(cx22700_attach);
+diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
+index cc6acbf6393d4..61ad34b7004b5 100644
+--- a/drivers/media/dvb-frontends/cx22702.c
++++ b/drivers/media/dvb-frontends/cx22702.c
+@@ -604,7 +604,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx22702_attach);
++EXPORT_SYMBOL_GPL(cx22702_attach);
+ 
+ static const struct dvb_frontend_ops cx22702_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
+index 6f99d6a27be2d..9aeea089756fe 100644
+--- a/drivers/media/dvb-frontends/cx24110.c
++++ b/drivers/media/dvb-frontends/cx24110.c
+@@ -653,4 +653,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Peter Hettkamp");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(cx24110_attach);
++EXPORT_SYMBOL_GPL(cx24110_attach);
+diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
+index dd55d314bf9af..203cb6b3f941b 100644
+--- a/drivers/media/dvb-frontends/cx24113.c
++++ b/drivers/media/dvb-frontends/cx24113.c
+@@ -590,7 +590,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24113_attach);
++EXPORT_SYMBOL_GPL(cx24113_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
+diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
+index ea8264ccbb4e8..8b978a9f74a4e 100644
+--- a/drivers/media/dvb-frontends/cx24116.c
++++ b/drivers/media/dvb-frontends/cx24116.c
+@@ -1133,7 +1133,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
+ 	state->frontend.demodulator_priv = state;
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(cx24116_attach);
++EXPORT_SYMBOL_GPL(cx24116_attach);
+ 
+ /*
+  * Initialise or wake up device
+diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
+index d8acd582c7111..44515fdbe91d4 100644
+--- a/drivers/media/dvb-frontends/cx24120.c
++++ b/drivers/media/dvb-frontends/cx24120.c
+@@ -305,7 +305,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24120_attach);
++EXPORT_SYMBOL_GPL(cx24120_attach);
+ 
+ static int cx24120_test_rom(struct cx24120_state *state)
+ {
+@@ -973,7 +973,9 @@ static void cx24120_set_clock_ratios(struct dvb_frontend *fe)
+ 	cmd.arg[8] = (clock_ratios_table[idx].rate >> 8) & 0xff;
+ 	cmd.arg[9] = (clock_ratios_table[idx].rate >> 0) & 0xff;
+ 
+-	cx24120_message_send(state, &cmd);
++	ret = cx24120_message_send(state, &cmd);
++	if (ret != 0)
++		return;
+ 
+ 	/* Calculate ber window rates for stat work */
+ 	cx24120_calculate_ber_window(state, clock_ratios_table[idx].rate);
+diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
+index 3d84ee17e54c6..539889e638ccc 100644
+--- a/drivers/media/dvb-frontends/cx24123.c
++++ b/drivers/media/dvb-frontends/cx24123.c
+@@ -1096,7 +1096,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24123_attach);
++EXPORT_SYMBOL_GPL(cx24123_attach);
+ 
+ static const struct dvb_frontend_ops cx24123_ops = {
+ 	.delsys = { SYS_DVBS },
+diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
+index d7ee294c68334..7feb08dccfa1c 100644
+--- a/drivers/media/dvb-frontends/cxd2820r_core.c
++++ b/drivers/media/dvb-frontends/cxd2820r_core.c
+@@ -536,7 +536,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
+ 
+ 	return pdata.get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(cxd2820r_attach);
++EXPORT_SYMBOL_GPL(cxd2820r_attach);
+ 
+ static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
+ {
+diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
+index 5431f922f55e4..e9d1eef40c627 100644
+--- a/drivers/media/dvb-frontends/cxd2841er.c
++++ b/drivers/media/dvb-frontends/cxd2841er.c
+@@ -3930,14 +3930,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+ {
+ 	return cxd2841er_attach(cfg, i2c, SYS_DVBS);
+ }
+-EXPORT_SYMBOL(cxd2841er_attach_s);
++EXPORT_SYMBOL_GPL(cxd2841er_attach_s);
+ 
+ struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
+ 					struct i2c_adapter *i2c)
+ {
+ 	return cxd2841er_attach(cfg, i2c, 0);
+ }
+-EXPORT_SYMBOL(cxd2841er_attach_t_c);
++EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c);
+ 
+ static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+ 	.delsys = { SYS_DVBS, SYS_DVBS2 },
+diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+index d5b1b3788e392..09d31c368741d 100644
+--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
++++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+@@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(cxd2880_attach);
++EXPORT_SYMBOL_GPL(cxd2880_attach);
+ 
+ MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver");
+ MODULE_AUTHOR("Sony Semiconductor Solutions Corporation");
+diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
+index cafb41dba861c..9a8e7cdd2a247 100644
+--- a/drivers/media/dvb-frontends/dib0070.c
++++ b/drivers/media/dvb-frontends/dib0070.c
+@@ -762,7 +762,7 @@ free_mem:
+ 	fe->tuner_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib0070_attach);
++EXPORT_SYMBOL_GPL(dib0070_attach);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
+diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
+index 903da33642dff..c958bcff026ec 100644
+--- a/drivers/media/dvb-frontends/dib0090.c
++++ b/drivers/media/dvb-frontends/dib0090.c
+@@ -2634,7 +2634,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(dib0090_register);
++EXPORT_SYMBOL_GPL(dib0090_register);
+ 
+ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
+ {
+@@ -2660,7 +2660,7 @@ free_mem:
+ 	fe->tuner_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib0090_fw_register);
++EXPORT_SYMBOL_GPL(dib0090_fw_register);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
+diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
+index a6c2fc4586eb3..c598b2a633256 100644
+--- a/drivers/media/dvb-frontends/dib3000mb.c
++++ b/drivers/media/dvb-frontends/dib3000mb.c
+@@ -815,4 +815,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(dib3000mb_attach);
++EXPORT_SYMBOL_GPL(dib3000mb_attach);
+diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
+index 2e11a246aae0d..c2fca8289abae 100644
+--- a/drivers/media/dvb-frontends/dib3000mc.c
++++ b/drivers/media/dvb-frontends/dib3000mc.c
+@@ -935,7 +935,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib3000mc_attach);
++EXPORT_SYMBOL_GPL(dib3000mc_attach);
+ 
+ static const struct dvb_frontend_ops dib3000mc_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
+index 97ce97789c9e3..fdb22f32e3a11 100644
+--- a/drivers/media/dvb-frontends/dib7000m.c
++++ b/drivers/media/dvb-frontends/dib7000m.c
+@@ -1434,7 +1434,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib7000m_attach);
++EXPORT_SYMBOL_GPL(dib7000m_attach);
+ 
+ static const struct dvb_frontend_ops dib7000m_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
+index a90d2f51868ff..d1e53de5206ae 100644
+--- a/drivers/media/dvb-frontends/dib7000p.c
++++ b/drivers/media/dvb-frontends/dib7000p.c
+@@ -497,7 +497,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
+ 	prediv = reg_1856 & 0x3f;
+ 	loopdiv = (reg_1856 >> 6) & 0x3f;
+ 
+-	if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
++	if (loopdiv && bw && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
+ 		dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+ 		reg_1856 &= 0xf000;
+ 		reg_1857 = dib7000p_read_word(state, 1857);
+@@ -2822,7 +2822,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
+ 
+ 	return ops;
+ }
+-EXPORT_SYMBOL(dib7000p_attach);
++EXPORT_SYMBOL_GPL(dib7000p_attach);
+ 
+ static const struct dvb_frontend_ops dib7000p_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
+index fe19d127abb3f..301d8eca7a6f9 100644
+--- a/drivers/media/dvb-frontends/dib8000.c
++++ b/drivers/media/dvb-frontends/dib8000.c
+@@ -4527,7 +4527,7 @@ void *dib8000_attach(struct dib8000_ops *ops)
+ 
+ 	return ops;
+ }
+-EXPORT_SYMBOL(dib8000_attach);
++EXPORT_SYMBOL_GPL(dib8000_attach);
+ 
+ MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
+diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
+index 914ca820c174b..6f81890b31eeb 100644
+--- a/drivers/media/dvb-frontends/dib9000.c
++++ b/drivers/media/dvb-frontends/dib9000.c
+@@ -2546,7 +2546,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib9000_attach);
++EXPORT_SYMBOL_GPL(dib9000_attach);
+ 
+ static const struct dvb_frontend_ops dib9000_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
+index 68f4e8b5a0abb..a738573c8cd7a 100644
+--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
++++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
+@@ -12372,7 +12372,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drx39xxj_attach);
++EXPORT_SYMBOL_GPL(drx39xxj_attach);
+ 
+ static const struct dvb_frontend_ops drx39xxj_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
+index 9860cae65f1cf..6a531937f4bbb 100644
+--- a/drivers/media/dvb-frontends/drxd_hard.c
++++ b/drivers/media/dvb-frontends/drxd_hard.c
+@@ -2939,7 +2939,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drxd_attach);
++EXPORT_SYMBOL_GPL(drxd_attach);
+ 
+ MODULE_DESCRIPTION("DRXD driver");
+ MODULE_AUTHOR("Micronas");
+diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
+index 3301ef75d4417..1acdd204c25ce 100644
+--- a/drivers/media/dvb-frontends/drxk_hard.c
++++ b/drivers/media/dvb-frontends/drxk_hard.c
+@@ -6833,7 +6833,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drxk_attach);
++EXPORT_SYMBOL_GPL(drxk_attach);
+ 
+ MODULE_DESCRIPTION("DRX-K driver");
+ MODULE_AUTHOR("Ralph Metzler");
+diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
+index 20fcf31af1658..515aa7c7baf2a 100644
+--- a/drivers/media/dvb-frontends/ds3000.c
++++ b/drivers/media/dvb-frontends/ds3000.c
+@@ -859,7 +859,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
+ 	ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(ds3000_attach);
++EXPORT_SYMBOL_GPL(ds3000_attach);
+ 
+ static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
+ 					s32 carrier_offset_khz)
+diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
+index 90cb41eacf98c..ef697ab6bc2e5 100644
+--- a/drivers/media/dvb-frontends/dvb-pll.c
++++ b/drivers/media/dvb-frontends/dvb-pll.c
+@@ -866,7 +866,7 @@ out:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dvb_pll_attach);
++EXPORT_SYMBOL_GPL(dvb_pll_attach);
+ 
+ 
+ static int
+diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
+index 03bd80666cf83..2ad0a3c2f7567 100644
+--- a/drivers/media/dvb-frontends/ec100.c
++++ b/drivers/media/dvb-frontends/ec100.c
+@@ -299,7 +299,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(ec100_attach);
++EXPORT_SYMBOL_GPL(ec100_attach);
+ 
+ static const struct dvb_frontend_ops ec100_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
+index 68c1a3e0e2ba5..f127adee3ebb7 100644
+--- a/drivers/media/dvb-frontends/helene.c
++++ b/drivers/media/dvb-frontends/helene.c
+@@ -1025,7 +1025,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ 			priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(helene_attach_s);
++EXPORT_SYMBOL_GPL(helene_attach_s);
+ 
+ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ 		const struct helene_config *config,
+@@ -1061,7 +1061,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ 			priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(helene_attach);
++EXPORT_SYMBOL_GPL(helene_attach);
+ 
+ static int helene_probe(struct i2c_client *client)
+ {
+diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
+index 24bf5cbcc1846..0330b78a5b3f2 100644
+--- a/drivers/media/dvb-frontends/horus3a.c
++++ b/drivers/media/dvb-frontends/horus3a.c
+@@ -395,7 +395,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+ 		priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(horus3a_attach);
++EXPORT_SYMBOL_GPL(horus3a_attach);
+ 
+ MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver");
+ MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
+index 2cd69b4ff82cb..7d28a743f97eb 100644
+--- a/drivers/media/dvb-frontends/isl6405.c
++++ b/drivers/media/dvb-frontends/isl6405.c
+@@ -141,7 +141,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(isl6405_attach);
++EXPORT_SYMBOL_GPL(isl6405_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405");
+ MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss");
+diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
+index 43b0dfc6f453e..2e9f6f12f849e 100644
+--- a/drivers/media/dvb-frontends/isl6421.c
++++ b/drivers/media/dvb-frontends/isl6421.c
+@@ -213,7 +213,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(isl6421_attach);
++EXPORT_SYMBOL_GPL(isl6421_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
+ MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
+diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c
+index 8cd1bb88ce6e7..a0d0a38340574 100644
+--- a/drivers/media/dvb-frontends/isl6423.c
++++ b/drivers/media/dvb-frontends/isl6423.c
+@@ -289,7 +289,7 @@ exit:
+ 	fe->sec_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(isl6423_attach);
++EXPORT_SYMBOL_GPL(isl6423_attach);
+ 
+ MODULE_DESCRIPTION("ISL6423 SEC");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
+index 1b33478653d16..f8f362f50e78d 100644
+--- a/drivers/media/dvb-frontends/itd1000.c
++++ b/drivers/media/dvb-frontends/itd1000.c
+@@ -389,7 +389,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(itd1000_attach);
++EXPORT_SYMBOL_GPL(itd1000_attach);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
+ MODULE_DESCRIPTION("Integrant ITD1000 driver");
+diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
+index 73f27105c139d..3212e333d472b 100644
+--- a/drivers/media/dvb-frontends/ix2505v.c
++++ b/drivers/media/dvb-frontends/ix2505v.c
+@@ -302,7 +302,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(ix2505v_attach);
++EXPORT_SYMBOL_GPL(ix2505v_attach);
+ 
+ module_param_named(debug, ix2505v_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
+index c5106a1ea1cd0..fe5af2453d559 100644
+--- a/drivers/media/dvb-frontends/l64781.c
++++ b/drivers/media/dvb-frontends/l64781.c
+@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(l64781_attach);
++EXPORT_SYMBOL_GPL(l64781_attach);
+diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
+index f343066c297e2..fe700aa56bff3 100644
+--- a/drivers/media/dvb-frontends/lg2160.c
++++ b/drivers/media/dvb-frontends/lg2160.c
+@@ -1426,7 +1426,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(lg2160_attach);
++EXPORT_SYMBOL_GPL(lg2160_attach);
+ 
+ MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
+ MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
+diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
+index 62d7439889196..60a97f1cc74e5 100644
+--- a/drivers/media/dvb-frontends/lgdt3305.c
++++ b/drivers/media/dvb-frontends/lgdt3305.c
+@@ -1148,7 +1148,7 @@ fail:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(lgdt3305_attach);
++EXPORT_SYMBOL_GPL(lgdt3305_attach);
+ 
+ static const struct dvb_frontend_ops lgdt3304_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 70258884126b0..2d7750649850c 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -1859,7 +1859,7 @@ fail:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(lgdt3306a_attach);
++EXPORT_SYMBOL_GPL(lgdt3306a_attach);
+ 
+ #ifdef DBG_DUMP
+ 
+diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
+index 83565209c3b1e..f87937f9a6654 100644
+--- a/drivers/media/dvb-frontends/lgdt330x.c
++++ b/drivers/media/dvb-frontends/lgdt330x.c
+@@ -927,7 +927,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
+ 
+ 	return lgdt330x_get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(lgdt330x_attach);
++EXPORT_SYMBOL_GPL(lgdt330x_attach);
+ 
+ static const struct dvb_frontend_ops lgdt3302_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
+index 30014979b985b..ffaf60e16ecd4 100644
+--- a/drivers/media/dvb-frontends/lgs8gxx.c
++++ b/drivers/media/dvb-frontends/lgs8gxx.c
+@@ -1043,7 +1043,7 @@ error_out:
+ 	return NULL;
+ 
+ }
+-EXPORT_SYMBOL(lgs8gxx_attach);
++EXPORT_SYMBOL_GPL(lgs8gxx_attach);
+ 
+ MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
+index 9ffe06cd787dd..41bec050642b5 100644
+--- a/drivers/media/dvb-frontends/lnbh25.c
++++ b/drivers/media/dvb-frontends/lnbh25.c
+@@ -173,7 +173,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
+ 		__func__, priv->i2c_address);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(lnbh25_attach);
++EXPORT_SYMBOL_GPL(lnbh25_attach);
+ 
+ MODULE_DESCRIPTION("ST LNBH25 driver");
+ MODULE_AUTHOR("info@netup.ru");
+diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
+index e564974162d65..32593b1f75a38 100644
+--- a/drivers/media/dvb-frontends/lnbp21.c
++++ b/drivers/media/dvb-frontends/lnbp21.c
+@@ -155,7 +155,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
+ 	return lnbx2x_attach(fe, i2c, override_set, override_clear,
+ 							i2c_addr, LNBH24_TTX);
+ }
+-EXPORT_SYMBOL(lnbh24_attach);
++EXPORT_SYMBOL_GPL(lnbh24_attach);
+ 
+ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
+ 				struct i2c_adapter *i2c, u8 override_set,
+@@ -164,7 +164,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
+ 	return lnbx2x_attach(fe, i2c, override_set, override_clear,
+ 							0x08, LNBP21_ISEL);
+ }
+-EXPORT_SYMBOL(lnbp21_attach);
++EXPORT_SYMBOL_GPL(lnbp21_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
+ MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
+diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
+index b8c7145d4cefe..cb4ea5d3fad4a 100644
+--- a/drivers/media/dvb-frontends/lnbp22.c
++++ b/drivers/media/dvb-frontends/lnbp22.c
+@@ -125,7 +125,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(lnbp22_attach);
++EXPORT_SYMBOL_GPL(lnbp22_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
+ MODULE_AUTHOR("Dominik Kuhlen");
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index cf49ac56a37ed..cf037b61b226b 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1695,7 +1695,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+ 	*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
+ 	return pdata.get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(m88ds3103_attach);
++EXPORT_SYMBOL_GPL(m88ds3103_attach);
+ 
+ static const struct dvb_frontend_ops m88ds3103_ops = {
+ 	.delsys = {SYS_DVBS, SYS_DVBS2},
+diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
+index b294ba87e934f..2aa98203cd659 100644
+--- a/drivers/media/dvb-frontends/m88rs2000.c
++++ b/drivers/media/dvb-frontends/m88rs2000.c
+@@ -808,7 +808,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(m88rs2000_attach);
++EXPORT_SYMBOL_GPL(m88rs2000_attach);
+ 
+ MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
+index d3e29937cf4cf..460821a986e53 100644
+--- a/drivers/media/dvb-frontends/mb86a16.c
++++ b/drivers/media/dvb-frontends/mb86a16.c
+@@ -1851,6 +1851,6 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mb86a16_attach);
++EXPORT_SYMBOL_GPL(mb86a16_attach);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
+index 125fed4891ba9..f8e4bbee5bd50 100644
+--- a/drivers/media/dvb-frontends/mb86a20s.c
++++ b/drivers/media/dvb-frontends/mb86a20s.c
+@@ -2078,7 +2078,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
+ 	dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(mb86a20s_attach);
++EXPORT_SYMBOL_GPL(mb86a20s_attach);
+ 
+ static const struct dvb_frontend_ops mb86a20s_ops = {
+ 	.delsys = { SYS_ISDBT },
+diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
+index d43a67045dbe7..fb867dd8a26be 100644
+--- a/drivers/media/dvb-frontends/mt312.c
++++ b/drivers/media/dvb-frontends/mt312.c
+@@ -827,7 +827,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mt312_attach);
++EXPORT_SYMBOL_GPL(mt312_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
+index 399d5c519027e..1b2889f5cf67d 100644
+--- a/drivers/media/dvb-frontends/mt352.c
++++ b/drivers/media/dvb-frontends/mt352.c
+@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(mt352_attach);
++EXPORT_SYMBOL_GPL(mt352_attach);
+diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
+index 200b6dbc75f81..1c549ada6ebf9 100644
+--- a/drivers/media/dvb-frontends/nxt200x.c
++++ b/drivers/media/dvb-frontends/nxt200x.c
+@@ -1216,5 +1216,5 @@ MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat
+ MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(nxt200x_attach);
++EXPORT_SYMBOL_GPL(nxt200x_attach);
+ 
+diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
+index 136918f82dda0..e8d4940370ddf 100644
+--- a/drivers/media/dvb-frontends/nxt6000.c
++++ b/drivers/media/dvb-frontends/nxt6000.c
+@@ -621,4 +621,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver");
+ MODULE_AUTHOR("Florian Schirmer");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(nxt6000_attach);
++EXPORT_SYMBOL_GPL(nxt6000_attach);
+diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
+index 24de1b1151583..144a1f25dec0a 100644
+--- a/drivers/media/dvb-frontends/or51132.c
++++ b/drivers/media/dvb-frontends/or51132.c
+@@ -605,4 +605,4 @@ MODULE_AUTHOR("Kirk Lapray");
+ MODULE_AUTHOR("Trent Piepho");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(or51132_attach);
++EXPORT_SYMBOL_GPL(or51132_attach);
+diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
+index ddcaea5c9941f..dc60482162c54 100644
+--- a/drivers/media/dvb-frontends/or51211.c
++++ b/drivers/media/dvb-frontends/or51211.c
+@@ -551,5 +551,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
+ MODULE_AUTHOR("Kirk Lapray");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(or51211_attach);
++EXPORT_SYMBOL_GPL(or51211_attach);
+ 
+diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
+index 3089cc174a6f5..28b1dca077ead 100644
+--- a/drivers/media/dvb-frontends/s5h1409.c
++++ b/drivers/media/dvb-frontends/s5h1409.c
+@@ -981,7 +981,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1409_attach);
++EXPORT_SYMBOL_GPL(s5h1409_attach);
+ 
+ static const struct dvb_frontend_ops s5h1409_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
+index 2563a72e98b70..fc48e659c2d8a 100644
+--- a/drivers/media/dvb-frontends/s5h1411.c
++++ b/drivers/media/dvb-frontends/s5h1411.c
+@@ -900,7 +900,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1411_attach);
++EXPORT_SYMBOL_GPL(s5h1411_attach);
+ 
+ static const struct dvb_frontend_ops s5h1411_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
+index 6bdec2898bc81..d700de1ea6c24 100644
+--- a/drivers/media/dvb-frontends/s5h1420.c
++++ b/drivers/media/dvb-frontends/s5h1420.c
+@@ -918,7 +918,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1420_attach);
++EXPORT_SYMBOL_GPL(s5h1420_attach);
+ 
+ static const struct dvb_frontend_ops s5h1420_ops = {
+ 	.delsys = { SYS_DVBS },
+diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
+index 956e8ee4b388e..ff5d3bdf3bc67 100644
+--- a/drivers/media/dvb-frontends/s5h1432.c
++++ b/drivers/media/dvb-frontends/s5h1432.c
+@@ -355,7 +355,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(s5h1432_attach);
++EXPORT_SYMBOL_GPL(s5h1432_attach);
+ 
+ static const struct dvb_frontend_ops s5h1432_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
+index f118d8e641030..7e461ac159fc1 100644
+--- a/drivers/media/dvb-frontends/s921.c
++++ b/drivers/media/dvb-frontends/s921.c
+@@ -495,7 +495,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(s921_attach);
++EXPORT_SYMBOL_GPL(s921_attach);
+ 
+ static const struct dvb_frontend_ops s921_ops = {
+ 	.delsys = { SYS_ISDBT },
+diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
+index 2d29d2c4d434c..210ccd356e2bf 100644
+--- a/drivers/media/dvb-frontends/si21xx.c
++++ b/drivers/media/dvb-frontends/si21xx.c
+@@ -937,7 +937,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(si21xx_attach);
++EXPORT_SYMBOL_GPL(si21xx_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
+index 146e7f2dd3c5e..f59c0f96416b5 100644
+--- a/drivers/media/dvb-frontends/sp887x.c
++++ b/drivers/media/dvb-frontends/sp887x.c
+@@ -624,4 +624,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+ MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(sp887x_attach);
++EXPORT_SYMBOL_GPL(sp887x_attach);
+diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
+index 4ee6c1e1e9f7d..2f4d8fb400cd6 100644
+--- a/drivers/media/dvb-frontends/stb0899_drv.c
++++ b/drivers/media/dvb-frontends/stb0899_drv.c
+@@ -1638,7 +1638,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stb0899_attach);
++EXPORT_SYMBOL_GPL(stb0899_attach);
+ MODULE_PARM_DESC(verbose, "Set Verbosity level");
+ MODULE_AUTHOR("Manu Abraham");
+ MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
+diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
+index 8c9800d577e03..d74e34677b925 100644
+--- a/drivers/media/dvb-frontends/stb6000.c
++++ b/drivers/media/dvb-frontends/stb6000.c
+@@ -232,7 +232,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(stb6000_attach);
++EXPORT_SYMBOL_GPL(stb6000_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
+index 698866c4f15a7..c5818a15a0d70 100644
+--- a/drivers/media/dvb-frontends/stb6100.c
++++ b/drivers/media/dvb-frontends/stb6100.c
+@@ -557,7 +557,7 @@ static void stb6100_release(struct dvb_frontend *fe)
+ 	kfree(state);
+ }
+ 
+-EXPORT_SYMBOL(stb6100_attach);
++EXPORT_SYMBOL_GPL(stb6100_attach);
+ MODULE_PARM_DESC(verbose, "Set Verbosity level");
+ 
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
+index 3ae1f3a2f1420..a5581bd60f9e8 100644
+--- a/drivers/media/dvb-frontends/stv0288.c
++++ b/drivers/media/dvb-frontends/stv0288.c
+@@ -590,7 +590,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0288_attach);
++EXPORT_SYMBOL_GPL(stv0288_attach);
+ 
+ module_param(debug_legacy_dish_switch, int, 0444);
+ MODULE_PARM_DESC(debug_legacy_dish_switch,
+diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
+index 6d5962d5697ac..9d4dbd99a5a79 100644
+--- a/drivers/media/dvb-frontends/stv0297.c
++++ b/drivers/media/dvb-frontends/stv0297.c
+@@ -710,4 +710,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver");
+ MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(stv0297_attach);
++EXPORT_SYMBOL_GPL(stv0297_attach);
+diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
+index b5263a0ee5aa5..da7ff2c2e8e55 100644
+--- a/drivers/media/dvb-frontends/stv0299.c
++++ b/drivers/media/dvb-frontends/stv0299.c
+@@ -752,4 +752,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(stv0299_attach);
++EXPORT_SYMBOL_GPL(stv0299_attach);
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
+index 95e376f23506f..04556b77c16c9 100644
+--- a/drivers/media/dvb-frontends/stv0367.c
++++ b/drivers/media/dvb-frontends/stv0367.c
+@@ -1750,7 +1750,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367ter_attach);
++EXPORT_SYMBOL_GPL(stv0367ter_attach);
+ 
+ static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
+ {
+@@ -2919,7 +2919,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367cab_attach);
++EXPORT_SYMBOL_GPL(stv0367cab_attach);
+ 
+ /*
+  * Functions for operation on Digital Devices hardware
+@@ -3340,7 +3340,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367ddb_attach);
++EXPORT_SYMBOL_GPL(stv0367ddb_attach);
+ 
+ MODULE_PARM_DESC(debug, "Set debug");
+ MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
+diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
+index 212312d20ff62..e7b9b9b11d7df 100644
+--- a/drivers/media/dvb-frontends/stv0900_core.c
++++ b/drivers/media/dvb-frontends/stv0900_core.c
+@@ -1957,7 +1957,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0900_attach);
++EXPORT_SYMBOL_GPL(stv0900_attach);
+ 
+ MODULE_PARM_DESC(debug, "Set debug");
+ 
+diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
+index a07dc5fdeb3d8..cc45139057ba8 100644
+--- a/drivers/media/dvb-frontends/stv090x.c
++++ b/drivers/media/dvb-frontends/stv090x.c
+@@ -5071,7 +5071,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv090x_attach);
++EXPORT_SYMBOL_GPL(stv090x_attach);
+ 
+ static const struct i2c_device_id stv090x_id_table[] = {
+ 	{"stv090x", 0},
+diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
+index 963f6a896102a..1cf9c095dbff0 100644
+--- a/drivers/media/dvb-frontends/stv6110.c
++++ b/drivers/media/dvb-frontends/stv6110.c
+@@ -427,7 +427,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(stv6110_attach);
++EXPORT_SYMBOL_GPL(stv6110_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
+index 11653f846c123..c678f47d2449c 100644
+--- a/drivers/media/dvb-frontends/stv6110x.c
++++ b/drivers/media/dvb-frontends/stv6110x.c
+@@ -467,7 +467,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+ 	dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
+ 	return stv6110x->devctl;
+ }
+-EXPORT_SYMBOL(stv6110x_attach);
++EXPORT_SYMBOL_GPL(stv6110x_attach);
+ 
+ static const struct i2c_device_id stv6110x_id_table[] = {
+ 	{"stv6110x", 0},
+diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
+index faa6e54b33729..462e12ab6bd14 100644
+--- a/drivers/media/dvb-frontends/tda10021.c
++++ b/drivers/media/dvb-frontends/tda10021.c
+@@ -523,4 +523,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10021_attach);
++EXPORT_SYMBOL_GPL(tda10021_attach);
+diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
+index 8f32edf6b700e..4c2541ecd7433 100644
+--- a/drivers/media/dvb-frontends/tda10023.c
++++ b/drivers/media/dvb-frontends/tda10023.c
+@@ -594,4 +594,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
+ MODULE_AUTHOR("Georg Acher, Hartmut Birr");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10023_attach);
++EXPORT_SYMBOL_GPL(tda10023_attach);
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index 0b3f6999515e3..f6d8a64762b99 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -1138,7 +1138,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(tda10048_attach);
++EXPORT_SYMBOL_GPL(tda10048_attach);
+ 
+ static const struct dvb_frontend_ops tda10048_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index 83a798ca9b002..6f306db6c615f 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -1378,5 +1378,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
+ MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10045_attach);
+-EXPORT_SYMBOL(tda10046_attach);
++EXPORT_SYMBOL_GPL(tda10045_attach);
++EXPORT_SYMBOL_GPL(tda10046_attach);
+diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
+index cdcf97664bba8..b449514ae5854 100644
+--- a/drivers/media/dvb-frontends/tda10086.c
++++ b/drivers/media/dvb-frontends/tda10086.c
+@@ -764,4 +764,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator");
+ MODULE_AUTHOR("Andrew de Quincey");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10086_attach);
++EXPORT_SYMBOL_GPL(tda10086_attach);
+diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
+index 13e8969da7f89..346be5011fb73 100644
+--- a/drivers/media/dvb-frontends/tda665x.c
++++ b/drivers/media/dvb-frontends/tda665x.c
+@@ -227,7 +227,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda665x_attach);
++EXPORT_SYMBOL_GPL(tda665x_attach);
+ 
+ MODULE_DESCRIPTION("TDA665x driver");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
+index e3e1c3db2c856..44f53624557bc 100644
+--- a/drivers/media/dvb-frontends/tda8083.c
++++ b/drivers/media/dvb-frontends/tda8083.c
+@@ -481,4 +481,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda8083_attach);
++EXPORT_SYMBOL_GPL(tda8083_attach);
+diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
+index 0d576d41c67d8..8b06f92745dca 100644
+--- a/drivers/media/dvb-frontends/tda8261.c
++++ b/drivers/media/dvb-frontends/tda8261.c
+@@ -188,7 +188,7 @@ exit:
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(tda8261_attach);
++EXPORT_SYMBOL_GPL(tda8261_attach);
+ 
+ MODULE_AUTHOR("Manu Abraham");
+ MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
+diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
+index f9703a1dd758c..eafcf5f7da3dc 100644
+--- a/drivers/media/dvb-frontends/tda826x.c
++++ b/drivers/media/dvb-frontends/tda826x.c
+@@ -164,7 +164,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda826x_attach);
++EXPORT_SYMBOL_GPL(tda826x_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
+index f5b60f8276974..a5ebce57f35e6 100644
+--- a/drivers/media/dvb-frontends/ts2020.c
++++ b/drivers/media/dvb-frontends/ts2020.c
+@@ -525,7 +525,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(ts2020_attach);
++EXPORT_SYMBOL_GPL(ts2020_attach);
+ 
+ /*
+  * We implement own regmap locking due to legacy DVB attach which uses frontend
+diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
+index 2483f614d0e7d..41dd9b6d31908 100644
+--- a/drivers/media/dvb-frontends/tua6100.c
++++ b/drivers/media/dvb-frontends/tua6100.c
+@@ -186,7 +186,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tua6100_attach);
++EXPORT_SYMBOL_GPL(tua6100_attach);
+ 
+ MODULE_DESCRIPTION("DVB tua6100 driver");
+ MODULE_AUTHOR("Andrew de Quincey");
+diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
+index 9df14d0be1c1a..ee5620e731e9b 100644
+--- a/drivers/media/dvb-frontends/ves1820.c
++++ b/drivers/media/dvb-frontends/ves1820.c
+@@ -434,4 +434,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(ves1820_attach);
++EXPORT_SYMBOL_GPL(ves1820_attach);
+diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
+index b747272863025..c60e21d26b881 100644
+--- a/drivers/media/dvb-frontends/ves1x93.c
++++ b/drivers/media/dvb-frontends/ves1x93.c
+@@ -540,4 +540,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(ves1x93_attach);
++EXPORT_SYMBOL_GPL(ves1x93_attach);
+diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
+index d392c7cce2ce0..7ba575e9c55f4 100644
+--- a/drivers/media/dvb-frontends/zl10036.c
++++ b/drivers/media/dvb-frontends/zl10036.c
+@@ -496,7 +496,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(zl10036_attach);
++EXPORT_SYMBOL_GPL(zl10036_attach);
+ 
+ module_param_named(debug, zl10036_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
+index 1335bf78d5b7f..a3e4d219400ce 100644
+--- a/drivers/media/dvb-frontends/zl10039.c
++++ b/drivers/media/dvb-frontends/zl10039.c
+@@ -295,7 +295,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(zl10039_attach);
++EXPORT_SYMBOL_GPL(zl10039_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
+index 2a2cf20a73d61..8849d05475c27 100644
+--- a/drivers/media/dvb-frontends/zl10353.c
++++ b/drivers/media/dvb-frontends/zl10353.c
+@@ -665,4 +665,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
+ MODULE_AUTHOR("Chris Pascoe");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(zl10353_attach);
++EXPORT_SYMBOL_GPL(zl10353_attach);
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 226454b6a90dd..0669aea3eba35 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -25,8 +25,15 @@ config VIDEO_IR_I2C
+ # V4L2 I2C drivers that are related with Camera support
+ #
+ 
+-menu "Camera sensor devices"
+-	visible if MEDIA_CAMERA_SUPPORT
++menuconfig VIDEO_CAMERA_SENSOR
++	bool "Camera sensor devices"
++	depends on MEDIA_CAMERA_SUPPORT && I2C
++	select MEDIA_CONTROLLER
++	select V4L2_FWNODE
++	select VIDEO_V4L2_SUBDEV_API
++	default y
++
++if VIDEO_CAMERA_SENSOR
+ 
+ config VIDEO_APTINA_PLL
+ 	tristate
+@@ -810,7 +817,7 @@ config VIDEO_ST_VGXY61
+ source "drivers/media/i2c/ccs/Kconfig"
+ source "drivers/media/i2c/et8ek8/Kconfig"
+ 
+-endmenu
++endif
+ 
+ menu "Lens drivers"
+ 	visible if MEDIA_CAMERA_SUPPORT
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index 5f605b9be3b15..1543d24f522c3 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -349,7 +349,6 @@ static void ad5820_remove(struct i2c_client *client)
+ static const struct i2c_device_id ad5820_id_table[] = {
+ 	{ "ad5820", 0 },
+ 	{ "ad5821", 0 },
+-	{ "ad5823", 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+@@ -357,7 +356,6 @@ MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+ static const struct of_device_id ad5820_of_table[] = {
+ 	{ .compatible = "adi,ad5820" },
+ 	{ .compatible = "adi,ad5821" },
+-	{ .compatible = "adi,ad5823" },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, ad5820_of_table);
+diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
+index 45f2b2f55ec5c..08400edf77ced 100644
+--- a/drivers/media/i2c/ccs/ccs-data.c
++++ b/drivers/media/i2c/ccs/ccs-data.c
+@@ -464,8 +464,7 @@ static int ccs_data_parse_rules(struct bin_container *bin,
+ 		rule_payload = __rule_type + 1;
+ 		rule_plen2 = rule_plen - sizeof(*__rule_type);
+ 
+-		switch (*__rule_type) {
+-		case CCS_DATA_BLOCK_RULE_ID_IF: {
++		if (*__rule_type == CCS_DATA_BLOCK_RULE_ID_IF) {
+ 			const struct __ccs_data_block_rule_if *__if_rules =
+ 				rule_payload;
+ 			const size_t __num_if_rules =
+@@ -514,49 +513,61 @@ static int ccs_data_parse_rules(struct bin_container *bin,
+ 				rules->if_rules = if_rule;
+ 				rules->num_if_rules = __num_if_rules;
+ 			}
+-			break;
+-		}
+-		case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
+-			rval = ccs_data_parse_reg_rules(bin, &rules->read_only_regs,
+-							&rules->num_read_only_regs,
+-							rule_payload,
+-							rule_payload + rule_plen2,
+-							dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_FFD:
+-			rval = ccs_data_parse_ffd(bin, &rules->frame_format,
+-						  rule_payload,
+-						  rule_payload + rule_plen2,
+-						  dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_MSR:
+-			rval = ccs_data_parse_reg_rules(bin,
+-							&rules->manufacturer_regs,
+-							&rules->num_manufacturer_regs,
+-							rule_payload,
+-							rule_payload + rule_plen2,
+-							dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
+-			rval = ccs_data_parse_pdaf_readout(bin,
+-							   &rules->pdaf_readout,
+-							   rule_payload,
+-							   rule_payload + rule_plen2,
+-							   dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		default:
+-			dev_dbg(dev,
+-				"Don't know how to handle rule type %u!\n",
+-				*__rule_type);
+-			return -EINVAL;
++		} else {
++			/* Check there was an if rule before any other rules */
++			if (bin->base && !rules)
++				return -EINVAL;
++
++			switch (*__rule_type) {
++			case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
++				rval = ccs_data_parse_reg_rules(bin,
++								rules ?
++								&rules->read_only_regs : NULL,
++								rules ?
++								&rules->num_read_only_regs : NULL,
++								rule_payload,
++								rule_payload + rule_plen2,
++								dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_FFD:
++				rval = ccs_data_parse_ffd(bin, rules ?
++							  &rules->frame_format : NULL,
++							  rule_payload,
++							  rule_payload + rule_plen2,
++							  dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_MSR:
++				rval = ccs_data_parse_reg_rules(bin,
++								rules ?
++								&rules->manufacturer_regs : NULL,
++								rules ?
++								&rules->num_manufacturer_regs : NULL,
++								rule_payload,
++								rule_payload + rule_plen2,
++								dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
++				rval = ccs_data_parse_pdaf_readout(bin,
++								   rules ?
++								   &rules->pdaf_readout : NULL,
++								   rule_payload,
++								   rule_payload + rule_plen2,
++								   dev);
++				if (rval)
++					return rval;
++				break;
++			default:
++				dev_dbg(dev,
++					"Don't know how to handle rule type %u!\n",
++					*__rule_type);
++				return -EINVAL;
++			}
+ 		}
+ 		__next_rule = __next_rule + rule_hlen + rule_plen;
+ 	}
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index b3f832e9d7e16..0622a9fcd2e07 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -902,7 +902,6 @@ static const char * const imx290_test_pattern_menu[] = {
+ };
+ 
+ static void imx290_ctrl_update(struct imx290 *imx290,
+-			       const struct v4l2_mbus_framefmt *format,
+ 			       const struct imx290_mode *mode)
+ {
+ 	unsigned int hblank_min = mode->hmax_min - mode->width;
+@@ -1195,7 +1194,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
+ 	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ 		imx290->current_mode = mode;
+ 
+-		imx290_ctrl_update(imx290, &fmt->format, mode);
++		imx290_ctrl_update(imx290, mode);
+ 		imx290_exposure_update(imx290, mode);
+ 	}
+ 
+@@ -1300,7 +1299,6 @@ static const struct media_entity_operations imx290_subdev_entity_ops = {
+ static int imx290_subdev_init(struct imx290 *imx290)
+ {
+ 	struct i2c_client *client = to_i2c_client(imx290->dev);
+-	const struct v4l2_mbus_framefmt *format;
+ 	struct v4l2_subdev_state *state;
+ 	int ret;
+ 
+@@ -1335,8 +1333,7 @@ static int imx290_subdev_init(struct imx290 *imx290)
+ 	}
+ 
+ 	state = v4l2_subdev_lock_and_get_active_state(&imx290->sd);
+-	format = v4l2_subdev_get_pad_format(&imx290->sd, state, 0);
+-	imx290_ctrl_update(imx290, format, imx290->current_mode);
++	imx290_ctrl_update(imx290, imx290->current_mode);
+ 	v4l2_subdev_unlock_state(state);
+ 
+ 	return 0;
+diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
+index d06e9fc37f770..55fc56ffad31c 100644
+--- a/drivers/media/i2c/ov2680.c
++++ b/drivers/media/i2c/ov2680.c
+@@ -54,6 +54,9 @@
+ #define OV2680_WIDTH_MAX		1600
+ #define OV2680_HEIGHT_MAX		1200
+ 
++#define OV2680_DEFAULT_WIDTH			800
++#define OV2680_DEFAULT_HEIGHT			600
++
+ enum ov2680_mode_id {
+ 	OV2680_MODE_QUXGA_800_600,
+ 	OV2680_MODE_720P_1280_720,
+@@ -85,15 +88,8 @@ struct ov2680_mode_info {
+ 
+ struct ov2680_ctrls {
+ 	struct v4l2_ctrl_handler handler;
+-	struct {
+-		struct v4l2_ctrl *auto_exp;
+-		struct v4l2_ctrl *exposure;
+-	};
+-	struct {
+-		struct v4l2_ctrl *auto_gain;
+-		struct v4l2_ctrl *gain;
+-	};
+-
++	struct v4l2_ctrl *exposure;
++	struct v4l2_ctrl *gain;
+ 	struct v4l2_ctrl *hflip;
+ 	struct v4l2_ctrl *vflip;
+ 	struct v4l2_ctrl *test_pattern;
+@@ -143,6 +139,7 @@ static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
+ 	{0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
+ 	{0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
+ 	{0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
++	{0x3503, 0x03},
+ };
+ 
+ static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
+@@ -321,70 +318,62 @@ static void ov2680_power_down(struct ov2680_dev *sensor)
+ 	usleep_range(5000, 10000);
+ }
+ 
+-static int ov2680_bayer_order(struct ov2680_dev *sensor)
++static void ov2680_set_bayer_order(struct ov2680_dev *sensor,
++				   struct v4l2_mbus_framefmt *fmt)
+ {
+-	u32 format1;
+-	u32 format2;
+-	u32 hv_flip;
+-	int ret;
+-
+-	ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
+-	if (ret < 0)
+-		return ret;
++	int hv_flip = 0;
+ 
+-	hv_flip = (format2 & BIT(2)  << 1) | (format1 & BIT(2));
++	if (sensor->ctrls.vflip && sensor->ctrls.vflip->val)
++		hv_flip += 1;
+ 
+-	sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
++	if (sensor->ctrls.hflip && sensor->ctrls.hflip->val)
++		hv_flip += 2;
+ 
+-	return 0;
++	fmt->code = ov2680_hv_flip_bayer_order[hv_flip];
+ }
+ 
+-static int ov2680_vflip_enable(struct ov2680_dev *sensor)
++static void ov2680_fill_format(struct ov2680_dev *sensor,
++			       struct v4l2_mbus_framefmt *fmt,
++			       unsigned int width, unsigned int height)
+ {
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
+-	if (ret < 0)
+-		return ret;
+-
+-	return ov2680_bayer_order(sensor);
++	memset(fmt, 0, sizeof(*fmt));
++	fmt->width = width;
++	fmt->height = height;
++	fmt->field = V4L2_FIELD_NONE;
++	fmt->colorspace = V4L2_COLORSPACE_SRGB;
++	ov2680_set_bayer_order(sensor, fmt);
+ }
+ 
+-static int ov2680_vflip_disable(struct ov2680_dev *sensor)
++static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
+ {
+ 	int ret;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(0));
+-	if (ret < 0)
+-		return ret;
+-
+-	return ov2680_bayer_order(sensor);
+-}
+-
+-static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+-{
+-	int ret;
++	if (sensor->is_streaming)
++		return -EBUSY;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
++	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1,
++			     BIT(2), val ? BIT(2) : 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return ov2680_bayer_order(sensor);
++	ov2680_set_bayer_order(sensor, &sensor->fmt);
++	return 0;
+ }
+ 
+-static int ov2680_hflip_disable(struct ov2680_dev *sensor)
++static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
+ {
+ 	int ret;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(0));
++	if (sensor->is_streaming)
++		return -EBUSY;
++
++	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2,
++			     BIT(2), val ? BIT(2) : 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return ov2680_bayer_order(sensor);
++	ov2680_set_bayer_order(sensor, &sensor->fmt);
++	return 0;
+ }
+ 
+ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+@@ -405,69 +394,15 @@ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+ 	return 0;
+ }
+ 
+-static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
++static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
+ {
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	u32 gain;
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
+-			     auto_gain ? 0 : BIT(1));
+-	if (ret < 0)
+-		return ret;
+-
+-	if (auto_gain || !ctrls->gain->is_new)
+-		return 0;
+-
+-	gain = ctrls->gain->val;
+-
+-	ret = ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+-
+-	return 0;
+-}
+-
+-static int ov2680_gain_get(struct ov2680_dev *sensor)
+-{
+-	u32 gain;
+-	int ret;
+-
+-	ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
+-	if (ret)
+-		return ret;
+-
+-	return gain;
+-}
+-
+-static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
+-{
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	u32 exp;
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
+-			     auto_exp ? 0 : BIT(0));
+-	if (ret < 0)
+-		return ret;
+-
+-	if (auto_exp || !ctrls->exposure->is_new)
+-		return 0;
+-
+-	exp = (u32)ctrls->exposure->val;
+-	exp <<= 4;
+-
+-	return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
++	return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+ }
+ 
+-static int ov2680_exposure_get(struct ov2680_dev *sensor)
++static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
+ {
+-	int ret;
+-	u32 exp;
+-
+-	ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
+-	if (ret)
+-		return ret;
+-
+-	return exp >> 4;
++	return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH,
++				  exp << 4);
+ }
+ 
+ static int ov2680_stream_enable(struct ov2680_dev *sensor)
+@@ -482,33 +417,17 @@ static int ov2680_stream_disable(struct ov2680_dev *sensor)
+ 
+ static int ov2680_mode_set(struct ov2680_dev *sensor)
+ {
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ 	int ret;
+ 
+-	ret = ov2680_gain_set(sensor, false);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = ov2680_exposure_set(sensor, false);
++	ret = ov2680_load_regs(sensor, sensor->current_mode);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = ov2680_load_regs(sensor, sensor->current_mode);
++	/* Restore value of all ctrls */
++	ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (ctrls->auto_gain->val) {
+-		ret = ov2680_gain_set(sensor, true);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
+-		ret = ov2680_exposure_set(sensor, true);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+ 	sensor->mode_pending_changes = false;
+ 
+ 	return 0;
+@@ -556,7 +475,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 		ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
+ 		if (ret != 0) {
+ 			dev_err(dev, "sensor soft reset failed\n");
+-			return ret;
++			goto err_disable_regulators;
+ 		}
+ 		usleep_range(1000, 2000);
+ 	} else {
+@@ -566,7 +485,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 
+ 	ret = clk_prepare_enable(sensor->xvclk);
+ 	if (ret < 0)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	sensor->is_enabled = true;
+ 
+@@ -576,6 +495,10 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 	ov2680_stream_disable(sensor);
+ 
+ 	return 0;
++
++err_disable_regulators:
++	regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
++	return ret;
+ }
+ 
+ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+@@ -590,15 +513,10 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+ 	else
+ 		ret = ov2680_power_off(sensor);
+ 
+-	mutex_unlock(&sensor->lock);
+-
+-	if (on && ret == 0) {
+-		ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+-		if (ret < 0)
+-			return ret;
+-
++	if (on && ret == 0)
+ 		ret = ov2680_mode_restore(sensor);
+-	}
++
++	mutex_unlock(&sensor->lock);
+ 
+ 	return ret;
+ }
+@@ -664,7 +582,6 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ {
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ 	struct v4l2_mbus_framefmt *fmt = NULL;
+-	int ret = 0;
+ 
+ 	if (format->pad != 0)
+ 		return -EINVAL;
+@@ -672,22 +589,17 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ 	mutex_lock(&sensor->lock);
+ 
+ 	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 		fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
+ 						 format->pad);
+-#else
+-		ret = -EINVAL;
+-#endif
+ 	} else {
+ 		fmt = &sensor->fmt;
+ 	}
+ 
+-	if (fmt)
+-		format->format = *fmt;
++	format->format = *fmt;
+ 
+ 	mutex_unlock(&sensor->lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int ov2680_set_fmt(struct v4l2_subdev *sd,
+@@ -695,43 +607,35 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ 			  struct v4l2_subdev_format *format)
+ {
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct v4l2_mbus_framefmt *fmt = &format->format;
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 	struct v4l2_mbus_framefmt *try_fmt;
+-#endif
+ 	const struct ov2680_mode_info *mode;
+ 	int ret = 0;
+ 
+ 	if (format->pad != 0)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&sensor->lock);
+-
+-	if (sensor->is_streaming) {
+-		ret = -EBUSY;
+-		goto unlock;
+-	}
+-
+ 	mode = v4l2_find_nearest_size(ov2680_mode_data,
+-				      ARRAY_SIZE(ov2680_mode_data), width,
+-				      height, fmt->width, fmt->height);
+-	if (!mode) {
+-		ret = -EINVAL;
+-		goto unlock;
+-	}
++				      ARRAY_SIZE(ov2680_mode_data),
++				      width, height,
++				      format->format.width,
++				      format->format.height);
++	if (!mode)
++		return -EINVAL;
++
++	ov2680_fill_format(sensor, &format->format, mode->width, mode->height);
+ 
+ 	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 		try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+-		format->format = *try_fmt;
+-#endif
+-		goto unlock;
++		*try_fmt = format->format;
++		return 0;
+ 	}
+ 
+-	fmt->width = mode->width;
+-	fmt->height = mode->height;
+-	fmt->code = sensor->fmt.code;
+-	fmt->colorspace = sensor->fmt.colorspace;
++	mutex_lock(&sensor->lock);
++
++	if (sensor->is_streaming) {
++		ret = -EBUSY;
++		goto unlock;
++	}
+ 
+ 	sensor->current_mode = mode;
+ 	sensor->fmt = format->format;
+@@ -746,16 +650,11 @@ unlock:
+ static int ov2680_init_cfg(struct v4l2_subdev *sd,
+ 			   struct v4l2_subdev_state *sd_state)
+ {
+-	struct v4l2_subdev_format fmt = {
+-		.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+-		: V4L2_SUBDEV_FORMAT_ACTIVE,
+-		.format = {
+-			.width = 800,
+-			.height = 600,
+-		}
+-	};
++	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ 
+-	return ov2680_set_fmt(sd, sd_state, &fmt);
++	ov2680_fill_format(sensor, &sd_state->pads[0].try_fmt,
++			   OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
++	return 0;
+ }
+ 
+ static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+@@ -794,66 +693,23 @@ static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
+-static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+-{
+-	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+-	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	int val;
+-
+-	if (!sensor->is_enabled)
+-		return 0;
+-
+-	switch (ctrl->id) {
+-	case V4L2_CID_GAIN:
+-		val = ov2680_gain_get(sensor);
+-		if (val < 0)
+-			return val;
+-		ctrls->gain->val = val;
+-		break;
+-	case V4L2_CID_EXPOSURE:
+-		val = ov2680_exposure_get(sensor);
+-		if (val < 0)
+-			return val;
+-		ctrls->exposure->val = val;
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+ {
+ 	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ 
+ 	if (!sensor->is_enabled)
+ 		return 0;
+ 
+ 	switch (ctrl->id) {
+-	case V4L2_CID_AUTOGAIN:
+-		return ov2680_gain_set(sensor, !!ctrl->val);
+ 	case V4L2_CID_GAIN:
+-		return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
+-	case V4L2_CID_EXPOSURE_AUTO:
+-		return ov2680_exposure_set(sensor, !!ctrl->val);
++		return ov2680_gain_set(sensor, ctrl->val);
+ 	case V4L2_CID_EXPOSURE:
+-		return ov2680_exposure_set(sensor, !!ctrls->auto_exp->val);
++		return ov2680_exposure_set(sensor, ctrl->val);
+ 	case V4L2_CID_VFLIP:
+-		if (sensor->is_streaming)
+-			return -EBUSY;
+-		if (ctrl->val)
+-			return ov2680_vflip_enable(sensor);
+-		else
+-			return ov2680_vflip_disable(sensor);
++		return ov2680_set_vflip(sensor, ctrl->val);
+ 	case V4L2_CID_HFLIP:
+-		if (sensor->is_streaming)
+-			return -EBUSY;
+-		if (ctrl->val)
+-			return ov2680_hflip_enable(sensor);
+-		else
+-			return ov2680_hflip_disable(sensor);
++		return ov2680_set_hflip(sensor, ctrl->val);
+ 	case V4L2_CID_TEST_PATTERN:
+ 		return ov2680_test_pattern_set(sensor, ctrl->val);
+ 	default:
+@@ -864,7 +720,6 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+ }
+ 
+ static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
+-	.g_volatile_ctrl = ov2680_g_volatile_ctrl,
+ 	.s_ctrl = ov2680_s_ctrl,
+ };
+ 
+@@ -898,11 +753,8 @@ static int ov2680_mode_init(struct ov2680_dev *sensor)
+ 	const struct ov2680_mode_info *init_mode;
+ 
+ 	/* set initial mode */
+-	sensor->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+-	sensor->fmt.width = 800;
+-	sensor->fmt.height = 600;
+-	sensor->fmt.field = V4L2_FIELD_NONE;
+-	sensor->fmt.colorspace = V4L2_COLORSPACE_SRGB;
++	ov2680_fill_format(sensor, &sensor->fmt,
++			   OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
+ 
+ 	sensor->frame_interval.denominator = OV2680_FRAME_RATE;
+ 	sensor->frame_interval.numerator = 1;
+@@ -926,9 +778,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 	v4l2_i2c_subdev_init(&sensor->sd, sensor->i2c_client,
+ 			     &ov2680_subdev_ops);
+ 
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 	sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+-#endif
+ 	sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ 	sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ 
+@@ -936,7 +786,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	v4l2_ctrl_handler_init(hdl, 7);
++	v4l2_ctrl_handler_init(hdl, 5);
+ 
+ 	hdl->lock = &sensor->lock;
+ 
+@@ -948,16 +798,9 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 					ARRAY_SIZE(test_pattern_menu) - 1,
+ 					0, 0, test_pattern_menu);
+ 
+-	ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+-						 V4L2_CID_EXPOSURE_AUTO,
+-						 V4L2_EXPOSURE_MANUAL, 0,
+-						 V4L2_EXPOSURE_AUTO);
+-
+ 	ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 					    0, 32767, 1, 0);
+ 
+-	ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+-					     0, 1, 1, 1);
+ 	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
+ 
+ 	if (hdl->error) {
+@@ -965,14 +808,9 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 		goto cleanup_entity;
+ 	}
+ 
+-	ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+-	ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ 	ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ 	ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ 
+-	v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+-	v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+-
+ 	sensor->sd.ctrl_handler = hdl;
+ 
+ 	ret = v4l2_async_register_subdev(&sensor->sd);
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 36b509714c8c7..8b7ff2f3bdda7 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -568,9 +568,7 @@ static const struct reg_value ov5640_init_setting[] = {
+ 	{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
+ 	{0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
+ 	{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
+-	{0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
+-	{0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+-	{0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
++	{0x501f, 0x00, 0, 0}, {0x440e, 0x00, 0, 0}, {0x4837, 0x0a, 0, 0},
+ 	{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
+ 	{0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
+ 	{0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
+@@ -634,7 +632,8 @@ static const struct reg_value ov5640_setting_low_res[] = {
+ 	{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ 	{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ 	{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
+-	{0x4407, 0x04, 0, 0}, {0x5001, 0xa3, 0, 0},
++	{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++	{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+ };
+ 
+ static const struct reg_value ov5640_setting_720P_1280_720[] = {
+@@ -2453,16 +2452,13 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
+ static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ {
+ 	if (sensor->pwdn_gpio) {
+-		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ 
+ 		/* camera power cycle */
+ 		ov5640_power(sensor, false);
+-		usleep_range(5000, 10000);
++		usleep_range(5000, 10000);	/* t2 */
+ 		ov5640_power(sensor, true);
+-		usleep_range(5000, 10000);
+-
+-		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+-		usleep_range(1000, 2000);
++		usleep_range(1000, 2000);	/* t3 */
+ 
+ 		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ 	} else {
+@@ -2470,7 +2466,7 @@ static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ 		ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
+ 				 OV5640_REG_SYS_CTRL0_SW_RST);
+ 	}
+-	usleep_range(20000, 25000);
++	usleep_range(20000, 25000);	/* t4 */
+ 
+ 	/*
+ 	 * software standby: allows registers programming;
+@@ -2543,9 +2539,9 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
+ 	 *		  "ov5640_set_stream_mipi()")
+ 	 * [4] = 0	: Power up MIPI HS Tx
+ 	 * [3] = 0	: Power up MIPI LS Rx
+-	 * [2] = 0	: MIPI interface disabled
++	 * [2] = 1	: MIPI interface enabled
+ 	 */
+-	ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
++	ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x44);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
+index 043fec778a5e5..7a71bb30426b5 100644
+--- a/drivers/media/i2c/rdacm21.c
++++ b/drivers/media/i2c/rdacm21.c
+@@ -351,7 +351,7 @@ static void ov10640_power_up(struct rdacm21_device *dev)
+ static int ov10640_check_id(struct rdacm21_device *dev)
+ {
+ 	unsigned int i;
+-	u8 val;
++	u8 val = 0;
+ 
+ 	/* Read OV10640 ID to test communications. */
+ 	for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
+index c7fb35ee3f9de..e543b3f7a4d89 100644
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -2068,6 +2068,10 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+ 		tvpc->ent.name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ 						v4l2c->name, v4l2c->label ?
+ 						v4l2c->label : "");
++		if (!tvpc->ent.name) {
++			ret = -ENOMEM;
++			goto err_free;
++		}
+ 	}
+ 
+ 	ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0);
+diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
+index 480194543d055..ee095bde0b686 100644
+--- a/drivers/media/pci/Kconfig
++++ b/drivers/media/pci/Kconfig
+@@ -73,7 +73,7 @@ config VIDEO_PCI_SKELETON
+ 	  Enable build of the skeleton PCI driver, used as a reference
+ 	  when developing new drivers.
+ 
+-source "drivers/media/pci/intel/ipu3/Kconfig"
++source "drivers/media/pci/intel/Kconfig"
+ 
+ endif #MEDIA_PCI_SUPPORT
+ endif #PCI
+diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
+index 3e52a51982d76..110651e478314 100644
+--- a/drivers/media/pci/bt8xx/dst.c
++++ b/drivers/media/pci/bt8xx/dst.c
+@@ -1722,7 +1722,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
+ 	return state;				/*	Manu (DST is a card not a frontend)	*/
+ }
+ 
+-EXPORT_SYMBOL(dst_attach);
++EXPORT_SYMBOL_GPL(dst_attach);
+ 
+ static const struct dvb_frontend_ops dst_dvbt_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
+index d234a0f404d68..a9cc6e7a57f99 100644
+--- a/drivers/media/pci/bt8xx/dst_ca.c
++++ b/drivers/media/pci/bt8xx/dst_ca.c
+@@ -668,7 +668,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(dst_ca_attach);
++EXPORT_SYMBOL_GPL(dst_ca_attach);
+ 
+ MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+index 6868a0c4fc82a..520ebd16b0c44 100644
+--- a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
++++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+@@ -112,7 +112,7 @@ struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void)
+ 	state->frontend.demodulator_priv = state;
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(ddbridge_dummy_fe_qam_attach);
++EXPORT_SYMBOL_GPL(ddbridge_dummy_fe_qam_attach);
+ 
+ static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops = {
+ 	.delsys = { SYS_DVBC_ANNEX_A },
+diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
+new file mode 100644
+index 0000000000000..51b18fce6a1de
+--- /dev/null
++++ b/drivers/media/pci/intel/Kconfig
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config IPU_BRIDGE
++	tristate
++	depends on I2C && ACPI
++	help
++	  This is a helper module for the IPU bridge, which can be
++	  used by ipu3 and other drivers. In order to handle module
++	  dependencies, this is selected by each driver that needs it.
++
++source "drivers/media/pci/intel/ipu3/Kconfig"
+diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
+index 0b4236c4db49a..951191a7e4011 100644
+--- a/drivers/media/pci/intel/Makefile
++++ b/drivers/media/pci/intel/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ #
+-# Makefile for the IPU3 cio2 and ImGU drivers
++# Makefile for the IPU drivers
+ #
+-
++obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
+ obj-y	+= ipu3/
+diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
+new file mode 100644
+index 0000000000000..c5c44fb43c97a
+--- /dev/null
++++ b/drivers/media/pci/intel/ipu-bridge.c
+@@ -0,0 +1,502 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Author: Dan Scally <djrscally@gmail.com> */
++
++#include <linux/acpi.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/pci.h>
++#include <linux/property.h>
++#include <media/v4l2-fwnode.h>
++
++#include "ipu-bridge.h"
++
++/*
++ * Extend this array with ACPI Hardware IDs of devices known to be working
++ * plus the number of link-frequencies expected by their drivers, along with
++ * the frequency values in hertz. This is somewhat opportunistic way of adding
++ * support for this for now in the hopes of a better source for the information
++ * (possibly some encoded value in the SSDB buffer that we're unaware of)
++ * becoming apparent in the future.
++ *
++ * Do not add an entry for a sensor that is not actually supported.
++ */
++static const struct ipu_sensor_config ipu_supported_sensors[] = {
++	/* Omnivision OV5693 */
++	IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
++	/* Omnivision OV8865 */
++	IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
++	/* Omnivision OV7251 */
++	IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
++	/* Omnivision OV2680 */
++	IPU_SENSOR_CONFIG("OVTI2680", 0),
++	/* Omnivision ov8856 */
++	IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
++	/* Omnivision ov2740 */
++	IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
++	/* Hynix hi556 */
++	IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
++	/* Omnivision ov13b10 */
++	IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
++};
++
++static const struct ipu_property_names prop_names = {
++	.clock_frequency = "clock-frequency",
++	.rotation = "rotation",
++	.orientation = "orientation",
++	.bus_type = "bus-type",
++	.data_lanes = "data-lanes",
++	.remote_endpoint = "remote-endpoint",
++	.link_frequencies = "link-frequencies",
++};
++
++static const char * const ipu_vcm_types[] = {
++	"ad5823",
++	"dw9714",
++	"ad5816",
++	"dw9719",
++	"dw9718",
++	"dw9806b",
++	"wv517s",
++	"lc898122xa",
++	"lc898212axb",
++};
++
++static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
++				       void *data, u32 size)
++{
++	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++	acpi_status status;
++	int ret = 0;
++
++	status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	obj = buffer.pointer;
++	if (!obj) {
++		dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
++		return -ENODEV;
++	}
++
++	if (obj->type != ACPI_TYPE_BUFFER) {
++		dev_err(&adev->dev, "Not an ACPI buffer\n");
++		ret = -ENODEV;
++		goto out_free_buff;
++	}
++
++	if (obj->buffer.length > size) {
++		dev_err(&adev->dev, "Given buffer is too small\n");
++		ret = -EINVAL;
++		goto out_free_buff;
++	}
++
++	memcpy(data, obj->buffer.pointer, obj->buffer.length);
++
++out_free_buff:
++	kfree(buffer.pointer);
++	return ret;
++}
++
++static u32 ipu_bridge_parse_rotation(struct ipu_sensor *sensor)
++{
++	switch (sensor->ssdb.degree) {
++	case IPU_SENSOR_ROTATION_NORMAL:
++		return 0;
++	case IPU_SENSOR_ROTATION_INVERTED:
++		return 180;
++	default:
++		dev_warn(&sensor->adev->dev,
++			 "Unknown rotation %d. Assume 0 degree rotation\n",
++			 sensor->ssdb.degree);
++		return 0;
++	}
++}
++
++static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct ipu_sensor *sensor)
++{
++	switch (sensor->pld->panel) {
++	case ACPI_PLD_PANEL_FRONT:
++		return V4L2_FWNODE_ORIENTATION_FRONT;
++	case ACPI_PLD_PANEL_BACK:
++		return V4L2_FWNODE_ORIENTATION_BACK;
++	case ACPI_PLD_PANEL_TOP:
++	case ACPI_PLD_PANEL_LEFT:
++	case ACPI_PLD_PANEL_RIGHT:
++	case ACPI_PLD_PANEL_UNKNOWN:
++		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
++	default:
++		dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
++			 sensor->pld->panel);
++		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
++	}
++}
++
++static void ipu_bridge_create_fwnode_properties(
++	struct ipu_sensor *sensor,
++	struct ipu_bridge *bridge,
++	const struct ipu_sensor_config *cfg)
++{
++	u32 rotation;
++	enum v4l2_fwnode_orientation orientation;
++
++	rotation = ipu_bridge_parse_rotation(sensor);
++	orientation = ipu_bridge_parse_orientation(sensor);
++
++	sensor->prop_names = prop_names;
++
++	sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_IPU_ENDPOINT]);
++	sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
++
++	sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.clock_frequency,
++					sensor->ssdb.mclkspeed);
++	sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.rotation,
++					rotation);
++	sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.orientation,
++					orientation);
++	if (sensor->ssdb.vcmtype) {
++		sensor->vcm_ref[0] =
++			SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
++		sensor->dev_properties[3] =
++			PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
++	}
++
++	sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.bus_type,
++					V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
++	sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
++					sensor->prop_names.data_lanes,
++					bridge->data_lanes,
++					sensor->ssdb.lanes);
++	sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
++					sensor->prop_names.remote_endpoint,
++					sensor->local_ref);
++
++	if (cfg->nr_link_freqs > 0)
++		sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
++			sensor->prop_names.link_frequencies,
++			cfg->link_freqs,
++			cfg->nr_link_freqs);
++
++	sensor->ipu_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
++					sensor->prop_names.data_lanes,
++					bridge->data_lanes,
++					sensor->ssdb.lanes);
++	sensor->ipu_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
++					sensor->prop_names.remote_endpoint,
++					sensor->remote_ref);
++}
++
++static void ipu_bridge_init_swnode_names(struct ipu_sensor *sensor)
++{
++	snprintf(sensor->node_names.remote_port,
++		 sizeof(sensor->node_names.remote_port),
++		 SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link);
++	snprintf(sensor->node_names.port,
++		 sizeof(sensor->node_names.port),
++		 SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
++	snprintf(sensor->node_names.endpoint,
++		 sizeof(sensor->node_names.endpoint),
++		 SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
++}
++
++static void ipu_bridge_init_swnode_group(struct ipu_sensor *sensor)
++{
++	struct software_node *nodes = sensor->swnodes;
++
++	sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
++	sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
++	sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
++	sensor->group[SWNODE_IPU_PORT] = &nodes[SWNODE_IPU_PORT];
++	sensor->group[SWNODE_IPU_ENDPOINT] = &nodes[SWNODE_IPU_ENDPOINT];
++	if (sensor->ssdb.vcmtype)
++		sensor->group[SWNODE_VCM] =  &nodes[SWNODE_VCM];
++}
++
++static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
++						 struct ipu_sensor *sensor)
++{
++	struct software_node *nodes = sensor->swnodes;
++
++	ipu_bridge_init_swnode_names(sensor);
++
++	nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
++					       sensor->dev_properties);
++	nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
++					      &nodes[SWNODE_SENSOR_HID]);
++	nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
++						sensor->node_names.endpoint,
++						&nodes[SWNODE_SENSOR_PORT],
++						sensor->ep_properties);
++	nodes[SWNODE_IPU_PORT] = NODE_PORT(sensor->node_names.remote_port,
++					   &bridge->ipu_hid_node);
++	nodes[SWNODE_IPU_ENDPOINT] = NODE_ENDPOINT(
++						sensor->node_names.endpoint,
++						&nodes[SWNODE_IPU_PORT],
++						sensor->ipu_properties);
++	if (sensor->ssdb.vcmtype) {
++		/* append ssdb.link to distinguish VCM nodes with same HID */
++		snprintf(sensor->node_names.vcm, sizeof(sensor->node_names.vcm),
++			 "%s-%u", ipu_vcm_types[sensor->ssdb.vcmtype - 1],
++			 sensor->ssdb.link);
++		nodes[SWNODE_VCM] = NODE_VCM(sensor->node_names.vcm);
++	}
++
++	ipu_bridge_init_swnode_group(sensor);
++}
++
++static void ipu_bridge_instantiate_vcm_i2c_client(struct ipu_sensor *sensor)
++{
++	struct i2c_board_info board_info = { };
++	char name[16];
++
++	if (!sensor->ssdb.vcmtype)
++		return;
++
++	snprintf(name, sizeof(name), "%s-VCM", acpi_dev_name(sensor->adev));
++	board_info.dev_name = name;
++	strscpy(board_info.type, ipu_vcm_types[sensor->ssdb.vcmtype - 1],
++		ARRAY_SIZE(board_info.type));
++	board_info.swnode = &sensor->swnodes[SWNODE_VCM];
++
++	sensor->vcm_i2c_client =
++		i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(sensor->adev),
++					      1, &board_info);
++	if (IS_ERR(sensor->vcm_i2c_client)) {
++		dev_warn(&sensor->adev->dev, "Error instantiating VCM i2c-client: %ld\n",
++			 PTR_ERR(sensor->vcm_i2c_client));
++		sensor->vcm_i2c_client = NULL;
++	}
++}
++
++static void ipu_bridge_unregister_sensors(struct ipu_bridge *bridge)
++{
++	struct ipu_sensor *sensor;
++	unsigned int i;
++
++	for (i = 0; i < bridge->n_sensors; i++) {
++		sensor = &bridge->sensors[i];
++		software_node_unregister_node_group(sensor->group);
++		ACPI_FREE(sensor->pld);
++		acpi_dev_put(sensor->adev);
++		i2c_unregister_device(sensor->vcm_i2c_client);
++	}
++}
++
++static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
++				     struct ipu_bridge *bridge,
++				     struct pci_dev *ipu)
++{
++	struct fwnode_handle *fwnode, *primary;
++	struct ipu_sensor *sensor;
++	struct acpi_device *adev;
++	acpi_status status;
++	int ret;
++
++	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
++		if (!adev->status.enabled)
++			continue;
++
++		if (bridge->n_sensors >= CIO2_NUM_PORTS) {
++			acpi_dev_put(adev);
++			dev_err(&ipu->dev, "Exceeded available IPU ports\n");
++			return -EINVAL;
++		}
++
++		sensor = &bridge->sensors[bridge->n_sensors];
++		/*
++		 * Borrow our adev ref to the sensor for now, on success
++		 * acpi_dev_get(adev) is done further below.
++		 */
++		sensor->adev = adev;
++
++		ret = ipu_bridge_read_acpi_buffer(adev, "SSDB",
++						  &sensor->ssdb,
++						  sizeof(sensor->ssdb));
++		if (ret)
++			goto err_put_adev;
++
++		snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
++			 cfg->hid, sensor->ssdb.link);
++
++		if (sensor->ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
++			dev_warn(&adev->dev, "Unknown VCM type %d\n",
++				 sensor->ssdb.vcmtype);
++			sensor->ssdb.vcmtype = 0;
++		}
++
++		status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
++		if (ACPI_FAILURE(status)) {
++			ret = -ENODEV;
++			goto err_put_adev;
++		}
++
++		if (sensor->ssdb.lanes > IPU_MAX_LANES) {
++			dev_err(&adev->dev,
++				"Number of lanes in SSDB is invalid\n");
++			ret = -EINVAL;
++			goto err_free_pld;
++		}
++
++		ipu_bridge_create_fwnode_properties(sensor, bridge, cfg);
++		ipu_bridge_create_connection_swnodes(bridge, sensor);
++
++		ret = software_node_register_node_group(sensor->group);
++		if (ret)
++			goto err_free_pld;
++
++		fwnode = software_node_fwnode(&sensor->swnodes[
++						      SWNODE_SENSOR_HID]);
++		if (!fwnode) {
++			ret = -ENODEV;
++			goto err_free_swnodes;
++		}
++
++		sensor->adev = acpi_dev_get(adev);
++
++		primary = acpi_fwnode_handle(adev);
++		primary->secondary = fwnode;
++
++		ipu_bridge_instantiate_vcm_i2c_client(sensor);
++
++		dev_info(&ipu->dev, "Found supported sensor %s\n",
++			 acpi_dev_name(adev));
++
++		bridge->n_sensors++;
++	}
++
++	return 0;
++
++err_free_swnodes:
++	software_node_unregister_node_group(sensor->group);
++err_free_pld:
++	ACPI_FREE(sensor->pld);
++err_put_adev:
++	acpi_dev_put(adev);
++	return ret;
++}
++
++static int ipu_bridge_connect_sensors(struct ipu_bridge *bridge,
++				      struct pci_dev *ipu)
++{
++	unsigned int i;
++	int ret;
++
++	for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
++		const struct ipu_sensor_config *cfg =
++			&ipu_supported_sensors[i];
++
++		ret = ipu_bridge_connect_sensor(cfg, bridge, ipu);
++		if (ret)
++			goto err_unregister_sensors;
++	}
++
++	return 0;
++
++err_unregister_sensors:
++	ipu_bridge_unregister_sensors(bridge);
++	return ret;
++}
++
++/*
++ * The VCM cannot be probed until the PMIC is completely setup. We cannot rely
++ * on -EPROBE_DEFER for this, since the consumer<->supplier relations between
++ * the VCM and regulators/clks are not described in ACPI, instead they are
++ * passed as board-data to the PMIC drivers. Since -EPROBE_DEFER does not work
++ * for the clks/regulators the VCM i2c-clients must not be instantiated until
++ * the PMIC is fully setup.
++ *
++ * The sensor/VCM ACPI device has an ACPI _DEP on the PMIC, check this using the
++ * acpi_dev_ready_for_enumeration() helper, like the i2c-core-acpi code does
++ * for the sensors.
++ */
++static int ipu_bridge_sensors_are_ready(void)
++{
++	struct acpi_device *adev;
++	bool ready = true;
++	unsigned int i;
++
++	for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
++		const struct ipu_sensor_config *cfg =
++			&ipu_supported_sensors[i];
++
++		for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
++			if (!adev->status.enabled)
++				continue;
++
++			if (!acpi_dev_ready_for_enumeration(adev))
++				ready = false;
++		}
++	}
++
++	return ready;
++}
++
++int ipu_bridge_init(struct pci_dev *ipu)
++{
++	struct device *dev = &ipu->dev;
++	struct fwnode_handle *fwnode;
++	struct ipu_bridge *bridge;
++	unsigned int i;
++	int ret;
++
++	if (!ipu_bridge_sensors_are_ready())
++		return -EPROBE_DEFER;
++
++	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
++	if (!bridge)
++		return -ENOMEM;
++
++	strscpy(bridge->ipu_node_name, IPU_HID,
++		sizeof(bridge->ipu_node_name));
++	bridge->ipu_hid_node.name = bridge->ipu_node_name;
++
++	ret = software_node_register(&bridge->ipu_hid_node);
++	if (ret < 0) {
++		dev_err(dev, "Failed to register the IPU HID node\n");
++		goto err_free_bridge;
++	}
++
++	/*
++	 * Map the lane arrangement, which is fixed for the IPU3 (meaning we
++	 * only need one, rather than one per sensor). We include it as a
++	 * member of the struct ipu_bridge rather than a global variable so
++	 * that it survives if the module is unloaded along with the rest of
++	 * the struct.
++	 */
++	for (i = 0; i < IPU_MAX_LANES; i++)
++		bridge->data_lanes[i] = i + 1;
++
++	ret = ipu_bridge_connect_sensors(bridge, ipu);
++	if (ret || bridge->n_sensors == 0)
++		goto err_unregister_ipu;
++
++	dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
++
++	fwnode = software_node_fwnode(&bridge->ipu_hid_node);
++	if (!fwnode) {
++		dev_err(dev, "Error getting fwnode from ipu software_node\n");
++		ret = -ENODEV;
++		goto err_unregister_sensors;
++	}
++
++	set_secondary_fwnode(dev, fwnode);
++
++	return 0;
++
++err_unregister_sensors:
++	ipu_bridge_unregister_sensors(bridge);
++err_unregister_ipu:
++	software_node_unregister(&bridge->ipu_hid_node);
++err_free_bridge:
++	kfree(bridge);
++
++	return ret;
++}
++EXPORT_SYMBOL_NS_GPL(ipu_bridge_init, INTEL_IPU_BRIDGE);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Intel IPU Sensors Bridge driver");
+diff --git a/drivers/media/pci/intel/ipu-bridge.h b/drivers/media/pci/intel/ipu-bridge.h
+new file mode 100644
+index 0000000000000..1ff0b2d04d929
+--- /dev/null
++++ b/drivers/media/pci/intel/ipu-bridge.h
+@@ -0,0 +1,153 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Author: Dan Scally <djrscally@gmail.com> */
++#ifndef __IPU_BRIDGE_H
++#define __IPU_BRIDGE_H
++
++#include <linux/property.h>
++#include <linux/types.h>
++
++#include "ipu3/ipu3-cio2.h"
++
++struct i2c_client;
++
++#define IPU_HID				"INT343E"
++#define IPU_MAX_LANES				4
++#define MAX_NUM_LINK_FREQS			3
++
++/* Values are educated guesses as we don't have a spec */
++#define IPU_SENSOR_ROTATION_NORMAL		0
++#define IPU_SENSOR_ROTATION_INVERTED		1
++
++#define IPU_SENSOR_CONFIG(_HID, _NR, ...)	\
++	(const struct ipu_sensor_config) {	\
++		.hid = _HID,			\
++		.nr_link_freqs = _NR,		\
++		.link_freqs = { __VA_ARGS__ }	\
++	}
++
++#define NODE_SENSOR(_HID, _PROPS)		\
++	(const struct software_node) {		\
++		.name = _HID,			\
++		.properties = _PROPS,		\
++	}
++
++#define NODE_PORT(_PORT, _SENSOR_NODE)		\
++	(const struct software_node) {		\
++		.name = _PORT,			\
++		.parent = _SENSOR_NODE,		\
++	}
++
++#define NODE_ENDPOINT(_EP, _PORT, _PROPS)	\
++	(const struct software_node) {		\
++		.name = _EP,			\
++		.parent = _PORT,		\
++		.properties = _PROPS,		\
++	}
++
++#define NODE_VCM(_TYPE)				\
++	(const struct software_node) {		\
++		.name = _TYPE,			\
++	}
++
++enum ipu_sensor_swnodes {
++	SWNODE_SENSOR_HID,
++	SWNODE_SENSOR_PORT,
++	SWNODE_SENSOR_ENDPOINT,
++	SWNODE_IPU_PORT,
++	SWNODE_IPU_ENDPOINT,
++	/* Must be last because it is optional / maybe empty */
++	SWNODE_VCM,
++	SWNODE_COUNT
++};
++
++/* Data representation as it is in ACPI SSDB buffer */
++struct ipu_sensor_ssdb {
++	u8 version;
++	u8 sku;
++	u8 guid_csi2[16];
++	u8 devfunction;
++	u8 bus;
++	u32 dphylinkenfuses;
++	u32 clockdiv;
++	u8 link;
++	u8 lanes;
++	u32 csiparams[10];
++	u32 maxlanespeed;
++	u8 sensorcalibfileidx;
++	u8 sensorcalibfileidxInMBZ[3];
++	u8 romtype;
++	u8 vcmtype;
++	u8 platforminfo;
++	u8 platformsubinfo;
++	u8 flash;
++	u8 privacyled;
++	u8 degree;
++	u8 mipilinkdefined;
++	u32 mclkspeed;
++	u8 controllogicid;
++	u8 reserved1[3];
++	u8 mclkport;
++	u8 reserved2[13];
++} __packed;
++
++struct ipu_property_names {
++	char clock_frequency[16];
++	char rotation[9];
++	char orientation[12];
++	char bus_type[9];
++	char data_lanes[11];
++	char remote_endpoint[16];
++	char link_frequencies[17];
++};
++
++struct ipu_node_names {
++	char port[7];
++	char endpoint[11];
++	char remote_port[7];
++	char vcm[16];
++};
++
++struct ipu_sensor_config {
++	const char *hid;
++	const u8 nr_link_freqs;
++	const u64 link_freqs[MAX_NUM_LINK_FREQS];
++};
++
++struct ipu_sensor {
++	/* append ssdb.link(u8) in "-%u" format as suffix of HID */
++	char name[ACPI_ID_LEN + 4];
++	struct acpi_device *adev;
++	struct i2c_client *vcm_i2c_client;
++
++	/* SWNODE_COUNT + 1 for terminating NULL */
++	const struct software_node *group[SWNODE_COUNT + 1];
++	struct software_node swnodes[SWNODE_COUNT];
++	struct ipu_node_names node_names;
++
++	struct ipu_sensor_ssdb ssdb;
++	struct acpi_pld_info *pld;
++
++	struct ipu_property_names prop_names;
++	struct property_entry ep_properties[5];
++	struct property_entry dev_properties[5];
++	struct property_entry ipu_properties[3];
++	struct software_node_ref_args local_ref[1];
++	struct software_node_ref_args remote_ref[1];
++	struct software_node_ref_args vcm_ref[1];
++};
++
++struct ipu_bridge {
++	char ipu_node_name[ACPI_ID_LEN];
++	struct software_node ipu_hid_node;
++	u32 data_lanes[4];
++	unsigned int n_sensors;
++	struct ipu_sensor sensors[CIO2_NUM_PORTS];
++};
++
++#if IS_ENABLED(CONFIG_IPU_BRIDGE)
++int ipu_bridge_init(struct pci_dev *ipu);
++#else
++static inline int ipu_bridge_init(struct pci_dev *ipu) { return 0; }
++#endif
++
++#endif
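
The tail of the new header uses the kernel's usual compile-out idiom: when CONFIG_IPU_BRIDGE is not enabled, callers get a static inline no-op instead of a link error, so call sites need no #ifdef of their own. A minimal standalone sketch of that idiom, with CONFIG_FOO_FEATURE and foo_feature_init() as illustrative stand-ins rather than real kernel names:

    #include <stdio.h>

    #define CONFIG_FOO_FEATURE 1    /* stand-in for a Kconfig-generated macro */

    #if CONFIG_FOO_FEATURE
    static int foo_feature_init(void)
    {
        printf("feature enabled\n");
        return 0;
    }
    #else
    static inline int foo_feature_init(void) { return 0; }    /* no-op stub */
    #endif

    int main(void)
    {
        return foo_feature_init();    /* caller never needs its own #ifdef */
    }

Note that the kernel's IS_ENABLED() also covers the =m (module) case; the sketch only models =y versus =n.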
+diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
+index 65b0c1598fbf1..0951545eab21a 100644
+--- a/drivers/media/pci/intel/ipu3/Kconfig
++++ b/drivers/media/pci/intel/ipu3/Kconfig
+@@ -8,6 +8,7 @@ config VIDEO_IPU3_CIO2
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select V4L2_FWNODE
+ 	select VIDEOBUF2_DMA_SG
++	select IPU_BRIDGE if CIO2_BRIDGE
+ 
+ 	help
+ 	  This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel
+diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
+index 933777e6ea8ab..429d516452e42 100644
+--- a/drivers/media/pci/intel/ipu3/Makefile
++++ b/drivers/media/pci/intel/ipu3/Makefile
+@@ -2,4 +2,3 @@
+ obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
+ 
+ ipu3-cio2-y += ipu3-cio2-main.o
+-ipu3-cio2-$(CONFIG_CIO2_BRIDGE) += cio2-bridge.o
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+deleted file mode 100644
+index 3c2accfe54551..0000000000000
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
++++ /dev/null
+@@ -1,494 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Author: Dan Scally <djrscally@gmail.com> */
+-
+-#include <linux/acpi.h>
+-#include <linux/device.h>
+-#include <linux/i2c.h>
+-#include <linux/pci.h>
+-#include <linux/property.h>
+-#include <media/v4l2-fwnode.h>
+-
+-#include "cio2-bridge.h"
+-
+-/*
+- * Extend this array with ACPI Hardware IDs of devices known to be working
+- * plus the number of link-frequencies expected by their drivers, along with
+- * the frequency values in hertz. This is somewhat opportunistic way of adding
+- * support for this for now in the hopes of a better source for the information
+- * (possibly some encoded value in the SSDB buffer that we're unaware of)
+- * becoming apparent in the future.
+- *
+- * Do not add an entry for a sensor that is not actually supported.
+- */
+-static const struct cio2_sensor_config cio2_supported_sensors[] = {
+-	/* Omnivision OV5693 */
+-	CIO2_SENSOR_CONFIG("INT33BE", 1, 419200000),
+-	/* Omnivision OV8865 */
+-	CIO2_SENSOR_CONFIG("INT347A", 1, 360000000),
+-	/* Omnivision OV7251 */
+-	CIO2_SENSOR_CONFIG("INT347E", 1, 319200000),
+-	/* Omnivision OV2680 */
+-	CIO2_SENSOR_CONFIG("OVTI2680", 0),
+-	/* Omnivision ov8856 */
+-	CIO2_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
+-	/* Omnivision ov2740 */
+-	CIO2_SENSOR_CONFIG("INT3474", 1, 360000000),
+-	/* Hynix hi556 */
+-	CIO2_SENSOR_CONFIG("INT3537", 1, 437000000),
+-	/* Omnivision ov13b10 */
+-	CIO2_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
+-};
+-
+-static const struct cio2_property_names prop_names = {
+-	.clock_frequency = "clock-frequency",
+-	.rotation = "rotation",
+-	.orientation = "orientation",
+-	.bus_type = "bus-type",
+-	.data_lanes = "data-lanes",
+-	.remote_endpoint = "remote-endpoint",
+-	.link_frequencies = "link-frequencies",
+-};
+-
+-static const char * const cio2_vcm_types[] = {
+-	"ad5823",
+-	"dw9714",
+-	"ad5816",
+-	"dw9719",
+-	"dw9718",
+-	"dw9806b",
+-	"wv517s",
+-	"lc898122xa",
+-	"lc898212axb",
+-};
+-
+-static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+-					void *data, u32 size)
+-{
+-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+-	union acpi_object *obj;
+-	acpi_status status;
+-	int ret = 0;
+-
+-	status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+-	if (ACPI_FAILURE(status))
+-		return -ENODEV;
+-
+-	obj = buffer.pointer;
+-	if (!obj) {
+-		dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+-		return -ENODEV;
+-	}
+-
+-	if (obj->type != ACPI_TYPE_BUFFER) {
+-		dev_err(&adev->dev, "Not an ACPI buffer\n");
+-		ret = -ENODEV;
+-		goto out_free_buff;
+-	}
+-
+-	if (obj->buffer.length > size) {
+-		dev_err(&adev->dev, "Given buffer is too small\n");
+-		ret = -EINVAL;
+-		goto out_free_buff;
+-	}
+-
+-	memcpy(data, obj->buffer.pointer, obj->buffer.length);
+-
+-out_free_buff:
+-	kfree(buffer.pointer);
+-	return ret;
+-}
+-
+-static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
+-{
+-	switch (sensor->ssdb.degree) {
+-	case CIO2_SENSOR_ROTATION_NORMAL:
+-		return 0;
+-	case CIO2_SENSOR_ROTATION_INVERTED:
+-		return 180;
+-	default:
+-		dev_warn(&sensor->adev->dev,
+-			 "Unknown rotation %d. Assume 0 degree rotation\n",
+-			 sensor->ssdb.degree);
+-		return 0;
+-	}
+-}
+-
+-static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
+-{
+-	switch (sensor->pld->panel) {
+-	case ACPI_PLD_PANEL_FRONT:
+-		return V4L2_FWNODE_ORIENTATION_FRONT;
+-	case ACPI_PLD_PANEL_BACK:
+-		return V4L2_FWNODE_ORIENTATION_BACK;
+-	case ACPI_PLD_PANEL_TOP:
+-	case ACPI_PLD_PANEL_LEFT:
+-	case ACPI_PLD_PANEL_RIGHT:
+-	case ACPI_PLD_PANEL_UNKNOWN:
+-		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+-	default:
+-		dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
+-			 sensor->pld->panel);
+-		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+-	}
+-}
+-
+-static void cio2_bridge_create_fwnode_properties(
+-	struct cio2_sensor *sensor,
+-	struct cio2_bridge *bridge,
+-	const struct cio2_sensor_config *cfg)
+-{
+-	u32 rotation;
+-	enum v4l2_fwnode_orientation orientation;
+-
+-	rotation = cio2_bridge_parse_rotation(sensor);
+-	orientation = cio2_bridge_parse_orientation(sensor);
+-
+-	sensor->prop_names = prop_names;
+-
+-	sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
+-	sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
+-
+-	sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
+-					sensor->prop_names.clock_frequency,
+-					sensor->ssdb.mclkspeed);
+-	sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
+-					sensor->prop_names.rotation,
+-					rotation);
+-	sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+-					sensor->prop_names.orientation,
+-					orientation);
+-	if (sensor->ssdb.vcmtype) {
+-		sensor->vcm_ref[0] =
+-			SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
+-		sensor->dev_properties[3] =
+-			PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
+-	}
+-
+-	sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
+-					sensor->prop_names.bus_type,
+-					V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+-	sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+-					sensor->prop_names.data_lanes,
+-					bridge->data_lanes,
+-					sensor->ssdb.lanes);
+-	sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
+-					sensor->prop_names.remote_endpoint,
+-					sensor->local_ref);
+-
+-	if (cfg->nr_link_freqs > 0)
+-		sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
+-			sensor->prop_names.link_frequencies,
+-			cfg->link_freqs,
+-			cfg->nr_link_freqs);
+-
+-	sensor->cio2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+-					sensor->prop_names.data_lanes,
+-					bridge->data_lanes,
+-					sensor->ssdb.lanes);
+-	sensor->cio2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
+-					sensor->prop_names.remote_endpoint,
+-					sensor->remote_ref);
+-}
+-
+-static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor)
+-{
+-	snprintf(sensor->node_names.remote_port,
+-		 sizeof(sensor->node_names.remote_port),
+-		 SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link);
+-	snprintf(sensor->node_names.port,
+-		 sizeof(sensor->node_names.port),
+-		 SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
+-	snprintf(sensor->node_names.endpoint,
+-		 sizeof(sensor->node_names.endpoint),
+-		 SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
+-}
+-
+-static void cio2_bridge_init_swnode_group(struct cio2_sensor *sensor)
+-{
+-	struct software_node *nodes = sensor->swnodes;
+-
+-	sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
+-	sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
+-	sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
+-	sensor->group[SWNODE_CIO2_PORT] = &nodes[SWNODE_CIO2_PORT];
+-	sensor->group[SWNODE_CIO2_ENDPOINT] = &nodes[SWNODE_CIO2_ENDPOINT];
+-	if (sensor->ssdb.vcmtype)
+-		sensor->group[SWNODE_VCM] =  &nodes[SWNODE_VCM];
+-}
+-
+-static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
+-						  struct cio2_sensor *sensor)
+-{
+-	struct software_node *nodes = sensor->swnodes;
+-	char vcm_name[ACPI_ID_LEN + 4];
+-
+-	cio2_bridge_init_swnode_names(sensor);
+-
+-	nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
+-					       sensor->dev_properties);
+-	nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
+-					      &nodes[SWNODE_SENSOR_HID]);
+-	nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
+-						sensor->node_names.endpoint,
+-						&nodes[SWNODE_SENSOR_PORT],
+-						sensor->ep_properties);
+-	nodes[SWNODE_CIO2_PORT] = NODE_PORT(sensor->node_names.remote_port,
+-					    &bridge->cio2_hid_node);
+-	nodes[SWNODE_CIO2_ENDPOINT] = NODE_ENDPOINT(
+-						sensor->node_names.endpoint,
+-						&nodes[SWNODE_CIO2_PORT],
+-						sensor->cio2_properties);
+-	if (sensor->ssdb.vcmtype) {
+-		/* append ssdb.link to distinguish VCM nodes with same HID */
+-		snprintf(vcm_name, sizeof(vcm_name), "%s-%u",
+-			 cio2_vcm_types[sensor->ssdb.vcmtype - 1],
+-			 sensor->ssdb.link);
+-		nodes[SWNODE_VCM] = NODE_VCM(vcm_name);
+-	}
+-
+-	cio2_bridge_init_swnode_group(sensor);
+-}
+-
+-static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor)
+-{
+-	struct i2c_board_info board_info = { };
+-	char name[16];
+-
+-	if (!sensor->ssdb.vcmtype)
+-		return;
+-
+-	snprintf(name, sizeof(name), "%s-VCM", acpi_dev_name(sensor->adev));
+-	board_info.dev_name = name;
+-	strscpy(board_info.type, cio2_vcm_types[sensor->ssdb.vcmtype - 1],
+-		ARRAY_SIZE(board_info.type));
+-	board_info.swnode = &sensor->swnodes[SWNODE_VCM];
+-
+-	sensor->vcm_i2c_client =
+-		i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(sensor->adev),
+-					      1, &board_info);
+-	if (IS_ERR(sensor->vcm_i2c_client)) {
+-		dev_warn(&sensor->adev->dev, "Error instantiation VCM i2c-client: %ld\n",
+-			 PTR_ERR(sensor->vcm_i2c_client));
+-		sensor->vcm_i2c_client = NULL;
+-	}
+-}
+-
+-static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
+-{
+-	struct cio2_sensor *sensor;
+-	unsigned int i;
+-
+-	for (i = 0; i < bridge->n_sensors; i++) {
+-		sensor = &bridge->sensors[i];
+-		software_node_unregister_node_group(sensor->group);
+-		ACPI_FREE(sensor->pld);
+-		acpi_dev_put(sensor->adev);
+-		i2c_unregister_device(sensor->vcm_i2c_client);
+-	}
+-}
+-
+-static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+-				      struct cio2_bridge *bridge,
+-				      struct pci_dev *cio2)
+-{
+-	struct fwnode_handle *fwnode, *primary;
+-	struct cio2_sensor *sensor;
+-	struct acpi_device *adev;
+-	acpi_status status;
+-	int ret;
+-
+-	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+-		if (!adev->status.enabled)
+-			continue;
+-
+-		if (bridge->n_sensors >= CIO2_NUM_PORTS) {
+-			acpi_dev_put(adev);
+-			dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
+-			return -EINVAL;
+-		}
+-
+-		sensor = &bridge->sensors[bridge->n_sensors];
+-
+-		ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
+-						   &sensor->ssdb,
+-						   sizeof(sensor->ssdb));
+-		if (ret)
+-			goto err_put_adev;
+-
+-		snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
+-			 cfg->hid, sensor->ssdb.link);
+-
+-		if (sensor->ssdb.vcmtype > ARRAY_SIZE(cio2_vcm_types)) {
+-			dev_warn(&adev->dev, "Unknown VCM type %d\n",
+-				 sensor->ssdb.vcmtype);
+-			sensor->ssdb.vcmtype = 0;
+-		}
+-
+-		status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
+-		if (ACPI_FAILURE(status)) {
+-			ret = -ENODEV;
+-			goto err_put_adev;
+-		}
+-
+-		if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
+-			dev_err(&adev->dev,
+-				"Number of lanes in SSDB is invalid\n");
+-			ret = -EINVAL;
+-			goto err_free_pld;
+-		}
+-
+-		cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
+-		cio2_bridge_create_connection_swnodes(bridge, sensor);
+-
+-		ret = software_node_register_node_group(sensor->group);
+-		if (ret)
+-			goto err_free_pld;
+-
+-		fwnode = software_node_fwnode(&sensor->swnodes[
+-						      SWNODE_SENSOR_HID]);
+-		if (!fwnode) {
+-			ret = -ENODEV;
+-			goto err_free_swnodes;
+-		}
+-
+-		sensor->adev = acpi_dev_get(adev);
+-
+-		primary = acpi_fwnode_handle(adev);
+-		primary->secondary = fwnode;
+-
+-		cio2_bridge_instantiate_vcm_i2c_client(sensor);
+-
+-		dev_info(&cio2->dev, "Found supported sensor %s\n",
+-			 acpi_dev_name(adev));
+-
+-		bridge->n_sensors++;
+-	}
+-
+-	return 0;
+-
+-err_free_swnodes:
+-	software_node_unregister_node_group(sensor->group);
+-err_free_pld:
+-	ACPI_FREE(sensor->pld);
+-err_put_adev:
+-	acpi_dev_put(adev);
+-	return ret;
+-}
+-
+-static int cio2_bridge_connect_sensors(struct cio2_bridge *bridge,
+-				       struct pci_dev *cio2)
+-{
+-	unsigned int i;
+-	int ret;
+-
+-	for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
+-		const struct cio2_sensor_config *cfg =
+-			&cio2_supported_sensors[i];
+-
+-		ret = cio2_bridge_connect_sensor(cfg, bridge, cio2);
+-		if (ret)
+-			goto err_unregister_sensors;
+-	}
+-
+-	return 0;
+-
+-err_unregister_sensors:
+-	cio2_bridge_unregister_sensors(bridge);
+-	return ret;
+-}
+-
+-/*
+- * The VCM cannot be probed until the PMIC is completely setup. We cannot rely
+- * on -EPROBE_DEFER for this, since the consumer<->supplier relations between
+- * the VCM and regulators/clks are not described in ACPI, instead they are
+- * passed as board-data to the PMIC drivers. Since -PROBE_DEFER does not work
+- * for the clks/regulators the VCM i2c-clients must not be instantiated until
+- * the PMIC is fully setup.
+- *
+- * The sensor/VCM ACPI device has an ACPI _DEP on the PMIC, check this using the
+- * acpi_dev_ready_for_enumeration() helper, like the i2c-core-acpi code does
+- * for the sensors.
+- */
+-static int cio2_bridge_sensors_are_ready(void)
+-{
+-	struct acpi_device *adev;
+-	bool ready = true;
+-	unsigned int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
+-		const struct cio2_sensor_config *cfg =
+-			&cio2_supported_sensors[i];
+-
+-		for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+-			if (!adev->status.enabled)
+-				continue;
+-
+-			if (!acpi_dev_ready_for_enumeration(adev))
+-				ready = false;
+-		}
+-	}
+-
+-	return ready;
+-}
+-
+-int cio2_bridge_init(struct pci_dev *cio2)
+-{
+-	struct device *dev = &cio2->dev;
+-	struct fwnode_handle *fwnode;
+-	struct cio2_bridge *bridge;
+-	unsigned int i;
+-	int ret;
+-
+-	if (!cio2_bridge_sensors_are_ready())
+-		return -EPROBE_DEFER;
+-
+-	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+-	if (!bridge)
+-		return -ENOMEM;
+-
+-	strscpy(bridge->cio2_node_name, CIO2_HID,
+-		sizeof(bridge->cio2_node_name));
+-	bridge->cio2_hid_node.name = bridge->cio2_node_name;
+-
+-	ret = software_node_register(&bridge->cio2_hid_node);
+-	if (ret < 0) {
+-		dev_err(dev, "Failed to register the CIO2 HID node\n");
+-		goto err_free_bridge;
+-	}
+-
+-	/*
+-	 * Map the lane arrangement, which is fixed for the IPU3 (meaning we
+-	 * only need one, rather than one per sensor). We include it as a
+-	 * member of the struct cio2_bridge rather than a global variable so
+-	 * that it survives if the module is unloaded along with the rest of
+-	 * the struct.
+-	 */
+-	for (i = 0; i < CIO2_MAX_LANES; i++)
+-		bridge->data_lanes[i] = i + 1;
+-
+-	ret = cio2_bridge_connect_sensors(bridge, cio2);
+-	if (ret || bridge->n_sensors == 0)
+-		goto err_unregister_cio2;
+-
+-	dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
+-
+-	fwnode = software_node_fwnode(&bridge->cio2_hid_node);
+-	if (!fwnode) {
+-		dev_err(dev, "Error getting fwnode from cio2 software_node\n");
+-		ret = -ENODEV;
+-		goto err_unregister_sensors;
+-	}
+-
+-	set_secondary_fwnode(dev, fwnode);
+-
+-	return 0;
+-
+-err_unregister_sensors:
+-	cio2_bridge_unregister_sensors(bridge);
+-err_unregister_cio2:
+-	software_node_unregister(&bridge->cio2_hid_node);
+-err_free_bridge:
+-	kfree(bridge);
+-
+-	return ret;
+-}
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+deleted file mode 100644
+index b76ed8a641e20..0000000000000
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
++++ /dev/null
+@@ -1,146 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* Author: Dan Scally <djrscally@gmail.com> */
+-#ifndef __CIO2_BRIDGE_H
+-#define __CIO2_BRIDGE_H
+-
+-#include <linux/property.h>
+-#include <linux/types.h>
+-
+-#include "ipu3-cio2.h"
+-
+-struct i2c_client;
+-
+-#define CIO2_HID				"INT343E"
+-#define CIO2_MAX_LANES				4
+-#define MAX_NUM_LINK_FREQS			3
+-
+-/* Values are educated guesses as we don't have a spec */
+-#define CIO2_SENSOR_ROTATION_NORMAL		0
+-#define CIO2_SENSOR_ROTATION_INVERTED		1
+-
+-#define CIO2_SENSOR_CONFIG(_HID, _NR, ...)	\
+-	(const struct cio2_sensor_config) {	\
+-		.hid = _HID,			\
+-		.nr_link_freqs = _NR,		\
+-		.link_freqs = { __VA_ARGS__ }	\
+-	}
+-
+-#define NODE_SENSOR(_HID, _PROPS)		\
+-	(const struct software_node) {		\
+-		.name = _HID,			\
+-		.properties = _PROPS,		\
+-	}
+-
+-#define NODE_PORT(_PORT, _SENSOR_NODE)		\
+-	(const struct software_node) {		\
+-		.name = _PORT,			\
+-		.parent = _SENSOR_NODE,		\
+-	}
+-
+-#define NODE_ENDPOINT(_EP, _PORT, _PROPS)	\
+-	(const struct software_node) {		\
+-		.name = _EP,			\
+-		.parent = _PORT,		\
+-		.properties = _PROPS,		\
+-	}
+-
+-#define NODE_VCM(_TYPE)				\
+-	(const struct software_node) {		\
+-		.name = _TYPE,			\
+-	}
+-
+-enum cio2_sensor_swnodes {
+-	SWNODE_SENSOR_HID,
+-	SWNODE_SENSOR_PORT,
+-	SWNODE_SENSOR_ENDPOINT,
+-	SWNODE_CIO2_PORT,
+-	SWNODE_CIO2_ENDPOINT,
+-	/* Must be last because it is optional / maybe empty */
+-	SWNODE_VCM,
+-	SWNODE_COUNT
+-};
+-
+-/* Data representation as it is in ACPI SSDB buffer */
+-struct cio2_sensor_ssdb {
+-	u8 version;
+-	u8 sku;
+-	u8 guid_csi2[16];
+-	u8 devfunction;
+-	u8 bus;
+-	u32 dphylinkenfuses;
+-	u32 clockdiv;
+-	u8 link;
+-	u8 lanes;
+-	u32 csiparams[10];
+-	u32 maxlanespeed;
+-	u8 sensorcalibfileidx;
+-	u8 sensorcalibfileidxInMBZ[3];
+-	u8 romtype;
+-	u8 vcmtype;
+-	u8 platforminfo;
+-	u8 platformsubinfo;
+-	u8 flash;
+-	u8 privacyled;
+-	u8 degree;
+-	u8 mipilinkdefined;
+-	u32 mclkspeed;
+-	u8 controllogicid;
+-	u8 reserved1[3];
+-	u8 mclkport;
+-	u8 reserved2[13];
+-} __packed;
+-
+-struct cio2_property_names {
+-	char clock_frequency[16];
+-	char rotation[9];
+-	char orientation[12];
+-	char bus_type[9];
+-	char data_lanes[11];
+-	char remote_endpoint[16];
+-	char link_frequencies[17];
+-};
+-
+-struct cio2_node_names {
+-	char port[7];
+-	char endpoint[11];
+-	char remote_port[7];
+-};
+-
+-struct cio2_sensor_config {
+-	const char *hid;
+-	const u8 nr_link_freqs;
+-	const u64 link_freqs[MAX_NUM_LINK_FREQS];
+-};
+-
+-struct cio2_sensor {
+-	/* append ssdb.link(u8) in "-%u" format as suffix of HID */
+-	char name[ACPI_ID_LEN + 4];
+-	struct acpi_device *adev;
+-	struct i2c_client *vcm_i2c_client;
+-
+-	/* SWNODE_COUNT + 1 for terminating NULL */
+-	const struct software_node *group[SWNODE_COUNT + 1];
+-	struct software_node swnodes[SWNODE_COUNT];
+-	struct cio2_node_names node_names;
+-
+-	struct cio2_sensor_ssdb ssdb;
+-	struct acpi_pld_info *pld;
+-
+-	struct cio2_property_names prop_names;
+-	struct property_entry ep_properties[5];
+-	struct property_entry dev_properties[5];
+-	struct property_entry cio2_properties[3];
+-	struct software_node_ref_args local_ref[1];
+-	struct software_node_ref_args remote_ref[1];
+-	struct software_node_ref_args vcm_ref[1];
+-};
+-
+-struct cio2_bridge {
+-	char cio2_node_name[ACPI_ID_LEN];
+-	struct software_node cio2_hid_node;
+-	u32 data_lanes[4];
+-	unsigned int n_sensors;
+-	struct cio2_sensor sensors[CIO2_NUM_PORTS];
+-};
+-
+-#endif
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 34984a7474ed8..dc09fbdb062b0 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -29,6 +29,7 @@
+ #include <media/v4l2-ioctl.h>
+ #include <media/videobuf2-dma-sg.h>
+ 
++#include "../ipu-bridge.h"
+ #include "ipu3-cio2.h"
+ 
+ struct ipu3_cio2_fmt {
+@@ -1724,7 +1725,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		r = cio2_bridge_init(pci_dev);
++		r = ipu_bridge_init(pci_dev);
+ 		if (r)
+ 			return r;
+ 	}
+@@ -2057,3 +2058,4 @@ MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
+ MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("IPU3 CIO2 driver");
++MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+index 3a1f394e05aa7..d731ce8adbe31 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+@@ -459,10 +459,4 @@ static inline struct cio2_queue *vb2q_to_cio2_queue(struct vb2_queue *vq)
+ 	return container_of(vq, struct cio2_queue, vbq);
+ }
+ 
+-#if IS_ENABLED(CONFIG_CIO2_BRIDGE)
+-int cio2_bridge_init(struct pci_dev *cio2);
+-#else
+-static inline int cio2_bridge_init(struct pci_dev *cio2) { return 0; }
+-#endif
+-
+ #endif
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index 6515f3cdb7a74..133d77d1ea0c3 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -299,7 +299,8 @@ static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state,
+ 		vdec->state = VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE;
+ 
+ 	if (inst->state != pre_state)
+-		vpu_trace(inst->dev, "[%d] %d -> %d\n", inst->id, pre_state, inst->state);
++		vpu_trace(inst->dev, "[%d] %s -> %s\n", inst->id,
++			  vpu_codec_state_name(pre_state), vpu_codec_state_name(inst->state));
+ 
+ 	if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ 		vdec_handle_resolution_change(inst);
+@@ -741,6 +742,21 @@ static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
+ 		dev_info(inst->dev, "[%d] buf[%d] has been decoded\n", inst->id, info->id);
+ 	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_DECODED);
+ 	vdec->decoded_frame_count++;
++	if (vdec->params.display_delay_enable) {
++		struct vpu_format *cur_fmt;
++
++		cur_fmt = vpu_get_format(inst, inst->cap_format.type);
++		vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
++		for (int i = 0; i < vbuf->vb2_buf.num_planes; i++)
++			vb2_set_plane_payload(&vbuf->vb2_buf,
++					      i, vpu_get_fmt_plane_size(cur_fmt, i));
++		vbuf->field = cur_fmt->field;
++		vbuf->sequence = vdec->sequence++;
++		dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
++
++		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
++		vdec->display_frame_count++;
++	}
+ exit:
+ 	vpu_inst_unlock(inst);
+ 
+@@ -768,14 +784,14 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
+ 	struct vpu_format *cur_fmt;
+ 	struct vpu_vb2_buffer *vpu_buf;
+ 	struct vb2_v4l2_buffer *vbuf;
+-	u32 sequence;
+ 	int i;
+ 
+ 	if (!frame)
+ 		return;
+ 
+ 	vpu_inst_lock(inst);
+-	sequence = vdec->sequence++;
++	if (!vdec->params.display_delay_enable)
++		vdec->sequence++;
+ 	vpu_buf = vdec_find_buffer(inst, frame->luma);
+ 	vpu_inst_unlock(inst);
+ 	if (!vpu_buf) {
+@@ -794,13 +810,17 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
+ 		dev_err(inst->dev, "[%d] buffer id(%d, %d) mismatch\n",
+ 			inst->id, vbuf->vb2_buf.index, frame->id);
+ 
++	if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_READY && vdec->params.display_delay_enable)
++		return;
++
+ 	if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_DECODED)
+ 		dev_err(inst->dev, "[%d] buffer(%d) ready but not decoded\n", inst->id, frame->id);
++
+ 	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
+ 	for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ 		vb2_set_plane_payload(&vbuf->vb2_buf, i, vpu_get_fmt_plane_size(cur_fmt, i));
+ 	vbuf->field = cur_fmt->field;
+-	vbuf->sequence = sequence;
++	vbuf->sequence = vdec->sequence;
+ 	dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
+ 
+ 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+@@ -999,6 +1019,7 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
+ {
+ 	struct vdec_t *vdec = inst->priv;
+ 	struct vpu_fs_info info;
++	int ret;
+ 
+ 	if (!vdec->req_frame_count)
+ 		return 0;
+@@ -1006,7 +1027,9 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
+ 	memset(&info, 0, sizeof(info));
+ 	info.type = MEM_RES_FRAME;
+ 	info.tag = vdec->seq_tag + 0xf0;
+-	vpu_session_alloc_fs(inst, &info);
++	ret = vpu_session_alloc_fs(inst, &info);
++	if (ret)
++		return ret;
+ 	vdec->req_frame_count--;
+ 
+ 	return 0;
+@@ -1037,8 +1060,8 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_dbg(inst->dev, "[%d] state = %d, alloc fs %d, tag = 0x%x\n",
+-		inst->id, inst->state, vbuf->vb2_buf.index, vdec->seq_tag);
++	dev_dbg(inst->dev, "[%d] state = %s, alloc fs %d, tag = 0x%x\n",
++		inst->id, vpu_codec_state_name(inst->state), vbuf->vb2_buf.index, vdec->seq_tag);
+ 	vpu_buf = to_vpu_vb2_buffer(vbuf);
+ 
+ 	memset(&info, 0, sizeof(info));
+@@ -1400,7 +1423,7 @@ static void vdec_abort(struct vpu_inst *inst)
+ 	struct vpu_rpc_buffer_desc desc;
+ 	int ret;
+ 
+-	vpu_trace(inst->dev, "[%d] state = %d\n", inst->id, inst->state);
++	vpu_trace(inst->dev, "[%d] state = %s\n", inst->id, vpu_codec_state_name(inst->state));
+ 
+ 	vdec->aborting = true;
+ 	vpu_iface_add_scode(inst, SCODE_PADDING_ABORT);
+@@ -1453,9 +1476,7 @@ static void vdec_release(struct vpu_inst *inst)
+ {
+ 	if (inst->id != VPU_INST_NULL_ID)
+ 		vpu_trace(inst->dev, "[%d]\n", inst->id);
+-	vpu_inst_lock(inst);
+ 	vdec_stop(inst, true);
+-	vpu_inst_unlock(inst);
+ }
+ 
+ static void vdec_cleanup(struct vpu_inst *inst)
+diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
+index 58480e2755ec4..4eb57d793a9c0 100644
+--- a/drivers/media/platform/amphion/venc.c
++++ b/drivers/media/platform/amphion/venc.c
+@@ -268,7 +268,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ {
+ 	struct vpu_inst *inst = to_inst(file);
+ 	struct venc_t *venc = inst->priv;
+-	struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
++	struct v4l2_fract *timeperframe;
+ 
+ 	if (!parm)
+ 		return -EINVAL;
+@@ -279,6 +279,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ 	if (!vpu_helper_check_type(inst, parm->type))
+ 		return -EINVAL;
+ 
++	timeperframe = &parm->parm.capture.timeperframe;
+ 	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ 	parm->parm.capture.readbuffers = 0;
+ 	timeperframe->numerator = venc->params.frame_rate.numerator;
+@@ -291,7 +292,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ {
+ 	struct vpu_inst *inst = to_inst(file);
+ 	struct venc_t *venc = inst->priv;
+-	struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
++	struct v4l2_fract *timeperframe;
+ 	unsigned long n, d;
+ 
+ 	if (!parm)
+@@ -303,6 +304,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ 	if (!vpu_helper_check_type(inst, parm->type))
+ 		return -EINVAL;
+ 
++	timeperframe = &parm->parm.capture.timeperframe;
+ 	if (!timeperframe->numerator)
+ 		timeperframe->numerator = venc->params.frame_rate.numerator;
+ 	if (!timeperframe->denominator)
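
Both venc hunks move the initialization of timeperframe below the `if (!parm)` check: evaluating `&parm->parm.capture.timeperframe` while `parm` may be NULL computes an address from a null pointer, which is undefined behavior even if the result is never dereferenced. A small standalone sketch of the corrected ordering (struct and function names are illustrative):

    #include <stdio.h>

    struct parm {
        struct { int numerator, denominator; } timeperframe;
    };

    static int read_rate(const struct parm *parm)
    {
        const int *num;    /* formed only after the NULL check */

        if (!parm)
            return -1;

        num = &parm->timeperframe.numerator;
        return *num;
    }

    int main(void)
    {
        struct parm p = { { 30, 1 } };

        printf("%d\n", read_rate(&p));     /* 30 */
        printf("%d\n", read_rate(NULL));   /* -1, no UB on the way */
        return 0;
    }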
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index 3bfe193722af4..5a701f64289ef 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -355,6 +355,9 @@ void vpu_inst_record_flow(struct vpu_inst *inst, u32 flow);
+ int vpu_core_driver_init(void);
+ void vpu_core_driver_exit(void);
+ 
++const char *vpu_id_name(u32 id);
++const char *vpu_codec_state_name(enum vpu_codec_state state);
++
+ extern bool debug;
+ #define vpu_trace(dev, fmt, arg...)					\
+ 	do {								\
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index fa581ba6bab2d..235b71398d403 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -98,7 +98,7 @@ static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data
+ 	cmd->id = id;
+ 	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
+ 	if (ret) {
+-		dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
++		dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
+ 		vfree(cmd->pkt);
+ 		vfree(cmd);
+ 		return NULL;
+@@ -125,14 +125,14 @@ static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
+ {
+ 	int ret;
+ 
+-	dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
++	dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
+ 	vpu_iface_pre_send_cmd(inst);
+ 	ret = vpu_cmd_send(inst->core, cmd->pkt);
+ 	if (!ret) {
+ 		vpu_iface_post_send_cmd(inst);
+ 		vpu_inst_record_flow(inst, cmd->id);
+ 	} else {
+-		dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
++		dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
+ 	}
+ 
+ 	return ret;
+@@ -149,7 +149,8 @@ static void vpu_process_cmd_request(struct vpu_inst *inst)
+ 	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
+ 		list_del_init(&cmd->list);
+ 		if (vpu_session_process_cmd(inst, cmd))
+-			dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
++			dev_err(inst->dev, "[%d] process cmd %s fail\n",
++				inst->id, vpu_id_name(cmd->id));
+ 		if (cmd->request) {
+ 			inst->pending = (void *)cmd;
+ 			break;
+@@ -305,7 +306,8 @@ static void vpu_core_keep_active(struct vpu_core *core)
+ 
+ 	dev_dbg(core->dev, "try to wake up\n");
+ 	mutex_lock(&core->cmd_lock);
+-	vpu_cmd_send(core, &pkt);
++	if (vpu_cmd_send(core, &pkt))
++		dev_err(core->dev, "failed to keep active\n");
+ 	mutex_unlock(&core->cmd_lock);
+ }
+ 
+@@ -313,7 +315,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ {
+ 	unsigned long key;
+ 	int sync = false;
+-	int ret = -EINVAL;
++	int ret;
+ 
+ 	if (inst->id < 0)
+ 		return -EINVAL;
+@@ -339,7 +341,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ 
+ exit:
+ 	if (ret)
+-		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
++		dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
+index 7863b7b53494c..2bb9f187e163c 100644
+--- a/drivers/media/platform/amphion/vpu_core.c
++++ b/drivers/media/platform/amphion/vpu_core.c
+@@ -88,6 +88,8 @@ static int vpu_core_boot_done(struct vpu_core *core)
+ 
+ 		core->supported_instance_count = min(core->supported_instance_count, count);
+ 	}
++	if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
++		core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);
+ 	core->fw_version = fw_version;
+ 	vpu_core_set_state(core, VPU_CORE_ACTIVE);
+ 
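
The vpu_core_boot_done() hunk clamps the firmware-reported instance count to the bit width of core->instance_mask, so later per-instance bit operations cannot index past the mask. A standalone sketch of the same clamp (BITS_PER_TYPE is redefined here for userspace, and the mask name is illustrative):

    #include <stdio.h>

    #define BITS_PER_TYPE(t) (sizeof(t) * 8)    /* mirrors linux/bits.h */

    int main(void)
    {
        unsigned long instance_mask = 0;
        unsigned int count = 70;    /* pretend firmware over-reported */

        if (count >= BITS_PER_TYPE(instance_mask))
            count = BITS_PER_TYPE(instance_mask);

        printf("usable instances: %u\n", count);    /* 64 on LP64 */
        return 0;
    }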
+diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
+index 44b830ae01d8c..982c2c777484c 100644
+--- a/drivers/media/platform/amphion/vpu_dbg.c
++++ b/drivers/media/platform/amphion/vpu_dbg.c
+@@ -50,6 +50,13 @@ static char *vpu_stat_name[] = {
+ 	[VPU_BUF_STATE_ERROR] = "error",
+ };
+ 
++static inline const char *to_vpu_stat_name(int state)
++{
++	if (state <= VPU_BUF_STATE_ERROR)
++		return vpu_stat_name[state];
++	return "unknown";
++}
++
+ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ {
+ 	struct vpu_inst *inst = s->private;
+@@ -67,7 +74,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 	num = scnprintf(str, sizeof(str), "tgid = %d, pid = %d\n", inst->tgid, inst->pid);
+ 	if (seq_write(s, str, num))
+ 		return 0;
+-	num = scnprintf(str, sizeof(str), "state = %d\n", inst->state);
++	num = scnprintf(str, sizeof(str), "state = %s\n", vpu_codec_state_name(inst->state));
+ 	if (seq_write(s, str, num))
+ 		return 0;
+ 	num = scnprintf(str, sizeof(str),
+@@ -141,7 +148,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 		num = scnprintf(str, sizeof(str),
+ 				"output [%2d] state = %10s, %8s\n",
+ 				i, vb2_stat_name[vb->state],
+-				vpu_stat_name[vpu_get_buffer_state(vbuf)]);
++				to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
+ 		if (seq_write(s, str, num))
+ 			return 0;
+ 	}
+@@ -156,7 +163,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 		num = scnprintf(str, sizeof(str),
+ 				"capture[%2d] state = %10s, %8s\n",
+ 				i, vb2_stat_name[vb->state],
+-				vpu_stat_name[vpu_get_buffer_state(vbuf)]);
++				to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
+ 		if (seq_write(s, str, num))
+ 			return 0;
+ 	}
+@@ -188,9 +195,9 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 
+ 		if (!inst->flows[idx])
+ 			continue;
+-		num = scnprintf(str, sizeof(str), "\t[%s]0x%x\n",
++		num = scnprintf(str, sizeof(str), "\t[%s] %s\n",
+ 				inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",
+-				inst->flows[idx]);
++				vpu_id_name(inst->flows[idx]));
+ 		if (seq_write(s, str, num)) {
+ 			mutex_unlock(&inst->core->cmd_lock);
+ 			return 0;
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index 019c77e84514c..af3b336e5dc32 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include "vpu.h"
++#include "vpu_defs.h"
+ #include "vpu_core.h"
+ #include "vpu_rpc.h"
+ #include "vpu_helpers.h"
+@@ -447,3 +448,63 @@ int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
+ 
+ 	return -EINVAL;
+ }
++
++const char *vpu_id_name(u32 id)
++{
++	switch (id) {
++	case VPU_CMD_ID_NOOP: return "noop";
++	case VPU_CMD_ID_CONFIGURE_CODEC: return "configure codec";
++	case VPU_CMD_ID_START: return "start";
++	case VPU_CMD_ID_STOP: return "stop";
++	case VPU_CMD_ID_ABORT: return "abort";
++	case VPU_CMD_ID_RST_BUF: return "reset buf";
++	case VPU_CMD_ID_SNAPSHOT: return "snapshot";
++	case VPU_CMD_ID_FIRM_RESET: return "reset firmware";
++	case VPU_CMD_ID_UPDATE_PARAMETER: return "update parameter";
++	case VPU_CMD_ID_FRAME_ENCODE: return "encode frame";
++	case VPU_CMD_ID_SKIP: return "skip";
++	case VPU_CMD_ID_FS_ALLOC: return "alloc fb";
++	case VPU_CMD_ID_FS_RELEASE: return "release fb";
++	case VPU_CMD_ID_TIMESTAMP: return "timestamp";
++	case VPU_CMD_ID_DEBUG: return "debug";
++	case VPU_MSG_ID_RESET_DONE: return "reset done";
++	case VPU_MSG_ID_START_DONE: return "start done";
++	case VPU_MSG_ID_STOP_DONE: return "stop done";
++	case VPU_MSG_ID_ABORT_DONE: return "abort done";
++	case VPU_MSG_ID_BUF_RST: return "buf reset done";
++	case VPU_MSG_ID_MEM_REQUEST: return "mem request";
++	case VPU_MSG_ID_PARAM_UPD_DONE: return "param upd done";
++	case VPU_MSG_ID_FRAME_INPUT_DONE: return "frame input done";
++	case VPU_MSG_ID_ENC_DONE: return "encode done";
++	case VPU_MSG_ID_DEC_DONE: return "frame display";
++	case VPU_MSG_ID_FRAME_REQ: return "fb request";
++	case VPU_MSG_ID_FRAME_RELEASE: return "fb release";
++	case VPU_MSG_ID_SEQ_HDR_FOUND: return "seq hdr found";
++	case VPU_MSG_ID_RES_CHANGE: return "resolution change";
++	case VPU_MSG_ID_PIC_HDR_FOUND: return "pic hdr found";
++	case VPU_MSG_ID_PIC_DECODED: return "picture decoded";
++	case VPU_MSG_ID_PIC_EOS: return "eos";
++	case VPU_MSG_ID_FIFO_LOW: return "fifo low";
++	case VPU_MSG_ID_BS_ERROR: return "bs error";
++	case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
++	case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
++	case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++	}
++	return "<unknown>";
++}
++
++const char *vpu_codec_state_name(enum vpu_codec_state state)
++{
++	switch (state) {
++	case VPU_CODEC_STATE_DEINIT: return "initialization";
++	case VPU_CODEC_STATE_CONFIGURED: return "configured";
++	case VPU_CODEC_STATE_START: return "start";
++	case VPU_CODEC_STATE_STARTED: return "started";
++	case VPU_CODEC_STATE_ACTIVE: return "active";
++	case VPU_CODEC_STATE_SEEK: return "seek";
++	case VPU_CODEC_STATE_STOP: return "stop";
++	case VPU_CODEC_STATE_DRAIN: return "drain";
++	case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE: return "resolution change";
++	}
++	return "<unknown>";
++}
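
vpu_id_name() and vpu_codec_state_name() follow the switch-based name-table pattern: every known value returns a string literal, and anything else falls through to a fixed placeholder rather than indexing an array the value might overrun (compare the to_vpu_stat_name() bound check added in vpu_dbg.c below). A minimal sketch with an illustrative enum:

    #include <stdio.h>

    enum demo_state { STATE_INIT, STATE_RUN, STATE_STOP };

    static const char *demo_state_name(enum demo_state s)
    {
        switch (s) {
        case STATE_INIT: return "init";
        case STATE_RUN:  return "run";
        case STATE_STOP: return "stop";
        }
        return "<unknown>";    /* out-of-range values stay printable */
    }

    int main(void)
    {
        printf("%s %s\n", demo_state_name(STATE_RUN),
               demo_state_name((enum demo_state)42));
        return 0;
    }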
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index 92672a802b492..d0ead051f7d18 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -32,7 +32,7 @@ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_
+ 
+ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_pkt_mem_req_data req_data;
++	struct vpu_pkt_mem_req_data req_data = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
+ 	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
+@@ -80,7 +80,7 @@ static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct v
+ 
+ static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_enc_pic_info info;
++	struct vpu_enc_pic_info info = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ 	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
+@@ -90,7 +90,7 @@ static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_
+ 
+ static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_fs_info fs;
++	struct vpu_fs_info fs = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ 	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
+@@ -107,7 +107,7 @@ static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_r
+ 		info.type = inst->out_format.type;
+ 		call_void_vop(inst, buf_done, &info);
+ 	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
+-		struct vpu_fs_info fs;
++		struct vpu_fs_info fs = { 0 };
+ 
+ 		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ 		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
+@@ -122,7 +122,7 @@ static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_
+ 
+ static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_dec_pic_info info;
++	struct vpu_dec_pic_info info = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ 	call_void_vop(inst, get_one_frame, &info);
+@@ -130,7 +130,7 @@ static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc
+ 
+ static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_dec_pic_info info;
++	struct vpu_dec_pic_info info = { 0 };
+ 	struct vpu_frame_info frame;
+ 
+ 	memset(&frame, 0, sizeof(frame));
+@@ -210,7 +210,7 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ 		return -EINVAL;
+ 
+ 	msg_id = ret;
+-	dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);
++	dev_dbg(inst->dev, "[%d] receive event(%s)\n", inst->id, vpu_id_name(msg_id));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ 		if (handlers[i].id == msg_id) {
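
Each vpu_msgs.c hunk changes an on-stack struct from uninitialized to `= { 0 }` before handing it to vpu_iface_unpack_msg_data(), which may fill it only partially, or not at all on an error path; zeroing keeps every later field read well-defined. A standalone sketch of the hazard and the fix (names are illustrative):

    #include <stdio.h>

    struct req_data { int enc_frames; int dec_frames; };

    /* stand-in for an unpack helper that can fail before writing */
    static int unpack(struct req_data *d, int ok)
    {
        if (!ok)
            return -1;
        d->enc_frames = 4;    /* even on success, fills only one field */
        return 0;
    }

    int main(void)
    {
        struct req_data d = { 0 };    /* not left as stack garbage */

        (void)unpack(&d, 0);
        printf("%d %d\n", d.enc_frames, d.dec_frames);    /* 0 0 */
        return 0;
    }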
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index 021235e1c1446..0f6e4c666440e 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -489,6 +489,11 @@ static int vpu_vb2_queue_setup(struct vb2_queue *vq,
+ 	for (i = 0; i < cur_fmt->mem_planes; i++)
+ 		psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);
+ 
++	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
++		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
++		call_void_vop(inst, release);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -773,9 +778,9 @@ int vpu_v4l2_close(struct file *file)
+ 		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+ 		inst->fh.m2m_ctx = NULL;
+ 	}
++	call_void_vop(inst, release);
+ 	vpu_inst_unlock(inst);
+ 
+-	call_void_vop(inst, release);
+ 	vpu_inst_unregister(inst);
+ 	vpu_inst_put(inst);
+ 
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 60425c99a2b8b..7194f88edc0fb 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1403,6 +1403,7 @@ static void mtk_jpeg_remove(struct platform_device *pdev)
+ {
+ 	struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&jpeg->job_timeout_work);
+ 	pm_runtime_disable(&pdev->dev);
+ 	video_unregister_device(jpeg->vdev);
+ 	v4l2_m2m_release(jpeg->m2m_dev);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
+index 404a1a23fd402..b00b423274b3b 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
+@@ -1658,9 +1658,9 @@ static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *ins
+ 	u32 allow_update_cdf = 0;
+ 	u32 sb_boundary_x_m1 = 0, sb_boundary_y_m1 = 0;
+ 	int tile_info_base;
+-	u32 tile_buf_pa;
++	u64 tile_buf_pa;
+ 	u32 *tile_info_buf = instance->tile.va;
+-	u32 pa = (u32)bs->dma_addr;
++	u64 pa = (u64)bs->dma_addr;
+ 
+ 	if (uh->disable_cdf_update == 0)
+ 		allow_update_cdf = 1;
+@@ -1673,8 +1673,12 @@ static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *ins
+ 		tile_info_buf[tile_info_base + 0] = (tile_group->tile_size[tile_num] << 3);
+ 		tile_buf_pa = pa + tile_group->tile_start_offset[tile_num];
+ 
+-		tile_info_buf[tile_info_base + 1] = (tile_buf_pa >> 4) << 4;
+-		tile_info_buf[tile_info_base + 2] = (tile_buf_pa % 16) << 3;
++		/* Save the high 4 address bits (bits 32-35) of the AV1 tile
++		 * buffer in the low 4-bit position and clear them from their
++		 * original place, as the hardware requires.
++		 */
++		tile_info_buf[tile_info_base + 1] = (tile_buf_pa & 0xFFFFFFF0ull) |
++			((tile_buf_pa & 0xF00000000ull) >> 32);
++		tile_info_buf[tile_info_base + 2] = (tile_buf_pa & 0xFull) << 3;
+ 
+ 		sb_boundary_x_m1 =
+ 			(tile->mi_col_starts[tile_col + 1] - tile->mi_col_starts[tile_col] - 1) &
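
The rewritten code in vdec_av1_slice_setup_tile_buffer() packs a bus address wider than 32 bits into two 32-bit words: the first keeps the 16-byte-aligned part of the address with bits 32-35 folded into its low nibble, and the second carries the offset within the 16-byte unit, shifted left by 3. A standalone sketch of the same arithmetic with an example address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pa = 0x2ABCDEF18ull;    /* example 36-bit bus address */
        uint32_t word1 = (uint32_t)((pa & 0xFFFFFFF0ull) |
                                    ((pa & 0xF00000000ull) >> 32));
        uint32_t word2 = ((uint32_t)(pa & 0xFull)) << 3;

        /* prints: word1=0xabcdef12 word2=0x00000040 */
        printf("word1=0x%08x word2=0x%08x\n", word1, word2);
        return 0;
    }

The old code cast the DMA address to u32 up front, silently dropping bits 32-35 on systems with memory above 4 GiB; doing the masking on a u64 preserves them.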
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
+index 70b8383f7c8ec..a27a109d8d144 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
+@@ -226,10 +226,11 @@ static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+ 		if (fb->base_y.va == addr) {
+ 			list_move_tail(&node->list,
+ 				       &inst->available_fb_node_list);
+-			break;
++			return fb;
+ 		}
+ 	}
+-	return fb;
++
++	return NULL;
+ }
+ 
+ static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
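
The vp9_rm_from_fb_use_list() fix returns the frame buffer from inside the loop and NULL after it. After a completed list walk the loop variable does not end up NULL, so the old shape of breaking out and returning `fb` afterwards could hand back the last examined entry even when nothing matched. A standalone sketch of the safe shape on a plain singly linked list (types are illustrative):

    #include <stdio.h>

    struct node { int key; struct node *next; };

    static struct node *find(struct node *head, int key)
    {
        for (struct node *n = head; n; n = n->next) {
            if (n->key == key)
                return n;    /* report the match at the match */
        }
        return NULL;    /* unambiguous "not found" */
    }

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        printf("%p\n", (void *)find(&a, 2));
        printf("%p\n", (void *)find(&a, 9));    /* (nil), never stale */
        return 0;
    }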
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index 04e6dc6cfa1de..898f9dbb9f46d 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -231,6 +231,7 @@ void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
+ 			mtk_vcodec_mem_free(ctx, mem);
+ 
+ 		kfree(lat_buf->private_data);
++		lat_buf->private_data = NULL;
+ 	}
+ 
+ 	if (msg_queue->wdma_addr.size)
+@@ -307,6 +308,7 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 	err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
+ 	if (err) {
+ 		mtk_v4l2_err("failed to allocate wdma_addr buf");
++		msg_queue->wdma_addr.size = 0;
+ 		return -ENOMEM;
+ 	}
+ 	msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
+@@ -338,14 +340,14 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 			err = mtk_vcodec_mem_alloc(ctx, &lat_buf->rd_mv_addr);
+ 			if (err) {
+ 				mtk_v4l2_err("failed to allocate rd_mv_addr buf[%d]", i);
+-				return -ENOMEM;
++				goto mem_alloc_err;
+ 			}
+ 
+ 			lat_buf->tile_addr.size = VDEC_LAT_TILE_SZ;
+ 			err = mtk_vcodec_mem_alloc(ctx, &lat_buf->tile_addr);
+ 			if (err) {
+ 				mtk_v4l2_err("failed to allocate tile_addr buf[%d]", i);
+-				return -ENOMEM;
++				goto mem_alloc_err;
+ 			}
+ 		}
+ 
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+index f7447b2f4d777..9fcfc39257332 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+@@ -483,7 +483,7 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
+ 
+ 	xbar->inputs = kcalloc(xbar->num_sinks, sizeof(*xbar->inputs),
+ 			       GFP_KERNEL);
+-	if (!xbar->pads) {
++	if (!xbar->inputs) {
+ 		ret = -ENOMEM;
+ 		goto err_free;
+ 	}
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index f0b46389e8d56..5506a0d196ef9 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -131,7 +131,6 @@ struct venus_hfi_device {
+ 
+ static bool venus_pkt_debug;
+ int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
+-static bool venus_sys_idle_indicator;
+ static bool venus_fw_low_power_mode = true;
+ static int venus_hw_rsp_timeout = 1000;
+ static bool venus_fw_coverage;
+@@ -454,7 +453,6 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
+ 	void __iomem *wrapper_base = hdev->core->wrapper_base;
+ 	int ret = 0;
+ 
+-	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
+ 	if (IS_V6(hdev->core)) {
+ 		mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
+ 		mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
+@@ -465,6 +463,7 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
+ 	writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
+ 	writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
+ 
++	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
+ 	while (!ctrl_status && count < max_tries) {
+ 		ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
+ 		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
+@@ -927,17 +926,12 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
+ 	if (ret)
+ 		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+ 
+-	/*
+-	 * Idle indicator is disabled by default on some 4xx firmware versions,
+-	 * enable it explicitly in order to make suspend functional by checking
+-	 * WFI (wait-for-interrupt) bit.
+-	 */
+-	if (IS_V4(hdev->core) || IS_V6(hdev->core))
+-		venus_sys_idle_indicator = true;
+-
+-	ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
+-	if (ret)
+-		dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
++	/* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
++	if (IS_V1(hdev->core)) {
++		ret = venus_sys_set_idle_message(hdev, false);
++		if (ret)
++			dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
++	}
+ 
+ 	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
+ 	if (ret)
+diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
+index e871c078dd59e..b3ae037a50f61 100644
+--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
+@@ -297,6 +297,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
+ 			  enum v4l2_buf_type type)
+ {
+ 	const struct hantro_fmt *fmt;
++	const struct hantro_fmt *vpu_fmt;
+ 	bool capture = V4L2_TYPE_IS_CAPTURE(type);
+ 	bool coded;
+ 
+@@ -316,19 +317,23 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
+ 
+ 	if (coded) {
+ 		pix_mp->num_planes = 1;
+-	} else if (!ctx->is_encoder) {
++		vpu_fmt = fmt;
++	} else if (ctx->is_encoder) {
++		vpu_fmt = hantro_find_format(ctx, ctx->dst_fmt.pixelformat);
++	} else {
+ 		/*
+ 		 * Width/height on the CAPTURE end of a decoder are ignored and
+ 		 * replaced by the OUTPUT ones.
+ 		 */
+ 		pix_mp->width = ctx->src_fmt.width;
+ 		pix_mp->height = ctx->src_fmt.height;
++		vpu_fmt = fmt;
+ 	}
+ 
+ 	pix_mp->field = V4L2_FIELD_NONE;
+ 
+ 	v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
+-				       &fmt->frmsize);
++				       &vpu_fmt->frmsize);
+ 
+ 	if (!coded) {
+ 		/* Fill remaining fields */
+diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
+index eaa3bbc903d7e..3d3b54be29557 100644
+--- a/drivers/media/tuners/fc0011.c
++++ b/drivers/media/tuners/fc0011.c
+@@ -499,7 +499,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0011_attach);
++EXPORT_SYMBOL_GPL(fc0011_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
+ MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
+index 4429d5e8c5796..81e65acbdb170 100644
+--- a/drivers/media/tuners/fc0012.c
++++ b/drivers/media/tuners/fc0012.c
+@@ -495,7 +495,7 @@ err:
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0012_attach);
++EXPORT_SYMBOL_GPL(fc0012_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
+ MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
+index 29dd9b55ff333..1006a2798eefc 100644
+--- a/drivers/media/tuners/fc0013.c
++++ b/drivers/media/tuners/fc0013.c
+@@ -608,7 +608,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0013_attach);
++EXPORT_SYMBOL_GPL(fc0013_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
+ MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
+index 1c746bed51fee..1575ab94e1c8b 100644
+--- a/drivers/media/tuners/max2165.c
++++ b/drivers/media/tuners/max2165.c
+@@ -410,7 +410,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(max2165_attach);
++EXPORT_SYMBOL_GPL(max2165_attach);
+ 
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+ MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
+diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
+index 0c9161516abdf..ed8bdf7ebd99d 100644
+--- a/drivers/media/tuners/mc44s803.c
++++ b/drivers/media/tuners/mc44s803.c
+@@ -356,7 +356,7 @@ error:
+ 	kfree(priv);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mc44s803_attach);
++EXPORT_SYMBOL_GPL(mc44s803_attach);
+ 
+ MODULE_AUTHOR("Jochen Friedrich");
+ MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver");
+diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
+index 0278a9f0aeefa..4205ed4cf4675 100644
+--- a/drivers/media/tuners/mt2060.c
++++ b/drivers/media/tuners/mt2060.c
+@@ -440,7 +440,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2060_attach);
++EXPORT_SYMBOL_GPL(mt2060_attach);
+ 
+ static int mt2060_probe(struct i2c_client *client)
+ {
+diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
+index 37f50ff6c0bd2..eebc060883414 100644
+--- a/drivers/media/tuners/mt2131.c
++++ b/drivers/media/tuners/mt2131.c
+@@ -274,7 +274,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2131_attach);
++EXPORT_SYMBOL_GPL(mt2131_attach);
+ 
+ MODULE_AUTHOR("Steven Toth");
+ MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
+diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
+index 6136f20fa9b7f..2e92885a6bcb9 100644
+--- a/drivers/media/tuners/mt2266.c
++++ b/drivers/media/tuners/mt2266.c
+@@ -336,7 +336,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 	mt2266_calibrate(priv);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2266_attach);
++EXPORT_SYMBOL_GPL(mt2266_attach);
+ 
+ MODULE_AUTHOR("Olivier DANET");
+ MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver");
+diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
+index 06dfab9fb8cbc..d9bfa257a0054 100644
+--- a/drivers/media/tuners/mxl5005s.c
++++ b/drivers/media/tuners/mxl5005s.c
+@@ -4120,7 +4120,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = state;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mxl5005s_attach);
++EXPORT_SYMBOL_GPL(mxl5005s_attach);
+ 
+ MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
+ MODULE_AUTHOR("Steven Toth");
+diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
+index 3853a3d43d4f2..60931367b82ca 100644
+--- a/drivers/media/tuners/qt1010.c
++++ b/drivers/media/tuners/qt1010.c
+@@ -440,7 +440,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(qt1010_attach);
++EXPORT_SYMBOL_GPL(qt1010_attach);
+ 
+ MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
+ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
+index 4ed94646116fa..7d8d84dcb2459 100644
+--- a/drivers/media/tuners/tda18218.c
++++ b/drivers/media/tuners/tda18218.c
+@@ -336,7 +336,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda18218_attach);
++EXPORT_SYMBOL_GPL(tda18218_attach);
+ 
+ MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
+ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
+index 69c2e1b99bf17..5a967edceca93 100644
+--- a/drivers/media/tuners/xc2028.c
++++ b/drivers/media/tuners/xc2028.c
+@@ -1512,7 +1512,7 @@ fail:
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(xc2028_attach);
++EXPORT_SYMBOL_GPL(xc2028_attach);
+ 
+ MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
+ MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index d59b4ab774302..57ded9ff3f043 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -1742,7 +1742,7 @@ fail2:
+ 	xc4000_release(fe);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(xc4000_attach);
++EXPORT_SYMBOL_GPL(xc4000_attach);
+ 
+ MODULE_AUTHOR("Steven Toth, Davide Ferri");
+ MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 7b7d9fe4f9453..2182e5b7b6064 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -1460,7 +1460,7 @@ fail:
+ 	xc5000_release(fe);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(xc5000_attach);
++EXPORT_SYMBOL_GPL(xc5000_attach);
+ 
+ MODULE_AUTHOR("Steven Toth");
+ MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
+diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
+index fea5bcf72a31a..c88a202daf5fc 100644
+--- a/drivers/media/usb/dvb-usb/m920x.c
++++ b/drivers/media/usb/dvb-usb/m920x.c
+@@ -277,7 +277,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
+ 			char *read = kmalloc(1, GFP_KERNEL);
+ 			if (!read) {
+ 				ret = -ENOMEM;
+-				kfree(read);
+ 				goto unlock;
+ 			}
+ 
+@@ -288,8 +287,10 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
+ 
+ 				if ((ret = m920x_read(d->udev, M9206_I2C, 0x0,
+ 						      0x20 | stop,
+-						      read, 1)) != 0)
++						      read, 1)) != 0) {
++					kfree(read);
+ 					goto unlock;
++				}
+ 				msg[i].buf[j] = read[0];
+ 			}
+ 
+diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
+index 38339dd2f83f7..2880370e45c8b 100644
+--- a/drivers/media/usb/go7007/go7007-i2c.c
++++ b/drivers/media/usb/go7007/go7007-i2c.c
+@@ -165,8 +165,6 @@ static int go7007_i2c_master_xfer(struct i2c_adapter *adapter,
+ 		} else if (msgs[i].len == 3) {
+ 			if (msgs[i].flags & I2C_M_RD)
+ 				return -EIO;
+-			if (msgs[i].len != 3)
+-				return -EIO;
+ 			if (go7007_i2c_xfer(go, msgs[i].addr, 0,
+ 					(msgs[i].buf[0] << 8) | msgs[i].buf[1],
+ 					0x01, &msgs[i].buf[2]) < 0)
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 640737d3b8aeb..8a39cac76c585 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -455,12 +455,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smscore_register_device(&params, &dev->coredev, 0, mdev);
+ 	if (rc < 0) {
+ 		pr_err("smscore_register_device(...) failed, rc %d\n", rc);
+-		smsusb_term_device(intf);
+-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+-		media_device_unregister(mdev);
+-#endif
+-		kfree(mdev);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	smscore_set_board_id(dev->coredev, board_id);
+@@ -477,8 +472,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smsusb_start_streaming(dev);
+ 	if (rc < 0) {
+ 		pr_err("smsusb_start_streaming(...) failed\n");
+-		smsusb_term_device(intf);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	dev->state = SMSUSB_ACTIVE;
+@@ -486,13 +480,20 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smscore_start_device(dev->coredev);
+ 	if (rc < 0) {
+ 		pr_err("smscore_start_device(...) failed\n");
+-		smsusb_term_device(intf);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	pr_debug("device 0x%p created\n", dev);
+ 
+ 	return rc;
++
++err_unregister_device:
++	smsusb_term_device(intf);
++#ifdef CONFIG_MEDIA_CONTROLLER_DVB
++	media_device_unregister(mdev);
++#endif
++	kfree(mdev);
++	return rc;
+ }
+ 
+ static int smsusb_probe(struct usb_interface *intf,
+diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
+index 049c2f2001eaa..4fa9225aa3d93 100644
+--- a/drivers/media/v4l2-core/v4l2-fwnode.c
++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
+@@ -568,19 +568,29 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
+ 	link->local_id = fwep.id;
+ 	link->local_port = fwep.port;
+ 	link->local_node = fwnode_graph_get_port_parent(fwnode);
++	if (!link->local_node)
++		return -ENOLINK;
+ 
+ 	fwnode = fwnode_graph_get_remote_endpoint(fwnode);
+-	if (!fwnode) {
+-		fwnode_handle_put(fwnode);
+-		return -ENOLINK;
+-	}
++	if (!fwnode)
++		goto err_put_local_node;
+ 
+ 	fwnode_graph_parse_endpoint(fwnode, &fwep);
+ 	link->remote_id = fwep.id;
+ 	link->remote_port = fwep.port;
+ 	link->remote_node = fwnode_graph_get_port_parent(fwnode);
++	if (!link->remote_node)
++		goto err_put_remote_endpoint;
+ 
+ 	return 0;
++
++err_put_remote_endpoint:
++	fwnode_handle_put(fwnode);
++
++err_put_local_node:
++	fwnode_handle_put(link->local_node);
++
++	return -ENOLINK;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
+ 
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 6f5b259a6d6a0..f6b519eaaa710 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -1197,7 +1197,7 @@ config MFD_RC5T583
+ 	  different functionality of the device.
+ 
+ config MFD_RK8XX
+-	bool
++	tristate
+ 	select MFD_CORE
+ 
+ config MFD_RK8XX_I2C
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 9666d28037e18..5a134fa8a174c 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1322,13 +1322,18 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 	return 0;
+ err_invoke:
+ 	if (fl->cctx->vmcount) {
+-		struct qcom_scm_vmperm perm;
++		u64 src_perms = 0;
++		struct qcom_scm_vmperm dst_perms;
++		u32 i;
+ 
+-		perm.vmid = QCOM_SCM_VMID_HLOS;
+-		perm.perm = QCOM_SCM_PERM_RWX;
++		for (i = 0; i < fl->cctx->vmcount; i++)
++			src_perms |= BIT(fl->cctx->vmperms[i].vmid);
++
++		dst_perms.vmid = QCOM_SCM_VMID_HLOS;
++		dst_perms.perm = QCOM_SCM_PERM_RWX;
+ 		err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
+ 						(u64)fl->cctx->remote_heap->size,
+-						&fl->cctx->perms, &perm, 1);
++						&src_perms, &dst_perms, 1);
+ 		if (err)
+ 			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ 				fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index 345934e4f59e6..2d5ef9c37d769 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -1006,6 +1006,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 		host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
+ 		host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
+ 		host->reset = renesas_sdhi_reset;
++	} else {
++		host->sdcard_irq_mask_all = TMIO_MASK_ALL;
+ 	}
+ 
+ 	/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
+@@ -1100,9 +1102,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 		host->ops.hs400_complete = renesas_sdhi_hs400_complete;
+ 	}
+ 
+-	ret = tmio_mmc_host_probe(host);
+-	if (ret < 0)
+-		goto edisclk;
++	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
+ 
+ 	num_irqs = platform_irq_count(pdev);
+ 	if (num_irqs < 0) {
+@@ -1129,6 +1129,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 			goto eirq;
+ 	}
+ 
++	ret = tmio_mmc_host_probe(host);
++	if (ret < 0)
++		goto edisclk;
++
+ 	dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
+ 		 mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
+ 
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 2e9c2e2d9c9f7..d8418d7fcc372 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2612,6 +2612,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
+ 	struct nand_chip *chip = &host->chip;
+ 	const struct nand_ecc_props *requirements =
+ 		nanddev_get_ecc_requirements(&chip->base);
++	struct nand_memory_organization *memorg =
++		nanddev_get_memorg(&chip->base);
+ 	struct brcmnand_controller *ctrl = host->ctrl;
+ 	struct brcmnand_cfg *cfg = &host->hwcfg;
+ 	char msg[128];
+@@ -2633,10 +2635,11 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
+ 	if (cfg->spare_area_size > ctrl->max_oob)
+ 		cfg->spare_area_size = ctrl->max_oob;
+ 	/*
+-	 * Set oobsize to be consistent with controller's spare_area_size, as
+-	 * the rest is inaccessible.
++	 * Set mtd and memorg oobsize to be consistent with controller's
++	 * spare_area_size, as the rest is inaccessible.
+ 	 */
+ 	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
++	memorg->oobsize = mtd->oobsize;
+ 
+ 	cfg->device_size = mtd->size;
+ 	cfg->block_size = mtd->erasesize;
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 7b4742420dfcb..2e33ae77502a0 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -1200,9 +1200,14 @@ static int fsmc_nand_suspend(struct device *dev)
+ static int fsmc_nand_resume(struct device *dev)
+ {
+ 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
++	int ret;
+ 
+ 	if (host) {
+-		clk_prepare_enable(host->clk);
++		ret = clk_prepare_enable(host->clk);
++		if (ret) {
++			dev_err(dev, "failed to enable clk\n");
++			return ret;
++		}
+ 		if (host->dev_timings)
+ 			fsmc_nand_setup(host, host->dev_timings);
+ 		nand_reset(&host->nand, 0);
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 5f29fac8669a3..55f4a902b8be9 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -870,21 +870,22 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+ 		ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ 		if (ret)
+ 			return ret;
+-	} else if (nor->params->quad_enable) {
++	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
++		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
++		   nor->params->quad_enable) {
+ 		/*
+ 		 * If the Status Register 2 Read command (35h) is not
+ 		 * supported, we should at least be sure we don't
+ 		 * change the value of the SR2 Quad Enable bit.
+ 		 *
+-		 * We can safely assume that when the Quad Enable method is
+-		 * set, the value of the QE bit is one, as a consequence of the
+-		 * nor->params->quad_enable() call.
++		 * When the Quad Enable method is set and the buswidth is 4, we
++		 * can safely assume that the value of the QE bit is one, as a
++		 * consequence of the nor->params->quad_enable() call.
+ 		 *
+-		 * We can safely assume that the Quad Enable bit is present in
+-		 * the Status Register 2 at BIT(1). According to the JESD216
+-		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+-		 * Write Status (01h) command is available just for the cases
+-		 * in which the QE bit is described in SR2 at BIT(1).
++		 * According to the JESD216 revB standard, BFPT DWORDS[15],
++		 * bits 22:20, the 16-bit Write Status (01h) command is
++		 * available just for the cases in which the QE bit is
++		 * described in SR2 at BIT(1).
+ 		 */
+ 		sr_cr[1] = SR2_QUAD_EN_BIT1;
+ 	} else {
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index 99265667538c3..d9e052c49ba1a 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -464,7 +464,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t)
+ 
+ 	ret = sock_queue_err_skb(sk, ackskb);
+ 	if (ret)
+-		kfree_skb(ackskb);
++		dev_kfree_skb_irq(ackskb);
+ 
+ 	local_irq_enable();
+ };
+diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
+index 2b218ce04e9f2..fafa6daa67e69 100644
+--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
++++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
+@@ -95,7 +95,6 @@ static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
+ 	regmap_reg_range(0x000c, 0x0010),
+ 	/* Device configuration registers and Interrupt Flags */
+ 	regmap_reg_range(0x0800, 0x080c),
+-	regmap_reg_range(0x0814, 0x0814),
+ 	regmap_reg_range(0x0820, 0x0820),
+ 	regmap_reg_range(0x0830, 0x0830),
+ 	/* M_CAN */
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index bd9eb066ecf15..129ef60a577c8 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -633,6 +633,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 	}
+ 
+ 	if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
++		stats->rx_over_errors++;
++		stats->rx_errors++;
++
+ 		skb = alloc_can_err_skb(netdev, &cf);
+ 		if (!skb)
+ 			goto resubmit_urb;
+@@ -640,8 +643,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 		cf->can_id |= CAN_ERR_CRTL;
+ 		cf->len = CAN_ERR_DLC;
+ 		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+-		stats->rx_over_errors++;
+-		stats->rx_errors++;
+ 		netif_rx(skb);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
+index f2c79456d7452..36f9b932b9e2a 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -464,7 +464,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
+ {
+ 	int i;
+ 
+-	pdsc_devcmd_reset(pdsc);
++	if (!pdsc->pdev->is_virtfn)
++		pdsc_devcmd_reset(pdsc);
+ 	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ 	pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+ 
+@@ -524,7 +525,8 @@ static void pdsc_fw_down(struct pdsc *pdsc)
+ 	}
+ 
+ 	/* Notify clients of fw_down */
+-	devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
++	if (pdsc->fw_reporter)
++		devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ 	pdsc_notify(PDS_EVENT_RESET, &reset_event);
+ 
+ 	pdsc_stop(pdsc);
+@@ -554,8 +556,9 @@ static void pdsc_fw_up(struct pdsc *pdsc)
+ 
+ 	/* Notify clients of fw_up */
+ 	pdsc->fw_recoveries++;
+-	devlink_health_reporter_state_update(pdsc->fw_reporter,
+-					     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
++	if (pdsc->fw_reporter)
++		devlink_health_reporter_state_update(pdsc->fw_reporter,
++						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ 	pdsc_notify(PDS_EVENT_RESET, &reset_event);
+ 
+ 	return;
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index debe5216fe29e..f77cd9f5a2fda 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -121,7 +121,7 @@ static const char *pdsc_devcmd_str(int opcode)
+ 	}
+ }
+ 
+-static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
++static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
+ {
+ 	struct device *dev = pdsc->dev;
+ 	unsigned long start_time;
+@@ -131,9 +131,6 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
+ 	int done = 0;
+ 	int err = 0;
+ 	int status;
+-	int opcode;
+-
+-	opcode = ioread8(&pdsc->cmd_regs->cmd.opcode);
+ 
+ 	start_time = jiffies;
+ 	max_wait = start_time + (max_seconds * HZ);
+@@ -180,10 +177,10 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ 
+ 	memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
+ 	pdsc_devcmd_dbell(pdsc);
+-	err = pdsc_devcmd_wait(pdsc, max_seconds);
++	err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
+ 	memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+ 
+-	if (err == -ENXIO || err == -ETIMEDOUT)
++	if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
+ 		queue_work(pdsc->wq, &pdsc->health_work);
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index 9c6b3653c1c7c..d9607033bbf21 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -10,6 +10,9 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
+ {
+ 	int vt;
+ 
++	if (!pdsc->viftype_status)
++		return NULL;
++
+ 	for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ 		if (pdsc->viftype_status[vt].dl_id == dl_id)
+ 			return &pdsc->viftype_status[vt];
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+index b31de4cf6534b..a2d3a80236c4f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+@@ -3721,6 +3721,60 @@ struct hwrm_func_backing_store_qcaps_v2_output {
+ 	u8	valid;
+ };
+ 
++/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
++struct hwrm_func_dbr_pacing_qcfg_input {
++	__le16  req_type;
++	__le16  cmpl_ring;
++	__le16  seq_id;
++	__le16  target_id;
++	__le64  resp_addr;
++};
++
++/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
++struct hwrm_func_dbr_pacing_qcfg_output {
++	__le16  error_code;
++	__le16  req_type;
++	__le16  seq_id;
++	__le16  resp_len;
++	u8      flags;
++#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED     0x1UL
++	u8      unused_0[7];
++	__le32  dbr_stat_db_fifo_reg;
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK    0x3UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT     0
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG  0x0UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC       0x1UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0      0x2UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1      0x3UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST     \
++		FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK          0xfffffffcUL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT           2
++	__le32  dbr_stat_db_fifo_reg_watermark_mask;
++	u8      dbr_stat_db_fifo_reg_watermark_shift;
++	u8      unused_1[3];
++	__le32  dbr_stat_db_fifo_reg_fifo_room_mask;
++	u8      dbr_stat_db_fifo_reg_fifo_room_shift;
++	u8      unused_2[3];
++	__le32  dbr_throttling_aeq_arm_reg;
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK    0x3UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT     0
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG  0x0UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC       0x1UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0      0x2UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1      0x3UL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST	\
++		FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK          0xfffffffcUL
++#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT           2
++	u8      dbr_throttling_aeq_arm_reg_val;
++	u8      unused_3[7];
++	__le32  primary_nq_id;
++	__le32  pacing_threshold;
++	u8      unused_4[7];
++	u8      valid;
++};
++
+ /* hwrm_func_drv_if_change_input (size:192b/24B) */
+ struct hwrm_func_drv_if_change_input {
+ 	__le16	req_type;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 852eb449ccae2..6ba2b93986333 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -345,7 +345,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+ 	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ 	edev->pf_port_id = bp->pf.port_id;
+ 	edev->en_state = bp->state;
+-
++	edev->bar0 = bp->bar0;
+ 	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+index 80cbc4b6130aa..6ff77f082e6c7 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -81,6 +81,7 @@ struct bnxt_en_dev {
+ 							 * mode only. Will be
+ 							 * updated in resume.
+ 							 */
++	void __iomem                    *bar0;
+ };
+ 
+ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
+index 6efea46628587..e214bfaece1f3 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
++++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
+@@ -17,11 +17,11 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+ 
+ obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+ 
+-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o  hns3vf/hclgevf_devlink.o \
++hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o  hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
+ 		hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+ 
+ obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+-hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
++hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
+ 		hns3pf/hclge_mbx.o hns3pf/hclge_err.o  hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
+ 		hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 514a20bce4f44..a4b43bcd2f0c9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -382,6 +382,7 @@ struct hnae3_dev_specs {
+ 	u16 umv_size;
+ 	u16 mc_mac_size;
+ 	u32 mac_stats_num;
++	u8 tnl_num;
+ };
+ 
+ struct hnae3_client_ops {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+index 91c173f40701a..d5cfdc4c082d8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+@@ -826,7 +826,9 @@ struct hclge_dev_specs_1_cmd {
+ 	u8 rsv0[2];
+ 	__le16 umv_size;
+ 	__le16 mc_mac_size;
+-	u8 rsv1[12];
++	u8 rsv1[6];
++	u8 tnl_num;
++	u8 rsv2[5];
+ };
+ 
+ /* mac speed type defined in firmware command */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 0fb2eaee3e8a0..f01a7a9ee02ca 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -7,6 +7,7 @@
+ #include "hclge_debugfs.h"
+ #include "hclge_err.h"
+ #include "hclge_main.h"
++#include "hclge_regs.h"
+ #include "hclge_tm.h"
+ #include "hnae3.h"
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index a940e35aef29d..2d5a2e1ef664d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -20,6 +20,7 @@
+ #include "hclge_main.h"
+ #include "hclge_mbx.h"
+ #include "hclge_mdio.h"
++#include "hclge_regs.h"
+ #include "hclge_tm.h"
+ #include "hclge_err.h"
+ #include "hnae3.h"
+@@ -40,20 +41,6 @@
+ #define HCLGE_PF_RESET_SYNC_TIME	20
+ #define HCLGE_PF_RESET_SYNC_CNT		1500
+ 
+-/* Get DFX BD number offset */
+-#define HCLGE_DFX_BIOS_BD_OFFSET        1
+-#define HCLGE_DFX_SSU_0_BD_OFFSET       2
+-#define HCLGE_DFX_SSU_1_BD_OFFSET       3
+-#define HCLGE_DFX_IGU_BD_OFFSET         4
+-#define HCLGE_DFX_RPU_0_BD_OFFSET       5
+-#define HCLGE_DFX_RPU_1_BD_OFFSET       6
+-#define HCLGE_DFX_NCSI_BD_OFFSET        7
+-#define HCLGE_DFX_RTC_BD_OFFSET         8
+-#define HCLGE_DFX_PPP_BD_OFFSET         9
+-#define HCLGE_DFX_RCB_BD_OFFSET         10
+-#define HCLGE_DFX_TQP_BD_OFFSET         11
+-#define HCLGE_DFX_SSU_2_BD_OFFSET       12
+-
+ #define HCLGE_LINK_STATUS_MS	10
+ 
+ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
+@@ -94,62 +81,6 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
+ 
+ MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
+ 
+-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+-					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+-					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+-					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
+-					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
+-					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+-					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+-					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+-					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
+-					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
+-					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+-					 HCLGE_COMM_CMDQ_INTR_STS_REG,
+-					 HCLGE_COMM_CMDQ_INTR_EN_REG,
+-					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
+-
+-static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+-					   HCLGE_PF_OTHER_INT_REG,
+-					   HCLGE_MISC_RESET_STS_REG,
+-					   HCLGE_MISC_VECTOR_INT_STS,
+-					   HCLGE_GLOBAL_RESET_REG,
+-					   HCLGE_FUN_RST_ING,
+-					   HCLGE_GRO_EN_REG};
+-
+-static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+-					 HCLGE_RING_RX_ADDR_H_REG,
+-					 HCLGE_RING_RX_BD_NUM_REG,
+-					 HCLGE_RING_RX_BD_LENGTH_REG,
+-					 HCLGE_RING_RX_MERGE_EN_REG,
+-					 HCLGE_RING_RX_TAIL_REG,
+-					 HCLGE_RING_RX_HEAD_REG,
+-					 HCLGE_RING_RX_FBD_NUM_REG,
+-					 HCLGE_RING_RX_OFFSET_REG,
+-					 HCLGE_RING_RX_FBD_OFFSET_REG,
+-					 HCLGE_RING_RX_STASH_REG,
+-					 HCLGE_RING_RX_BD_ERR_REG,
+-					 HCLGE_RING_TX_ADDR_L_REG,
+-					 HCLGE_RING_TX_ADDR_H_REG,
+-					 HCLGE_RING_TX_BD_NUM_REG,
+-					 HCLGE_RING_TX_PRIORITY_REG,
+-					 HCLGE_RING_TX_TC_REG,
+-					 HCLGE_RING_TX_MERGE_EN_REG,
+-					 HCLGE_RING_TX_TAIL_REG,
+-					 HCLGE_RING_TX_HEAD_REG,
+-					 HCLGE_RING_TX_FBD_NUM_REG,
+-					 HCLGE_RING_TX_OFFSET_REG,
+-					 HCLGE_RING_TX_EBD_NUM_REG,
+-					 HCLGE_RING_TX_EBD_OFFSET_REG,
+-					 HCLGE_RING_TX_BD_ERR_REG,
+-					 HCLGE_RING_EN_REG};
+-
+-static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+-					     HCLGE_TQP_INTR_GL0_REG,
+-					     HCLGE_TQP_INTR_GL1_REG,
+-					     HCLGE_TQP_INTR_GL2_REG,
+-					     HCLGE_TQP_INTR_RL_REG};
+-
+ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
+ 	"External Loopback test",
+ 	"App      Loopback test",
+@@ -375,36 +306,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ 	},
+ };
+ 
+-static const u32 hclge_dfx_bd_offset_list[] = {
+-	HCLGE_DFX_BIOS_BD_OFFSET,
+-	HCLGE_DFX_SSU_0_BD_OFFSET,
+-	HCLGE_DFX_SSU_1_BD_OFFSET,
+-	HCLGE_DFX_IGU_BD_OFFSET,
+-	HCLGE_DFX_RPU_0_BD_OFFSET,
+-	HCLGE_DFX_RPU_1_BD_OFFSET,
+-	HCLGE_DFX_NCSI_BD_OFFSET,
+-	HCLGE_DFX_RTC_BD_OFFSET,
+-	HCLGE_DFX_PPP_BD_OFFSET,
+-	HCLGE_DFX_RCB_BD_OFFSET,
+-	HCLGE_DFX_TQP_BD_OFFSET,
+-	HCLGE_DFX_SSU_2_BD_OFFSET
+-};
+-
+-static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+-	HCLGE_OPC_DFX_BIOS_COMMON_REG,
+-	HCLGE_OPC_DFX_SSU_REG_0,
+-	HCLGE_OPC_DFX_SSU_REG_1,
+-	HCLGE_OPC_DFX_IGU_EGU_REG,
+-	HCLGE_OPC_DFX_RPU_REG_0,
+-	HCLGE_OPC_DFX_RPU_REG_1,
+-	HCLGE_OPC_DFX_NCSI_REG,
+-	HCLGE_OPC_DFX_RTC_REG,
+-	HCLGE_OPC_DFX_PPP_REG,
+-	HCLGE_OPC_DFX_RCB_REG,
+-	HCLGE_OPC_DFX_TQP_REG,
+-	HCLGE_OPC_DFX_SSU_REG_2
+-};
+-
+ static const struct key_info meta_data_key_info[] = {
+ 	{ PACKET_TYPE_ID, 6 },
+ 	{ IP_FRAGEMENT, 1 },
+@@ -1425,6 +1326,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+ 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ 	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
++	ae_dev->dev_specs.tnl_num = 0;
+ }
+ 
+ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+@@ -1448,6 +1350,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
++	ae_dev->dev_specs.tnl_num = req1->tnl_num;
+ }
+ 
+ static void hclge_check_dev_specs(struct hclge_dev *hdev)
+@@ -12383,463 +12286,6 @@ out:
+ 	return ret;
+ }
+ 
+-static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+-			      u32 *regs_num_64_bit)
+-{
+-	struct hclge_desc desc;
+-	u32 total_num;
+-	int ret;
+-
+-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Query register number cmd failed, ret = %d.\n", ret);
+-		return ret;
+-	}
+-
+-	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
+-	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
+-
+-	total_num = *regs_num_32_bit + *regs_num_64_bit;
+-	if (!total_num)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+-				 void *data)
+-{
+-#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+-#define HCLGE_32_BIT_DESC_NODATA_LEN 2
+-
+-	struct hclge_desc *desc;
+-	u32 *reg_val = data;
+-	__le32 *desc_data;
+-	int nodata_num;
+-	int cmd_num;
+-	int i, k, n;
+-	int ret;
+-
+-	if (regs_num == 0)
+-		return 0;
+-
+-	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+-	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+-			       HCLGE_32_BIT_REG_RTN_DATANUM);
+-	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+-	if (!desc)
+-		return -ENOMEM;
+-
+-	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+-	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Query 32 bit register cmd failed, ret = %d.\n", ret);
+-		kfree(desc);
+-		return ret;
+-	}
+-
+-	for (i = 0; i < cmd_num; i++) {
+-		if (i == 0) {
+-			desc_data = (__le32 *)(&desc[i].data[0]);
+-			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
+-		} else {
+-			desc_data = (__le32 *)(&desc[i]);
+-			n = HCLGE_32_BIT_REG_RTN_DATANUM;
+-		}
+-		for (k = 0; k < n; k++) {
+-			*reg_val++ = le32_to_cpu(*desc_data++);
+-
+-			regs_num--;
+-			if (!regs_num)
+-				break;
+-		}
+-	}
+-
+-	kfree(desc);
+-	return 0;
+-}
+-
+-static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+-				 void *data)
+-{
+-#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+-#define HCLGE_64_BIT_DESC_NODATA_LEN 1
+-
+-	struct hclge_desc *desc;
+-	u64 *reg_val = data;
+-	__le64 *desc_data;
+-	int nodata_len;
+-	int cmd_num;
+-	int i, k, n;
+-	int ret;
+-
+-	if (regs_num == 0)
+-		return 0;
+-
+-	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+-	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+-			       HCLGE_64_BIT_REG_RTN_DATANUM);
+-	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+-	if (!desc)
+-		return -ENOMEM;
+-
+-	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+-	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Query 64 bit register cmd failed, ret = %d.\n", ret);
+-		kfree(desc);
+-		return ret;
+-	}
+-
+-	for (i = 0; i < cmd_num; i++) {
+-		if (i == 0) {
+-			desc_data = (__le64 *)(&desc[i].data[0]);
+-			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
+-		} else {
+-			desc_data = (__le64 *)(&desc[i]);
+-			n = HCLGE_64_BIT_REG_RTN_DATANUM;
+-		}
+-		for (k = 0; k < n; k++) {
+-			*reg_val++ = le64_to_cpu(*desc_data++);
+-
+-			regs_num--;
+-			if (!regs_num)
+-				break;
+-		}
+-	}
+-
+-	kfree(desc);
+-	return 0;
+-}
+-
+-#define MAX_SEPARATE_NUM	4
+-#define SEPARATOR_VALUE		0xFDFCFBFA
+-#define REG_NUM_PER_LINE	4
+-#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+-#define REG_SEPARATOR_LINE	1
+-#define REG_NUM_REMAIN_MASK	3
+-
+-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+-{
+-	int i;
+-
+-	/* initialize command BD except the last one */
+-	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+-		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+-					   true);
+-		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+-	}
+-
+-	/* initialize the last command BD */
+-	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+-
+-	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
+-}
+-
+-static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+-				    int *bd_num_list,
+-				    u32 type_num)
+-{
+-	u32 entries_per_desc, desc_index, index, offset, i;
+-	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+-	int ret;
+-
+-	ret = hclge_query_bd_num_cmd_send(hdev, desc);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get dfx bd num fail, status is %d.\n", ret);
+-		return ret;
+-	}
+-
+-	entries_per_desc = ARRAY_SIZE(desc[0].data);
+-	for (i = 0; i < type_num; i++) {
+-		offset = hclge_dfx_bd_offset_list[i];
+-		index = offset % entries_per_desc;
+-		desc_index = offset / entries_per_desc;
+-		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+-	}
+-
+-	return ret;
+-}
+-
+-static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+-				  struct hclge_desc *desc_src, int bd_num,
+-				  enum hclge_opcode_type cmd)
+-{
+-	struct hclge_desc *desc = desc_src;
+-	int i, ret;
+-
+-	hclge_cmd_setup_basic_desc(desc, cmd, true);
+-	for (i = 0; i < bd_num - 1; i++) {
+-		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+-		desc++;
+-		hclge_cmd_setup_basic_desc(desc, cmd, true);
+-	}
+-
+-	desc = desc_src;
+-	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+-	if (ret)
+-		dev_err(&hdev->pdev->dev,
+-			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+-			cmd, ret);
+-
+-	return ret;
+-}
+-
+-static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+-				    void *data)
+-{
+-	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+-	struct hclge_desc *desc = desc_src;
+-	u32 *reg = data;
+-
+-	entries_per_desc = ARRAY_SIZE(desc->data);
+-	reg_num = entries_per_desc * bd_num;
+-	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (i = 0; i < reg_num; i++) {
+-		index = i % entries_per_desc;
+-		desc_index = i / entries_per_desc;
+-		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
+-	}
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-
+-	return reg_num + separator_num;
+-}
+-
+-static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+-{
+-	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+-	int data_len_per_desc, bd_num, i;
+-	int *bd_num_list;
+-	u32 data_len;
+-	int ret;
+-
+-	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+-	if (!bd_num_list)
+-		return -ENOMEM;
+-
+-	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		goto out;
+-	}
+-
+-	data_len_per_desc = sizeof_field(struct hclge_desc, data);
+-	*len = 0;
+-	for (i = 0; i < dfx_reg_type_num; i++) {
+-		bd_num = bd_num_list[i];
+-		data_len = data_len_per_desc * bd_num;
+-		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+-	}
+-
+-out:
+-	kfree(bd_num_list);
+-	return ret;
+-}
+-
+-static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+-{
+-	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+-	int bd_num, bd_num_max, buf_len, i;
+-	struct hclge_desc *desc_src;
+-	int *bd_num_list;
+-	u32 *reg = data;
+-	int ret;
+-
+-	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+-	if (!bd_num_list)
+-		return -ENOMEM;
+-
+-	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		goto out;
+-	}
+-
+-	bd_num_max = bd_num_list[0];
+-	for (i = 1; i < dfx_reg_type_num; i++)
+-		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+-
+-	buf_len = sizeof(*desc_src) * bd_num_max;
+-	desc_src = kzalloc(buf_len, GFP_KERNEL);
+-	if (!desc_src) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	for (i = 0; i < dfx_reg_type_num; i++) {
+-		bd_num = bd_num_list[i];
+-		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+-					     hclge_dfx_reg_opcode_list[i]);
+-		if (ret) {
+-			dev_err(&hdev->pdev->dev,
+-				"Get dfx reg fail, status is %d.\n", ret);
+-			break;
+-		}
+-
+-		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+-	}
+-
+-	kfree(desc_src);
+-out:
+-	kfree(bd_num_list);
+-	return ret;
+-}
+-
+-static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+-			      struct hnae3_knic_private_info *kinfo)
+-{
+-#define HCLGE_RING_REG_OFFSET		0x200
+-#define HCLGE_RING_INT_REG_OFFSET	0x4
+-
+-	int i, j, reg_num, separator_num;
+-	int data_num_sum;
+-	u32 *reg = data;
+-
+-	/* fetching per-PF registers valus from PF PCIe register space */
+-	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (i = 0; i < reg_num; i++)
+-		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-	data_num_sum = reg_num + separator_num;
+-
+-	reg_num = ARRAY_SIZE(common_reg_addr_list);
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (i = 0; i < reg_num; i++)
+-		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-	data_num_sum += reg_num + separator_num;
+-
+-	reg_num = ARRAY_SIZE(ring_reg_addr_list);
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (j = 0; j < kinfo->num_tqps; j++) {
+-		for (i = 0; i < reg_num; i++)
+-			*reg++ = hclge_read_dev(&hdev->hw,
+-						ring_reg_addr_list[i] +
+-						HCLGE_RING_REG_OFFSET * j);
+-		for (i = 0; i < separator_num; i++)
+-			*reg++ = SEPARATOR_VALUE;
+-	}
+-	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
+-
+-	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (j = 0; j < hdev->num_msi_used - 1; j++) {
+-		for (i = 0; i < reg_num; i++)
+-			*reg++ = hclge_read_dev(&hdev->hw,
+-						tqp_intr_reg_addr_list[i] +
+-						HCLGE_RING_INT_REG_OFFSET * j);
+-		for (i = 0; i < separator_num; i++)
+-			*reg++ = SEPARATOR_VALUE;
+-	}
+-	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+-
+-	return data_num_sum;
+-}
+-
+-static int hclge_get_regs_len(struct hnae3_handle *handle)
+-{
+-	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+-	struct hclge_vport *vport = hclge_get_vport(handle);
+-	struct hclge_dev *hdev = vport->back;
+-	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+-	int regs_lines_32_bit, regs_lines_64_bit;
+-	int ret;
+-
+-	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get register number failed, ret = %d.\n", ret);
+-		return ret;
+-	}
+-
+-	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get dfx reg len failed, ret = %d.\n", ret);
+-		return ret;
+-	}
+-
+-	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+-		REG_SEPARATOR_LINE;
+-
+-	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+-		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+-		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+-}
+-
+-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+-			   void *data)
+-{
+-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+-	struct hclge_vport *vport = hclge_get_vport(handle);
+-	struct hclge_dev *hdev = vport->back;
+-	u32 regs_num_32_bit, regs_num_64_bit;
+-	int i, reg_num, separator_num, ret;
+-	u32 *reg = data;
+-
+-	*version = hdev->fw_version;
+-
+-	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get register number failed, ret = %d.\n", ret);
+-		return;
+-	}
+-
+-	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+-
+-	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get 32 bit register failed, ret = %d.\n", ret);
+-		return;
+-	}
+-	reg_num = regs_num_32_bit;
+-	reg += reg_num;
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-
+-	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+-	if (ret) {
+-		dev_err(&hdev->pdev->dev,
+-			"Get 64 bit register failed, ret = %d.\n", ret);
+-		return;
+-	}
+-	reg_num = regs_num_64_bit * 2;
+-	reg += reg_num;
+-	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-
+-	ret = hclge_get_dfx_reg(hdev, reg);
+-	if (ret)
+-		dev_err(&hdev->pdev->dev,
+-			"Get dfx register failed, ret = %d.\n", ret);
+-}
+-
+ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
+ {
+ 	struct hclge_set_led_state_cmd *req;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 6a43d1515585f..8f76b568c1bf6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -1142,8 +1142,6 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ 				      u16 state,
+ 				      struct hclge_vlan_info *vlan_info);
+ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+-				struct hclge_desc *desc);
+ void hclge_report_hw_error(struct hclge_dev *hdev,
+ 			   enum hnae3_hw_error_type type);
+ void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+new file mode 100644
+index 0000000000000..43c1c18fa81f8
+--- /dev/null
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+@@ -0,0 +1,668 @@
++// SPDX-License-Identifier: GPL-2.0+
++// Copyright (c) 2023 Hisilicon Limited.
++
++#include "hclge_cmd.h"
++#include "hclge_main.h"
++#include "hclge_regs.h"
++#include "hnae3.h"
++
++static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
++					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
++					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
++					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
++					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
++					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
++					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
++					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
++					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
++					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
++					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
++					 HCLGE_COMM_CMDQ_INTR_STS_REG,
++					 HCLGE_COMM_CMDQ_INTR_EN_REG,
++					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
++
++static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
++					   HCLGE_PF_OTHER_INT_REG,
++					   HCLGE_MISC_RESET_STS_REG,
++					   HCLGE_MISC_VECTOR_INT_STS,
++					   HCLGE_GLOBAL_RESET_REG,
++					   HCLGE_FUN_RST_ING,
++					   HCLGE_GRO_EN_REG};
++
++static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
++					 HCLGE_RING_RX_ADDR_H_REG,
++					 HCLGE_RING_RX_BD_NUM_REG,
++					 HCLGE_RING_RX_BD_LENGTH_REG,
++					 HCLGE_RING_RX_MERGE_EN_REG,
++					 HCLGE_RING_RX_TAIL_REG,
++					 HCLGE_RING_RX_HEAD_REG,
++					 HCLGE_RING_RX_FBD_NUM_REG,
++					 HCLGE_RING_RX_OFFSET_REG,
++					 HCLGE_RING_RX_FBD_OFFSET_REG,
++					 HCLGE_RING_RX_STASH_REG,
++					 HCLGE_RING_RX_BD_ERR_REG,
++					 HCLGE_RING_TX_ADDR_L_REG,
++					 HCLGE_RING_TX_ADDR_H_REG,
++					 HCLGE_RING_TX_BD_NUM_REG,
++					 HCLGE_RING_TX_PRIORITY_REG,
++					 HCLGE_RING_TX_TC_REG,
++					 HCLGE_RING_TX_MERGE_EN_REG,
++					 HCLGE_RING_TX_TAIL_REG,
++					 HCLGE_RING_TX_HEAD_REG,
++					 HCLGE_RING_TX_FBD_NUM_REG,
++					 HCLGE_RING_TX_OFFSET_REG,
++					 HCLGE_RING_TX_EBD_NUM_REG,
++					 HCLGE_RING_TX_EBD_OFFSET_REG,
++					 HCLGE_RING_TX_BD_ERR_REG,
++					 HCLGE_RING_EN_REG};
++
++static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
++					     HCLGE_TQP_INTR_GL0_REG,
++					     HCLGE_TQP_INTR_GL1_REG,
++					     HCLGE_TQP_INTR_GL2_REG,
++					     HCLGE_TQP_INTR_RL_REG};
++
++/* Get DFX BD number offset */
++#define HCLGE_DFX_BIOS_BD_OFFSET        1
++#define HCLGE_DFX_SSU_0_BD_OFFSET       2
++#define HCLGE_DFX_SSU_1_BD_OFFSET       3
++#define HCLGE_DFX_IGU_BD_OFFSET         4
++#define HCLGE_DFX_RPU_0_BD_OFFSET       5
++#define HCLGE_DFX_RPU_1_BD_OFFSET       6
++#define HCLGE_DFX_NCSI_BD_OFFSET        7
++#define HCLGE_DFX_RTC_BD_OFFSET         8
++#define HCLGE_DFX_PPP_BD_OFFSET         9
++#define HCLGE_DFX_RCB_BD_OFFSET         10
++#define HCLGE_DFX_TQP_BD_OFFSET         11
++#define HCLGE_DFX_SSU_2_BD_OFFSET       12
++
++static const u32 hclge_dfx_bd_offset_list[] = {
++	HCLGE_DFX_BIOS_BD_OFFSET,
++	HCLGE_DFX_SSU_0_BD_OFFSET,
++	HCLGE_DFX_SSU_1_BD_OFFSET,
++	HCLGE_DFX_IGU_BD_OFFSET,
++	HCLGE_DFX_RPU_0_BD_OFFSET,
++	HCLGE_DFX_RPU_1_BD_OFFSET,
++	HCLGE_DFX_NCSI_BD_OFFSET,
++	HCLGE_DFX_RTC_BD_OFFSET,
++	HCLGE_DFX_PPP_BD_OFFSET,
++	HCLGE_DFX_RCB_BD_OFFSET,
++	HCLGE_DFX_TQP_BD_OFFSET,
++	HCLGE_DFX_SSU_2_BD_OFFSET
++};
++
++static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
++	HCLGE_OPC_DFX_BIOS_COMMON_REG,
++	HCLGE_OPC_DFX_SSU_REG_0,
++	HCLGE_OPC_DFX_SSU_REG_1,
++	HCLGE_OPC_DFX_IGU_EGU_REG,
++	HCLGE_OPC_DFX_RPU_REG_0,
++	HCLGE_OPC_DFX_RPU_REG_1,
++	HCLGE_OPC_DFX_NCSI_REG,
++	HCLGE_OPC_DFX_RTC_REG,
++	HCLGE_OPC_DFX_PPP_REG,
++	HCLGE_OPC_DFX_RCB_REG,
++	HCLGE_OPC_DFX_TQP_REG,
++	HCLGE_OPC_DFX_SSU_REG_2
++};
++
++enum hclge_reg_tag {
++	HCLGE_REG_TAG_CMDQ = 0,
++	HCLGE_REG_TAG_COMMON,
++	HCLGE_REG_TAG_RING,
++	HCLGE_REG_TAG_TQP_INTR,
++	HCLGE_REG_TAG_QUERY_32_BIT,
++	HCLGE_REG_TAG_QUERY_64_BIT,
++	HCLGE_REG_TAG_DFX_BIOS_COMMON,
++	HCLGE_REG_TAG_DFX_SSU_0,
++	HCLGE_REG_TAG_DFX_SSU_1,
++	HCLGE_REG_TAG_DFX_IGU_EGU,
++	HCLGE_REG_TAG_DFX_RPU_0,
++	HCLGE_REG_TAG_DFX_RPU_1,
++	HCLGE_REG_TAG_DFX_NCSI,
++	HCLGE_REG_TAG_DFX_RTC,
++	HCLGE_REG_TAG_DFX_PPP,
++	HCLGE_REG_TAG_DFX_RCB,
++	HCLGE_REG_TAG_DFX_TQP,
++	HCLGE_REG_TAG_DFX_SSU_2,
++	HCLGE_REG_TAG_RPU_TNL,
++};
++
++#pragma pack(4)
++struct hclge_reg_tlv {
++	u16 tag;
++	u16 len;
++};
++
++struct hclge_reg_header {
++	u64 magic_number;
++	u8 is_vf;
++	u8 rsv[7];
++};
++
++#pragma pack()
++
++#define HCLGE_REG_TLV_SIZE	sizeof(struct hclge_reg_tlv)
++#define HCLGE_REG_HEADER_SIZE	sizeof(struct hclge_reg_header)
++#define HCLGE_REG_TLV_SPACE	(sizeof(struct hclge_reg_tlv) / sizeof(u32))
++#define HCLGE_REG_HEADER_SPACE	(sizeof(struct hclge_reg_header) / sizeof(u32))
++#define HCLGE_REG_MAGIC_NUMBER	0x686e733372656773 /* ASCII for "hns3regs" */
++
++#define HCLGE_REG_RPU_TNL_ID_0	1
++
++static u32 hclge_reg_get_header(void *data)
++{
++	struct hclge_reg_header *header = data;
++
++	header->magic_number = HCLGE_REG_MAGIC_NUMBER;
++	header->is_vf = 0x0;
++
++	return HCLGE_REG_HEADER_SPACE;
++}
++
++static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data)
++{
++	struct hclge_reg_tlv *tlv = data;
++
++	tlv->tag = tag;
++	tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE;
++
++	return HCLGE_REG_TLV_SPACE;
++}
++
++static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
++				 void *data)
++{
++#define HCLGE_32_BIT_REG_RTN_DATANUM 8
++#define HCLGE_32_BIT_DESC_NODATA_LEN 2
++
++	struct hclge_desc *desc;
++	u32 *reg_val = data;
++	__le32 *desc_data;
++	int nodata_num;
++	int cmd_num;
++	int i, k, n;
++	int ret;
++
++	if (regs_num == 0)
++		return 0;
++
++	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
++	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
++			       HCLGE_32_BIT_REG_RTN_DATANUM);
++	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
++	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Query 32 bit register cmd failed, ret = %d.\n", ret);
++		kfree(desc);
++		return ret;
++	}
++
++	for (i = 0; i < cmd_num; i++) {
++		if (i == 0) {
++			desc_data = (__le32 *)(&desc[i].data[0]);
++			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
++		} else {
++			desc_data = (__le32 *)(&desc[i]);
++			n = HCLGE_32_BIT_REG_RTN_DATANUM;
++		}
++		for (k = 0; k < n; k++) {
++			*reg_val++ = le32_to_cpu(*desc_data++);
++
++			regs_num--;
++			if (!regs_num)
++				break;
++		}
++	}
++
++	kfree(desc);
++	return 0;
++}
++
++static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
++				 void *data)
++{
++#define HCLGE_64_BIT_REG_RTN_DATANUM 4
++#define HCLGE_64_BIT_DESC_NODATA_LEN 1
++
++	struct hclge_desc *desc;
++	u64 *reg_val = data;
++	__le64 *desc_data;
++	int nodata_len;
++	int cmd_num;
++	int i, k, n;
++	int ret;
++
++	if (regs_num == 0)
++		return 0;
++
++	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
++	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
++			       HCLGE_64_BIT_REG_RTN_DATANUM);
++	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
++	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Query 64 bit register cmd failed, ret = %d.\n", ret);
++		kfree(desc);
++		return ret;
++	}
++
++	for (i = 0; i < cmd_num; i++) {
++		if (i == 0) {
++			desc_data = (__le64 *)(&desc[i].data[0]);
++			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
++		} else {
++			desc_data = (__le64 *)(&desc[i]);
++			n = HCLGE_64_BIT_REG_RTN_DATANUM;
++		}
++		for (k = 0; k < n; k++) {
++			*reg_val++ = le64_to_cpu(*desc_data++);
++
++			regs_num--;
++			if (!regs_num)
++				break;
++		}
++	}
++
++	kfree(desc);
++	return 0;
++}
++
++int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
++{
++	int i;
++
++	/* initialize command BD except the last one */
++	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
++		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
++					   true);
++		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
++	}
++
++	/* initialize the last command BD */
++	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
++
++	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
++}
++
++static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
++				    int *bd_num_list,
++				    u32 type_num)
++{
++	u32 entries_per_desc, desc_index, index, offset, i;
++	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
++	int ret;
++
++	ret = hclge_query_bd_num_cmd_send(hdev, desc);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get dfx bd num fail, status is %d.\n", ret);
++		return ret;
++	}
++
++	entries_per_desc = ARRAY_SIZE(desc[0].data);
++	for (i = 0; i < type_num; i++) {
++		offset = hclge_dfx_bd_offset_list[i];
++		index = offset % entries_per_desc;
++		desc_index = offset / entries_per_desc;
++		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
++	}
++
++	return ret;
++}
++
++static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
++				  struct hclge_desc *desc_src, int bd_num,
++				  enum hclge_opcode_type cmd)
++{
++	struct hclge_desc *desc = desc_src;
++	int i, ret;
++
++	hclge_cmd_setup_basic_desc(desc, cmd, true);
++	for (i = 0; i < bd_num - 1; i++) {
++		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
++		desc++;
++		hclge_cmd_setup_basic_desc(desc, cmd, true);
++	}
++
++	desc = desc_src;
++	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
++	if (ret)
++		dev_err(&hdev->pdev->dev,
++			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
++			cmd, ret);
++
++	return ret;
++}
++
++/* tnl_id = 0 means get the sum of all tnl regs' values */
++static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id,
++					  struct hclge_desc *desc, int bd_num)
++{
++	int i, ret;
++
++	for (i = 0; i < bd_num; i++) {
++		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0,
++					   true);
++		if (i != bd_num - 1)
++			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
++	}
++
++	desc[0].data[0] = cpu_to_le32(tnl_id);
++	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
++	if (ret)
++		dev_err(&hdev->pdev->dev,
++			"failed to query dfx rpu tnl reg, ret = %d\n",
++			ret);
++	return ret;
++}
++
++static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
++				    void *data)
++{
++	int entries_per_desc, reg_num, desc_index, index, i;
++	struct hclge_desc *desc = desc_src;
++	u32 *reg = data;
++
++	entries_per_desc = ARRAY_SIZE(desc->data);
++	reg_num = entries_per_desc * bd_num;
++	for (i = 0; i < reg_num; i++) {
++		index = i % entries_per_desc;
++		desc_index = i / entries_per_desc;
++		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
++	}
++
++	return reg_num;
++}
++
++static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
++{
++	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
++	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
++	int data_len_per_desc;
++	int *bd_num_list;
++	int ret;
++	u32 i;
++
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
++	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get dfx reg bd num fail, status is %d.\n", ret);
++		goto out;
++	}
++
++	data_len_per_desc = sizeof_field(struct hclge_desc, data);
++	*len = 0;
++	for (i = 0; i < dfx_reg_type_num; i++)
++		*len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE;
++
++	/*
++	 * The BD num of dfx_rpu_0 is reused by each dfx_rpu_tnl.
++	 * HCLGE_DFX_BD_OFFSET starts at 1, but the array subscript
++	 * starts at 0, so the offset needs '- 1'.
++	 */
++	*len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc +
++		 HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num;
++
++out:
++	kfree(bd_num_list);
++	return ret;
++}
++
++static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg,
++				     struct hclge_desc *desc_src,
++				     int bd_num)
++{
++	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
++	int ret = 0;
++	u8 i;
++
++	for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) {
++		ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num);
++		if (ret)
++			break;
++
++		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL,
++					 ARRAY_SIZE(desc_src->data) * bd_num,
++					 reg);
++		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
++	}
++
++	return ret;
++}
++
++static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
++{
++	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
++	int bd_num, bd_num_max, buf_len;
++	struct hclge_desc *desc_src;
++	int *bd_num_list;
++	u32 *reg = data;
++	int ret;
++	u32 i;
++
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
++	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get dfx reg bd num fail, status is %d.\n", ret);
++		goto out;
++	}
++
++	bd_num_max = bd_num_list[0];
++	for (i = 1; i < dfx_reg_type_num; i++)
++		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
++
++	buf_len = sizeof(*desc_src) * bd_num_max;
++	desc_src = kzalloc(buf_len, GFP_KERNEL);
++	if (!desc_src) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	for (i = 0; i < dfx_reg_type_num; i++) {
++		bd_num = bd_num_list[i];
++		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
++					     hclge_dfx_reg_opcode_list[i]);
++		if (ret) {
++			dev_err(&hdev->pdev->dev,
++				"Get dfx reg fail, status is %d.\n", ret);
++			goto free;
++		}
++
++		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i,
++					 ARRAY_SIZE(desc_src->data) * bd_num,
++					 reg);
++		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
++	}
++
++	/*
++	 * HCLGE_DFX_BD_OFFSET starts at 1, but the array subscript
++	 * starts at 0, so the offset needs '- 1'.
++	 */
++	bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1];
++	ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num);
++
++free:
++	kfree(desc_src);
++out:
++	kfree(bd_num_list);
++	return ret;
++}
++
++static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
++			      struct hnae3_knic_private_info *kinfo)
++{
++#define HCLGE_RING_REG_OFFSET		0x200
++#define HCLGE_RING_INT_REG_OFFSET	0x4
++
++	int i, j, reg_num;
++	int data_num_sum;
++	u32 *reg = data;
++
++	/* fetch per-PF register values from the PF PCIe register space */
++	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
++	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg);
++	for (i = 0; i < reg_num; i++)
++		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
++	data_num_sum = reg_num + HCLGE_REG_TLV_SPACE;
++
++	reg_num = ARRAY_SIZE(common_reg_addr_list);
++	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg);
++	for (i = 0; i < reg_num; i++)
++		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
++	data_num_sum += reg_num + HCLGE_REG_TLV_SPACE;
++
++	reg_num = ARRAY_SIZE(ring_reg_addr_list);
++	for (j = 0; j < kinfo->num_tqps; j++) {
++		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
++		for (i = 0; i < reg_num; i++)
++			*reg++ = hclge_read_dev(&hdev->hw,
++						ring_reg_addr_list[i] +
++						HCLGE_RING_REG_OFFSET * j);
++	}
++	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
++
++	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
++	for (j = 0; j < hdev->num_msi_used - 1; j++) {
++		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg);
++		for (i = 0; i < reg_num; i++)
++			*reg++ = hclge_read_dev(&hdev->hw,
++						tqp_intr_reg_addr_list[i] +
++						HCLGE_RING_INT_REG_OFFSET * j);
++	}
++	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) *
++			(hdev->num_msi_used - 1);
++
++	return data_num_sum;
++}
++
++static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
++			      u32 *regs_num_64_bit)
++{
++	struct hclge_desc desc;
++	u32 total_num;
++	int ret;
++
++	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
++	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Query register number cmd failed, ret = %d.\n", ret);
++		return ret;
++	}
++
++	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
++	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
++
++	total_num = *regs_num_32_bit + *regs_num_64_bit;
++	if (!total_num)
++		return -EINVAL;
++
++	return 0;
++}
++
++int hclge_get_regs_len(struct hnae3_handle *handle)
++{
++	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
++	struct hclge_vport *vport = hclge_get_vport(handle);
++	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
++	int cmdq_len, common_len, ring_len, tqp_intr_len;
++	int regs_len_32_bit, regs_len_64_bit;
++	struct hclge_dev *hdev = vport->back;
++	int ret;
++
++	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get register number failed, ret = %d.\n", ret);
++		return ret;
++	}
++
++	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get dfx reg len failed, ret = %d.\n", ret);
++		return ret;
++	}
++
++	cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
++	common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list);
++	ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
++	tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
++	regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32);
++	regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64);
++
++	/* return the total length of all register values */
++	return HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len *
++		kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) +
++		regs_len_32_bit + regs_len_64_bit + dfx_regs_len;
++}
++
++void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
++		    void *data)
++{
++#define HCLGE_REG_64_BIT_SPACE_MULTIPLE		2
++
++	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
++	struct hclge_vport *vport = hclge_get_vport(handle);
++	struct hclge_dev *hdev = vport->back;
++	u32 regs_num_32_bit, regs_num_64_bit;
++	u32 *reg = data;
++	int ret;
++
++	*version = hdev->fw_version;
++
++	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get register number failed, ret = %d.\n", ret);
++		return;
++	}
++
++	reg += hclge_reg_get_header(reg);
++	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
++
++	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT,
++				 regs_num_32_bit, reg);
++	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get 32 bit register failed, ret = %d.\n", ret);
++		return;
++	}
++	reg += regs_num_32_bit;
++
++	reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT,
++				 regs_num_64_bit *
++				 HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg);
++	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
++	if (ret) {
++		dev_err(&hdev->pdev->dev,
++			"Get 64 bit register failed, ret = %d.\n", ret);
++		return;
++	}
++	reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE;
++
++	ret = hclge_get_dfx_reg(hdev, reg);
++	if (ret)
++		dev_err(&hdev->pdev->dev,
++			"Get dfx register failed, ret = %d.\n", ret);
++}
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
+new file mode 100644
+index 0000000000000..b6bc1ecb8054e
+--- /dev/null
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/* Copyright (c) 2023 Hisilicon Limited. */
++
++#ifndef __HCLGE_REGS_H
++#define __HCLGE_REGS_H
++#include <linux/types.h>
++#include "hclge_comm_cmd.h"
++
++struct hnae3_handle;
++struct hclge_dev;
++
++int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
++				struct hclge_desc *desc);
++int hclge_get_regs_len(struct hnae3_handle *handle);
++void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
++		    void *data);
++#endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 34f02ca8d1d2d..7a2f9233d6954 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -6,6 +6,7 @@
+ #include <net/rtnetlink.h>
+ #include "hclgevf_cmd.h"
+ #include "hclgevf_main.h"
++#include "hclgevf_regs.h"
+ #include "hclge_mbx.h"
+ #include "hnae3.h"
+ #include "hclgevf_devlink.h"
+@@ -33,58 +34,6 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ 
+ MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
+ 
+-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+-					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+-					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+-					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
+-					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
+-					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+-					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+-					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+-					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
+-					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
+-					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+-					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+-					 HCLGE_COMM_CMDQ_INTR_EN_REG,
+-					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
+-
+-static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+-					   HCLGEVF_RST_ING,
+-					   HCLGEVF_GRO_EN_REG};
+-
+-static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+-					 HCLGEVF_RING_RX_ADDR_H_REG,
+-					 HCLGEVF_RING_RX_BD_NUM_REG,
+-					 HCLGEVF_RING_RX_BD_LENGTH_REG,
+-					 HCLGEVF_RING_RX_MERGE_EN_REG,
+-					 HCLGEVF_RING_RX_TAIL_REG,
+-					 HCLGEVF_RING_RX_HEAD_REG,
+-					 HCLGEVF_RING_RX_FBD_NUM_REG,
+-					 HCLGEVF_RING_RX_OFFSET_REG,
+-					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
+-					 HCLGEVF_RING_RX_STASH_REG,
+-					 HCLGEVF_RING_RX_BD_ERR_REG,
+-					 HCLGEVF_RING_TX_ADDR_L_REG,
+-					 HCLGEVF_RING_TX_ADDR_H_REG,
+-					 HCLGEVF_RING_TX_BD_NUM_REG,
+-					 HCLGEVF_RING_TX_PRIORITY_REG,
+-					 HCLGEVF_RING_TX_TC_REG,
+-					 HCLGEVF_RING_TX_MERGE_EN_REG,
+-					 HCLGEVF_RING_TX_TAIL_REG,
+-					 HCLGEVF_RING_TX_HEAD_REG,
+-					 HCLGEVF_RING_TX_FBD_NUM_REG,
+-					 HCLGEVF_RING_TX_OFFSET_REG,
+-					 HCLGEVF_RING_TX_EBD_NUM_REG,
+-					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
+-					 HCLGEVF_RING_TX_BD_ERR_REG,
+-					 HCLGEVF_RING_EN_REG};
+-
+-static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+-					     HCLGEVF_TQP_INTR_GL0_REG,
+-					     HCLGEVF_TQP_INTR_GL1_REG,
+-					     HCLGEVF_TQP_INTR_GL2_REG,
+-					     HCLGEVF_TQP_INTR_RL_REG};
+-
+ /* hclgevf_cmd_send - send command to command queue
+  * @hw: pointer to the hw struct
+  * @desc: prefilled descriptor for describing the command
+@@ -111,7 +60,7 @@ void hclgevf_arq_init(struct hclgevf_dev *hdev)
+ 	spin_unlock(&cmdq->crq.lock);
+ }
+ 
+-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
++struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+ {
+ 	if (!handle->client)
+ 		return container_of(handle, struct hclgevf_dev, nic);
+@@ -3258,72 +3207,6 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+ 	*advertising = hdev->hw.mac.advertising;
+ }
+ 
+-#define MAX_SEPARATE_NUM	4
+-#define SEPARATOR_VALUE		0xFDFCFBFA
+-#define REG_NUM_PER_LINE	4
+-#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+-
+-static int hclgevf_get_regs_len(struct hnae3_handle *handle)
+-{
+-	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+-
+-	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+-	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+-	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+-	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+-
+-	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
+-		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
+-}
+-
+-static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+-			     void *data)
+-{
+-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+-	int i, j, reg_um, separator_num;
+-	u32 *reg = data;
+-
+-	*version = hdev->fw_version;
+-
+-	/* fetching per-VF registers values from VF PCIe register space */
+-	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+-	for (i = 0; i < reg_um; i++)
+-		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-
+-	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+-	for (i = 0; i < reg_um; i++)
+-		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+-	for (i = 0; i < separator_num; i++)
+-		*reg++ = SEPARATOR_VALUE;
+-
+-	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+-	for (j = 0; j < hdev->num_tqps; j++) {
+-		for (i = 0; i < reg_um; i++)
+-			*reg++ = hclgevf_read_dev(&hdev->hw,
+-						  ring_reg_addr_list[i] +
+-						  HCLGEVF_TQP_REG_SIZE * j);
+-		for (i = 0; i < separator_num; i++)
+-			*reg++ = SEPARATOR_VALUE;
+-	}
+-
+-	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+-	for (j = 0; j < hdev->num_msi_used - 1; j++) {
+-		for (i = 0; i < reg_um; i++)
+-			*reg++ = hclgevf_read_dev(&hdev->hw,
+-						  tqp_intr_reg_addr_list[i] +
+-						  4 * j);
+-		for (i = 0; i < separator_num; i++)
+-			*reg++ = SEPARATOR_VALUE;
+-	}
+-}
+-
+ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ 				struct hclge_mbx_port_base_vlan *port_base_vlan)
+ {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 59ca6c794d6db..81c16b8c8da29 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -294,4 +294,5 @@ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+ void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ 			struct hclge_mbx_port_base_vlan *port_base_vlan);
++struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle);
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+new file mode 100644
+index 0000000000000..197ab733306b5
+--- /dev/null
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+@@ -0,0 +1,127 @@
++// SPDX-License-Identifier: GPL-2.0+
++// Copyright (c) 2023 Hisilicon Limited.
++
++#include "hclgevf_main.h"
++#include "hclgevf_regs.h"
++#include "hnae3.h"
++
++static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
++					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
++					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
++					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
++					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
++					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
++					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
++					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
++					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
++					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
++					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
++					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
++					 HCLGE_COMM_CMDQ_INTR_EN_REG,
++					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
++
++static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
++					   HCLGEVF_RST_ING,
++					   HCLGEVF_GRO_EN_REG};
++
++static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
++					 HCLGEVF_RING_RX_ADDR_H_REG,
++					 HCLGEVF_RING_RX_BD_NUM_REG,
++					 HCLGEVF_RING_RX_BD_LENGTH_REG,
++					 HCLGEVF_RING_RX_MERGE_EN_REG,
++					 HCLGEVF_RING_RX_TAIL_REG,
++					 HCLGEVF_RING_RX_HEAD_REG,
++					 HCLGEVF_RING_RX_FBD_NUM_REG,
++					 HCLGEVF_RING_RX_OFFSET_REG,
++					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
++					 HCLGEVF_RING_RX_STASH_REG,
++					 HCLGEVF_RING_RX_BD_ERR_REG,
++					 HCLGEVF_RING_TX_ADDR_L_REG,
++					 HCLGEVF_RING_TX_ADDR_H_REG,
++					 HCLGEVF_RING_TX_BD_NUM_REG,
++					 HCLGEVF_RING_TX_PRIORITY_REG,
++					 HCLGEVF_RING_TX_TC_REG,
++					 HCLGEVF_RING_TX_MERGE_EN_REG,
++					 HCLGEVF_RING_TX_TAIL_REG,
++					 HCLGEVF_RING_TX_HEAD_REG,
++					 HCLGEVF_RING_TX_FBD_NUM_REG,
++					 HCLGEVF_RING_TX_OFFSET_REG,
++					 HCLGEVF_RING_TX_EBD_NUM_REG,
++					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
++					 HCLGEVF_RING_TX_BD_ERR_REG,
++					 HCLGEVF_RING_EN_REG};
++
++static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
++					     HCLGEVF_TQP_INTR_GL0_REG,
++					     HCLGEVF_TQP_INTR_GL1_REG,
++					     HCLGEVF_TQP_INTR_GL2_REG,
++					     HCLGEVF_TQP_INTR_RL_REG};
++
++#define MAX_SEPARATE_NUM	4
++#define SEPARATOR_VALUE		0xFDFCFBFA
++#define REG_NUM_PER_LINE	4
++#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
++
++int hclgevf_get_regs_len(struct hnae3_handle *handle)
++{
++	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
++	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++
++	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
++	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
++	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
++	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
++
++	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
++		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
++}
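
The legacy VF layout instead pads every register block out to whole 16-byte lines with SEPARATOR_VALUE words, which is where the '+ 1' in each *_lines term comes from. A standalone sketch of the length math, using the register counts from the address lists above (14 cmdq, 3 common, 26 ring, 5 tqp_intr) and invented num_tqps/num_msi_used values:

#include <stdio.h>
#include <stdint.h>

#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(uint32_t))

int main(void)
{
	size_t cmdq_lines = 14 * sizeof(uint32_t) / REG_LEN_PER_LINE + 1;	/* 3 + 1 */
	size_t common_lines = 3 * sizeof(uint32_t) / REG_LEN_PER_LINE + 1;	/* 0 + 1 */
	size_t ring_lines = 26 * sizeof(uint32_t) / REG_LEN_PER_LINE + 1;	/* 6 + 1 */
	size_t tqp_intr_lines = 5 * sizeof(uint32_t) / REG_LEN_PER_LINE + 1;	/* 1 + 1 */
	int num_tqps = 8, num_msi_used = 17;	/* hypothetical */

	printf("dump length = %zu bytes\n",
	       (cmdq_lines + common_lines + ring_lines * num_tqps +
		tqp_intr_lines * (num_msi_used - 1)) * REG_LEN_PER_LINE);
	return 0;
}
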
++
++void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
++		      void *data)
++{
++#define HCLGEVF_RING_REG_OFFSET		0x200
++#define HCLGEVF_RING_INT_REG_OFFSET	0x4
++
++	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++	int i, j, reg_um, separator_num;
++	u32 *reg = data;
++
++	*version = hdev->fw_version;
++
++	/* fetch per-VF register values from the VF PCIe register space */
++	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
++	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
++	for (i = 0; i < reg_um; i++)
++		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
++	for (i = 0; i < separator_num; i++)
++		*reg++ = SEPARATOR_VALUE;
++
++	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
++	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
++	for (i = 0; i < reg_um; i++)
++		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
++	for (i = 0; i < separator_num; i++)
++		*reg++ = SEPARATOR_VALUE;
++
++	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
++	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
++	for (j = 0; j < hdev->num_tqps; j++) {
++		for (i = 0; i < reg_um; i++)
++			*reg++ = hclgevf_read_dev(&hdev->hw,
++						  ring_reg_addr_list[i] +
++						  HCLGEVF_RING_REG_OFFSET * j);
++		for (i = 0; i < separator_num; i++)
++			*reg++ = SEPARATOR_VALUE;
++	}
++
++	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
++	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
++	for (j = 0; j < hdev->num_msi_used - 1; j++) {
++		for (i = 0; i < reg_um; i++)
++			*reg++ = hclgevf_read_dev(&hdev->hw,
++						  tqp_intr_reg_addr_list[i] +
++						  HCLGEVF_RING_INT_REG_OFFSET * j);
++		for (i = 0; i < separator_num; i++)
++			*reg++ = SEPARATOR_VALUE;
++	}
++}
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
+new file mode 100644
+index 0000000000000..77bdcf60a1afe
+--- /dev/null
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/* Copyright (c) 2023 Hisilicon Limited. */
++
++#ifndef __HCLGEVF_REGS_H
++#define __HCLGEVF_REGS_H
++#include <linux/types.h>
++
++struct hnae3_handle;
++
++int hclgevf_get_regs_len(struct hnae3_handle *handle);
++void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
++		      void *data);
++#endif
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index b40dfe6ae3217..c2cdc79308dc1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1346,6 +1346,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ 				struct ice_rq_event_info *event)
+ {
++	struct ice_rq_event_info *task_ev;
+ 	struct ice_aq_task *task;
+ 	bool found = false;
+ 
+@@ -1354,15 +1355,15 @@ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ 		if (task->state || task->opcode != opcode)
+ 			continue;
+ 
+-		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+-		task->event->msg_len = event->msg_len;
++		task_ev = task->event;
++		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
++		task_ev->msg_len = event->msg_len;
+ 
+ 		/* Only copy the data buffer if a destination was set */
+-		if (task->event->msg_buf &&
+-		    task->event->buf_len > event->buf_len) {
+-			memcpy(task->event->msg_buf, event->msg_buf,
++		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
++			memcpy(task_ev->msg_buf, event->msg_buf,
+ 			       event->buf_len);
+-			task->event->buf_len = event->buf_len;
++			task_ev->buf_len = event->buf_len;
+ 		}
+ 
+ 		task->state = ICE_AQ_TASK_COMPLETE;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+index a38614d21ea8f..de1d83300481d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+@@ -131,6 +131,8 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	case READ_TIME:
+ 		cmd_val |= GLTSYN_CMD_READ_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		break;
+ 	}
+ 
+ 	wr32(hw, GLTSYN_CMD, cmd_val);
+@@ -1226,18 +1228,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+ }
+ 
+ /**
+- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
++ * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+  * @hw: pointer to HW struct
+  * @port: Port to which cmd has to be sent
+  * @cmd: Command to be sent to the port
+  *
+  * Prepare the requested port for an upcoming timer sync command.
+  *
+- * Note there is no equivalent of this operation on E810, as that device
+- * always handles all external PHYs internally.
++ * Do not use this function directly. If you want to configure exactly one
++ * port, use ice_ptp_one_port_cmd() instead.
+  */
+ static int
+-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
++ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ {
+ 	u32 cmd_val, val;
+ 	u8 tmr_idx;
+@@ -1261,6 +1263,8 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ 	case ADJ_TIME_AT_TIME:
+ 		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		break;
+ 	}
+ 
+ 	/* Tx case */
+@@ -1306,6 +1310,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ 	return 0;
+ }
+ 
++/**
++ * ice_ptp_one_port_cmd - Prepare one port for a timer command
++ * @hw: pointer to the HW struct
++ * @configured_port: the port to configure with configured_cmd
++ * @configured_cmd: timer command to prepare on the configured_port
++ *
++ * Prepare the configured_port for the configured_cmd, and prepare all other
++ * ports for ICE_PTP_NOP. This causes the configured_port to execute the
++ * desired command while all other ports perform no operation.
++ */
++static int
++ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
++		     enum ice_ptp_tmr_cmd configured_cmd)
++{
++	u8 port;
++
++	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
++		enum ice_ptp_tmr_cmd cmd;
++		int err;
++
++		if (port == configured_port)
++			cmd = configured_cmd;
++		else
++			cmd = ICE_PTP_NOP;
++
++		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
+ /**
+  * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+  * @hw: pointer to the HW struct
+@@ -1322,7 +1359,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ 		int err;
+ 
+-		err = ice_ptp_one_port_cmd(hw, port, cmd);
++		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -2252,6 +2289,9 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+ 	if (err)
+ 		goto err_unlock;
+ 
++	/* Do not perform any action on the main timer */
++	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
++
+ 	/* Issue the sync to activate the time adjustment */
+ 	ice_ptp_exec_tmr_cmd(hw);
+ 
+@@ -2372,6 +2412,9 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+ 	if (err)
+ 		return err;
+ 
++	/* Do not perform any action on the main timer */
++	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
++
+ 	ice_ptp_exec_tmr_cmd(hw);
+ 
+ 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+@@ -2847,6 +2890,8 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	case ADJ_TIME_AT_TIME:
+ 		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		return 0;
+ 	}
+ 
+ 	/* Read, modify, write */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+index 3b68cb91bd819..096685237ca61 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+@@ -9,7 +9,8 @@ enum ice_ptp_tmr_cmd {
+ 	INIT_INCVAL,
+ 	ADJ_TIME,
+ 	ADJ_TIME_AT_TIME,
+-	READ_TIME
++	READ_TIME,
++	ICE_PTP_NOP,
+ };
+ 
+ enum ice_ptp_serdes {
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 9a2561409b06f..08e3df37089fe 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4814,6 +4814,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
+ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ 				  struct igb_ring *rx_ring)
+ {
++#if (PAGE_SIZE < 8192)
++	struct e1000_hw *hw = &adapter->hw;
++#endif
++
+ 	/* set build_skb and buffer size flags */
+ 	clear_ring_build_skb_enabled(rx_ring);
+ 	clear_ring_uses_large_buffer(rx_ring);
+@@ -4824,10 +4828,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ 	set_ring_build_skb_enabled(rx_ring);
+ 
+ #if (PAGE_SIZE < 8192)
+-	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+-		return;
+-
+-	set_ring_uses_large_buffer(rx_ring);
++	if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
++	    rd32(E1000_RCTL) & E1000_RCTL_SBP)
++		set_ring_uses_large_buffer(rx_ring);
+ #endif
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+index b4fcb20c3f4fd..af21e2030cff2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+@@ -355,8 +355,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ 
+ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+ {
++	u64 cfg, pfc_class_mask_cfg;
+ 	rpm_t *rpm = rpmd;
+-	u64 cfg;
+ 
+ 	/* ALL pause frames received are completely ignored */
+ 	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+@@ -380,9 +380,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+ 		rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+ 
+ 	/* Disable all PFC classes */
+-	cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
++	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
++						RPMX_CMRX_PRT_CBFC_CTL;
++	cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
+ 	cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+-	rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
++	rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
+ }
+ 
+ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+@@ -605,8 +607,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
+ 	if (!is_lmac_valid(rpm, lmac_id))
+ 		return -ENODEV;
+ 
++	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
++						RPMX_CMRX_PRT_CBFC_CTL;
++
+ 	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+-	class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
++	class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
+ 	pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
+ 
+ 	if (rx_pause) {
+@@ -635,10 +640,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
+ 		cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ 
+ 	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+-
+-	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+-						RPMX_CMRX_PRT_CBFC_CTL;
+-
+ 	rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 77c8f650f7ac1..b9712040a0bc2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -804,6 +804,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
+ 
+ 	mutex_unlock(&pfvf->mbox.lock);
+ }
++EXPORT_SYMBOL(otx2_txschq_free_one);
+ 
+ void otx2_txschq_stop(struct otx2_nic *pfvf)
+ {
+@@ -1432,7 +1433,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ 	}
+ 
+ 	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+-	pp_params.pool_size = numptrs;
++	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ 	pp_params.nid = NUMA_NO_NODE;
+ 	pp_params.dev = pfvf->dev;
+ 	pp_params.dma_dir = DMA_FROM_DEVICE;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index ccaf97bb1ce03..bfddbff7bcdfb 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ 	 * link config level. These rest of the scheduler can be
+ 	 * same as hw.txschq_list.
+ 	 */
+-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ 		req->schq[lvl] = 1;
+ 
+ 	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+@@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ 		return PTR_ERR(rsp);
+ 
+ 	/* Setup transmit scheduler list */
+-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ 		if (!rsp->schq[lvl])
+ 			return -ENOSPC;
+ 
+@@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+ 
+ static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+ {
+-	struct nix_txsch_free_req *free_req;
++	int lvl;
+ 
+-	mutex_lock(&pfvf->mbox.lock);
+ 	/* free PFC TLx nodes */
+-	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+-	if (!free_req) {
+-		mutex_unlock(&pfvf->mbox.lock);
+-		return -ENOMEM;
+-	}
+-
+-	free_req->flags = TXSCHQ_FREE_ALL;
+-	otx2_sync_mbox_msg(&pfvf->mbox);
+-	mutex_unlock(&pfvf->mbox.lock);
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
++		otx2_txschq_free_one(pfvf, lvl,
++				     pfvf->pfc_schq_list[lvl][prio]);
+ 
+ 	pfvf->pfc_alloc_status[prio] = false;
+ 	return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+index b5d689eeff80b..9e3bfbe5c4809 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+@@ -23,6 +23,8 @@
+ #define	OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
+ #define	OTX2_MIN_MTU		60
+ 
++#define OTX2_PAGE_POOL_SZ	2048
++
+ #define OTX2_MAX_GSO_SEGS	255
+ #define OTX2_MAX_FRAGS_IN_SQE	9
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 4804990b7f226..99dcbd006357a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -384,16 +384,11 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+ 		pci_cfg_access_lock(sdev);
+ 	}
+ 	/* PCI link toggle */
+-	err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16);
+-	if (err)
+-		return err;
+-	reg16 |= PCI_EXP_LNKCTL_LD;
+-	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
++	err = pcie_capability_set_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD);
+ 	if (err)
+ 		return err;
+ 	msleep(500);
+-	reg16 &= ~PCI_EXP_LNKCTL_LD;
+-	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
++	err = pcie_capability_clear_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 377372f0578ae..aa29f09e83564 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -32,16 +32,13 @@
+ 
+ #include <linux/clocksource.h>
+ #include <linux/highmem.h>
++#include <linux/log2.h>
+ #include <linux/ptp_clock_kernel.h>
+ #include <rdma/mlx5-abi.h>
+ #include "lib/eq.h"
+ #include "en.h"
+ #include "clock.h"
+ 
+-enum {
+-	MLX5_CYCLES_SHIFT	= 31
+-};
+-
+ enum {
+ 	MLX5_PIN_MODE_IN		= 0x0,
+ 	MLX5_PIN_MODE_OUT		= 0x1,
+@@ -93,6 +90,31 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
+ 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
+ }
+ 
++static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
++{
++	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
++	 *
++	 * Two sets of equations are needed to derive the optimal shift
++	 * constant for the cyclecounter.
++	 *
++	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
++	 *    ppb = scaled_ppm * 1000 / 2^16
++	 *
++	 * Using the two equations together
++	 *
++	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
++	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
++	 *    dev_freq_khz = 2^(shift_constant - 16)
++	 *
++	 * then yields
++	 *
++	 *    shift_constant = ilog2(dev_freq_khz) + 16
++	 */
++
++	return min(ilog2(dev_freq_khz) + 16,
++		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
++}
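
Working the derivation for a hypothetical device_frequency_khz of 156250: ilog2(156250) + 16 = 33, while the 32-bit-multiplier clamp ilog2((U32_MAX / NSEC_PER_MSEC) * 156250) = ilog2(671093750) = 29, so the shift constant becomes 29 (the removed MLX5_CYCLES_SHIFT hardwired 31 regardless of frequency). A standalone model of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

static unsigned int ilog2_u64(uint64_t v)
{
	return 63 - __builtin_clzll(v);	/* stand-in for the kernel's ilog2() */
}

static uint32_t ptp_shift_constant(uint32_t dev_freq_khz)
{
	uint32_t a = ilog2_u64(dev_freq_khz) + 16;
	uint32_t b = ilog2_u64((UINT32_MAX / NSEC_PER_MSEC) * dev_freq_khz);

	return a < b ? a : b;
}

int main(void)
{
	printf("shift = %u\n", ptp_shift_constant(156250));	/* prints 29 */
	return 0;
}
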
++
+ static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
+ {
+ 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+@@ -909,7 +931,7 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
+ 
+ 	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ 	timer->cycles.read = read_internal_timer;
+-	timer->cycles.shift = MLX5_CYCLES_SHIFT;
++	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
+ 	timer->cycles.mult = clocksource_khz2mult(dev_freq,
+ 						  timer->cycles.shift);
+ 	timer->nominal_c_mult = timer->cycles.mult;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+index 70735068cf292..0fd290d776ffe 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+@@ -405,7 +405,8 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
+ 			container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ 
+ 	return sprintf(buf, "front panel %03u\n",
+-		       mlxsw_hwmon_attr->type_index);
++		       mlxsw_hwmon_attr->type_index + 1 -
++		       mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count);
+ }
+ 
+ static ssize_t
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+index 41298835a11e1..d23f293e285cb 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+@@ -48,6 +48,7 @@
+ #define MLXSW_I2C_MBOX_SIZE_BITS	12
+ #define MLXSW_I2C_ADDR_BUF_SIZE		4
+ #define MLXSW_I2C_BLK_DEF		32
++#define MLXSW_I2C_BLK_MAX		100
+ #define MLXSW_I2C_RETRY			5
+ #define MLXSW_I2C_TIMEOUT_MSECS		5000
+ #define MLXSW_I2C_MAX_DATA_SIZE		256
+@@ -444,7 +445,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
+ 	} else {
+ 		/* No input mailbox is case of initialization query command. */
+ 		reg_size = MLXSW_I2C_MAX_DATA_SIZE;
+-		num = reg_size / mlxsw_i2c->block_size;
++		num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
+ 
+ 		if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ 			dev_err(&client->dev, "Could not acquire lock");
+@@ -653,7 +654,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client)
+ 			return -EOPNOTSUPP;
+ 		}
+ 
+-		mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
++		mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
+ 					      min_t(u16, quirks->max_read_len,
+ 						    quirks->max_write_len));
+ 	} else {
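
The probe hunk caps block_size at MLXSW_I2C_BLK_MAX (100) instead of forcing it up to at least MLXSW_I2C_BLK_DEF, and the DIV_ROUND_UP fix above matters because MLXSW_I2C_MAX_DATA_SIZE is 256: with a 100-byte block, truncating division would issue two transfers and silently drop the 56-byte tail. A trivial standalone check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int reg_size = 256, block_size = 100;

	printf("truncating: %d blocks\n", reg_size / block_size);		/* 2 */
	printf("round up:   %d blocks\n", DIV_ROUND_UP(reg_size, block_size));	/* 3 */
	return 0;
}
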
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+index 266a21a2d1246..1da2b1f82ae93 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+@@ -59,7 +59,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port,
+ 	int err;
+ 
+ 	vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+-	if (vrule) {
++	if (!IS_ERR(vrule)) {
+ 		u32 value, mask;
+ 
+ 		/* Just modify the ingress port mask and exit */
+@@ -106,7 +106,7 @@ static int lan966x_ptp_del_trap(struct lan966x_port *port,
+ 	int err;
+ 
+ 	vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+-	if (!vrule)
++	if (IS_ERR(vrule))
+ 		return -EEXIST;
+ 
+ 	vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
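
The fix follows from vcap_get_rule() reporting failure with an ERR_PTR()-encoded pointer rather than NULL, so the old NULL tests always took the wrong branch on error. A tiny userspace model of the idiom (MAX_ERRNO as in the kernel's include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *rule = ERR_PTR(-2 /* -ENOENT */);

	printf("NULL check sees failure: %s\n", !rule ? "yes" : "no");		/* no  */
	printf("IS_ERR sees failure:     %s\n", IS_ERR(rule) ? "yes" : "no");	/* yes */
	return 0;
}
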
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 5eb50b265c0bd..6351a2dc13bce 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -5239,13 +5239,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/* Disable ASPM L1 as that cause random device stop working
+ 	 * problems as well as full system hangs for some PCIe devices users.
+-	 * Chips from RTL8168h partially have issues with L1.2, but seem
+-	 * to work fine with L1 and L1.1.
+ 	 */
+ 	if (rtl_aspm_is_safe(tp))
+ 		rc = 0;
+-	else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
+-		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+ 	else
+ 		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ 	tp->aspm_manageable = !rc;
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 0c40571133cb9..00cf6de3bb2be 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -1485,7 +1485,9 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+ 			goto fail;
+ 
+ 		rc = efx_ptp_insert_eth_multicast_filter(efx);
+-		if (rc < 0)
++
++		/* Not all firmware variants support this filter */
++		if (rc < 0 && rc != -EPROTONOSUPPORT)
+ 			goto fail;
+ 	}
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 144ec756c796a..2d64650f4eb3c 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1341,8 +1341,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
+ 	struct crypto_aead *tfm;
+ 	int ret;
+ 
+-	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+-	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
++	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ 
+ 	if (IS_ERR(tfm))
+ 		return tfm;
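
crypto_alloc_aead()'s third argument is a mask over algorithm type flags: passing CRYPTO_ALG_ASYNC there restricted the lookup to synchronous gcm(aes) implementations, while passing 0 lets asynchronous providers (e.g. hardware offload drivers) match too. A rough userspace model of the lookup's (type, mask) matching rule:

#include <stdio.h>

#define CRYPTO_ALG_ASYNC 0x00000080	/* value as in include/linux/crypto.h */

/* an implementation matches roughly iff ((flags ^ type) & mask) == 0 */
static int eligible(unsigned int alg_flags, unsigned int type, unsigned int mask)
{
	return ((alg_flags ^ type) & mask) == 0;
}

int main(void)
{
	unsigned int async_impl = CRYPTO_ALG_ASYNC;	/* e.g. an offload engine */

	printf("mask=ASYNC: %d\n", eligible(async_impl, 0, CRYPTO_ALG_ASYNC));	/* 0 */
	printf("mask=0:     %d\n", eligible(async_impl, 0, 0));		/* 1 */
	return 0;
}
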
+diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
+index 9021b96d4f9df..dc3962b2aa6b0 100644
+--- a/drivers/net/pcs/pcs-lynx.c
++++ b/drivers/net/pcs/pcs-lynx.c
+@@ -216,7 +216,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs,
+ 	/* The PCS needs to be configured manually only
+ 	 * when not operating on in-band mode
+ 	 */
+-	if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED)
++	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ 		return;
+ 
+ 	if (duplex == DUPLEX_HALF)
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index a7f44f6335fb8..9275a672f90cb 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1963,8 +1963,9 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
+ 	ath10k_pci_irq_enable(ar);
+ 	ath10k_pci_rx_post(ar);
+ 
+-	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+-				   ar_pci->link_ctl);
++	pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_ASPMC,
++					   ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	return 0;
+ }
+@@ -2821,8 +2822,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar,
+ 
+ 	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ 				  &ar_pci->link_ctl);
+-	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+-				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
++	pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
++				   PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	/*
+ 	 * Bring the target up cleanly.
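
ath10k (and, in the hunks below, ath11k and ath12k) now saves LNKCTL once, clears only the PCI_EXP_LNKCTL_ASPMC field, and restores only that field via pcie_capability_clear_and_set_word(), so any other LNKCTL bits changed in the meantime survive the round trip. A bit-level model with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCTL_ASPMC 0x0003	/* ASPM control field */

int main(void)
{
	uint16_t saved = 0x0042;	/* hypothetical LNKCTL at save time, L1 on */
	uint16_t now   = 0x0140;	/* register later; other fields changed */
	uint16_t restored = (now & ~PCI_EXP_LNKCTL_ASPMC) |
			    (saved & PCI_EXP_LNKCTL_ASPMC);

	printf("restored = 0x%04x\n", restored);	/* 0x0142: only ASPMC from save */
	return 0;
}
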
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 5c76664ba0dd9..1e488eed282b5 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -2408,7 +2408,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ 		rx_status->freq = center_freq;
+ 	} else if (channel_num >= 1 && channel_num <= 14) {
+ 		rx_status->band = NL80211_BAND_2GHZ;
+-	} else if (channel_num >= 36 && channel_num <= 173) {
++	} else if (channel_num >= 36 && channel_num <= 177) {
+ 		rx_status->band = NL80211_BAND_5GHZ;
+ 	} else {
+ 		spin_lock_bh(&ar->data_lock);
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 79e2cbe826384..ec40adc1cb235 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -581,8 +581,8 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+ 		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+ 
+ 	/* disable L0s and L1 */
+-	pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-				   ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
++	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++				   PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+ }
+@@ -590,8 +590,10 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+ {
+ 	if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+-		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-					   ab_pci->link_ctl);
++		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_ASPMC,
++						   ab_pci->link_ctl &
++						   PCI_EXP_LNKCTL_ASPMC);
+ }
+ 
+ static int ath11k_pci_power_up(struct ath11k_base *ab)
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 1bb9802ef5696..45d88e35fc2eb 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -1637,9 +1637,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ 	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ 
+ 	memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+-	       sizeof(arg->peer_he_cap_macinfo));
++	       sizeof(he_cap->he_cap_elem.mac_cap_info));
+ 	memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+-	       sizeof(arg->peer_he_cap_phyinfo));
++	       sizeof(he_cap->he_cap_elem.phy_cap_info));
+ 	arg->peer_he_ops = vif->bss_conf.he_oper.params;
+ 
+ 	/* the top most byte is used to indicate BSS color info */
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index 5990a55801f0a..e4f08a066ca10 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -794,8 +794,8 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
+ 		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+ 
+ 	/* disable L0s and L1 */
+-	pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-				   ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
++	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++				   PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
+ }
+@@ -803,8 +803,10 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
+ static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
+ {
+ 	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+-		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-					   ab_pci->link_ctl);
++		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_ASPMC,
++						   ab_pci->link_ctl &
++						   PCI_EXP_LNKCTL_ASPMC);
+ }
+ 
+ static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index b3ed65e5c4da8..c55aab01fff5d 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -491,7 +491,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
+ 
+ 	priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
+ 					     priv->hw->wiphy->debugfsdir);
+-	if (!priv->debug.debugfs_phy)
++	if (IS_ERR(priv->debug.debugfs_phy))
+ 		return -ENOMEM;
+ 
+ 	ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index d652c647d56b5..1476b42b52a91 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -242,10 +242,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ 		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 		goto free_skb;
+ 	}
+-	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+ 	/* WMI command response */
+ 	ath9k_wmi_rsp_callback(wmi, skb);
++	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+ free_skb:
+ 	kfree_skb(skb);
+@@ -283,7 +283,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ 
+ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ 			       struct sk_buff *skb,
+-			       enum wmi_cmd_id cmd, u16 len)
++			       enum wmi_cmd_id cmd, u16 len,
++			       u8 *rsp_buf, u32 rsp_len)
+ {
+ 	struct wmi_cmd_hdr *hdr;
+ 	unsigned long flags;
+@@ -293,6 +294,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ 	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+ 
+ 	spin_lock_irqsave(&wmi->wmi_lock, flags);
++
++	/* record the rsp buffer and length */
++	wmi->cmd_rsp_buf = rsp_buf;
++	wmi->cmd_rsp_len = rsp_len;
++
+ 	wmi->last_seq_id = wmi->tx_seq_id;
+ 	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+@@ -308,8 +314,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	struct ath_common *common = ath9k_hw_common(ah);
+ 	u16 headroom = sizeof(struct htc_frame_hdr) +
+ 		       sizeof(struct wmi_cmd_hdr);
++	unsigned long time_left, flags;
+ 	struct sk_buff *skb;
+-	unsigned long time_left;
+ 	int ret = 0;
+ 
+ 	if (ah->ah_flags & AH_UNPLUGGED)
+@@ -333,11 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 		goto out;
+ 	}
+ 
+-	/* record the rsp buffer and length */
+-	wmi->cmd_rsp_buf = rsp_buf;
+-	wmi->cmd_rsp_len = rsp_len;
+-
+-	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
++	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -345,7 +347,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	if (!time_left) {
+ 		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ 			wmi_cmd_to_name(cmd_id));
++		spin_lock_irqsave(&wmi->wmi_lock, flags);
+ 		wmi->last_seq_id = 0;
++		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 		mutex_unlock(&wmi->op_mutex);
+ 		return -ETIMEDOUT;
+ 	}
+diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+index 52b18f4a774b7..0cdd6c50c1c08 100644
+--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
++++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+@@ -253,8 +253,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
+ 	if (!p)
+ 		return -ENOMEM;
+ 
+-	if (!priv || !priv->hist_data)
+-		return -EFAULT;
++	if (!priv || !priv->hist_data) {
++		ret = -EFAULT;
++		goto free_and_exit;
++	}
++
+ 	phist_data = priv->hist_data;
+ 
+ 	p += sprintf(p, "\n"
+@@ -309,6 +312,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
+ 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
+ 				      (unsigned long)p - page);
+ 
++free_and_exit:
++	free_page(page);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 9a698a16a8f38..6697132ecc977 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -189,6 +189,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
+ }
+ 
+ static void mwifiex_pcie_work(struct work_struct *work);
++static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter);
++static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter);
+ 
+ static int
+ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
+@@ -792,14 +794,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
+ 		if (!skb) {
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Unable to allocate skb for RX ring.\n");
+-			kfree(card->rxbd_ring_vbase);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		if (mwifiex_map_pci_memory(adapter, skb,
+ 					   MWIFIEX_RX_DATA_BUF_SIZE,
+-					   DMA_FROM_DEVICE))
+-			return -1;
++					   DMA_FROM_DEVICE)) {
++			kfree_skb(skb);
++			return -ENOMEM;
++		}
+ 
+ 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
+ 
+@@ -849,7 +852,6 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+ 		if (!skb) {
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Unable to allocate skb for EVENT buf.\n");
+-			kfree(card->evtbd_ring_vbase);
+ 			return -ENOMEM;
+ 		}
+ 		skb_put(skb, MAX_EVENT_SIZE);
+@@ -857,8 +859,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+ 		if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+ 					   DMA_FROM_DEVICE)) {
+ 			kfree_skb(skb);
+-			kfree(card->evtbd_ring_vbase);
+-			return -1;
++			return -ENOMEM;
+ 		}
+ 
+ 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
+@@ -1058,6 +1059,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
+  */
+ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
+ {
++	int ret;
+ 	struct pcie_service_card *card = adapter->card;
+ 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ 
+@@ -1096,7 +1098,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
+ 		    (u32)((u64)card->rxbd_ring_pbase >> 32),
+ 		    card->rxbd_ring_size);
+ 
+-	return mwifiex_init_rxq_ring(adapter);
++	ret = mwifiex_init_rxq_ring(adapter);
++	if (ret)
++		mwifiex_pcie_delete_rxbd_ring(adapter);
++	return ret;
+ }
+ 
+ /*
+@@ -1127,6 +1132,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
+  */
+ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
+ {
++	int ret;
+ 	struct pcie_service_card *card = adapter->card;
+ 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ 
+@@ -1161,7 +1167,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
+ 		    (u32)((u64)card->evtbd_ring_pbase >> 32),
+ 		    card->evtbd_ring_size);
+ 
+-	return mwifiex_pcie_init_evt_ring(adapter);
++	ret = mwifiex_pcie_init_evt_ring(adapter);
++	if (ret)
++		mwifiex_pcie_delete_evtbd_ring(adapter);
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+index 13659b02ba882..65420ad674167 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+@@ -86,6 +86,15 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
+ 
++	if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
++		mwifiex_dbg(priv->adapter, ERROR,
++			    "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
++			    skb->len, rx_pkt_off);
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return -1;
++	}
++
+ 	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ 		     sizeof(bridge_tunnel_header))) ||
+ 	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+@@ -194,7 +203,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
+ 
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
+ 
+-	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
++	if ((rx_pkt_offset + rx_pkt_length) > skb->len ||
++	    sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+ 			    skb->len, rx_pkt_offset, rx_pkt_length);
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+index e495f7eaea033..b8b9a0fcb19cd 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+@@ -103,6 +103,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+ 		return;
+ 	}
+ 
++	if (sizeof(*rx_pkt_hdr) +
++	    le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) {
++		mwifiex_dbg(adapter, ERROR,
++			    "wrong rx packet offset: len=%d,rx_pkt_offset=%d\n",
++			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return;
++	}
++
+ 	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ 		     sizeof(bridge_tunnel_header))) ||
+ 	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+@@ -243,7 +253,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+ 
+ 	if (is_multicast_ether_addr(ra)) {
+ 		skb_uap = skb_copy(skb, GFP_ATOMIC);
+-		mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
++		if (likely(skb_uap)) {
++			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
++		} else {
++			mwifiex_dbg(adapter, ERROR,
++				    "failed to copy skb for uAP\n");
++			priv->stats.rx_dropped++;
++			dev_kfree_skb_any(skb);
++			return -1;
++		}
+ 	} else {
+ 		if (mwifiex_get_sta_entry(priv, ra)) {
+ 			/* Requeue Intra-BSS packet */
+@@ -367,6 +385,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+ 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+ 
++	if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
++	    sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) {
++		mwifiex_dbg(adapter, ERROR,
++			    "wrong rx packet for struct ethhdr: len=%d, offset=%d\n",
++			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return 0;
++	}
++
+ 	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);
+ 
+ 	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
+index 94c2d219835da..745b1d925b217 100644
+--- a/drivers/net/wireless/marvell/mwifiex/util.c
++++ b/drivers/net/wireless/marvell/mwifiex/util.c
+@@ -393,11 +393,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 	}
+ 
+ 	rx_pd = (struct rxpd *)skb->data;
++	pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
++	if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) {
++		mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length");
++		return -1;
++	}
+ 
+ 	skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
+ 	skb_pull(skb, sizeof(pkt_len));
+-
+-	pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
++	pkt_len -= sizeof(pkt_len);
+ 
+ 	ieee_hdr = (void *)skb->data;
+ 	if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
+@@ -410,7 +414,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 		skb->data + sizeof(struct ieee80211_hdr),
+ 		pkt_len - sizeof(struct ieee80211_hdr));
+ 
+-	pkt_len -= ETH_ALEN + sizeof(pkt_len);
++	pkt_len -= ETH_ALEN;
+ 	rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+ 
+ 	cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 6b07b8fafec2f..0e9f4197213a3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -277,7 +277,7 @@ struct mt76_sta_stats {
+ 	u64 tx_mcs[16];		/* mcs idx */
+ 	u64 tx_bytes;
+ 	/* WED TX */
+-	u32 tx_packets;
++	u32 tx_packets;		/* unit: MSDU */
+ 	u32 tx_retries;
+ 	u32 tx_failed;
+ 	/* WED RX */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index be4d63db5f64a..e415ac5e321f1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -522,9 +522,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 		q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
+ 			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+ 
+-		/* counting non-offloading skbs */
+-		wcid->stats.tx_bytes += skb->len;
+-		wcid->stats.tx_packets++;
++		/* mt7915 WA only counts WED path */
++		if (is_mt7915(dev) && mtk_wed_device_active(&dev->mmio.wed))
++			wcid->stats.tx_packets++;
+ 	}
+ 
+ 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
+@@ -609,12 +609,11 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ 	txs = le32_to_cpu(txs_data[0]);
+ 
+ 	/* PPDU based reporting */
+-	if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
++	if (mtk_wed_device_active(&dev->mmio.wed) &&
++	    FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
+ 		stats->tx_bytes +=
+ 			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
+ 			le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
+-		stats->tx_packets +=
+-			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
+ 		stats->tx_failed +=
+ 			le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT);
+ 		stats->tx_retries +=
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index ca1ce97a6d2fd..7a52b68491b6e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -998,6 +998,7 @@ enum {
+ 	MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ 	MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+ 	MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
++	MCU_EXT_EVENT_WA_TX_STAT = 0x74,
+ 	MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
+ 	MCU_EXT_EVENT_MURU_CTRL = 0x9f,
+ };
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index ac2049f49bb38..9defd2b3c2f8d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -414,7 +414,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
+ 			if (!dev->dbdc_support)
+ 				vht_cap->cap |=
+ 					IEEE80211_VHT_CAP_SHORT_GI_160 |
+-					IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ 					FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
+ 		} else {
+ 			vht_cap->cap |=
+@@ -499,6 +498,12 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
+ 	set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
+ 	      FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
+ 	mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
++
++	/* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
++	 * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
++	 */
++	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
++		mt76_set(dev, MT_AGG_ACR4(band), MT_AGG_ACR_PPDU_TXS2H);
+ }
+ 
+ static void
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 1b361199c0616..42a983e40ade9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -269,6 +269,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
+ 	vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+ 
+ 	mt7915_init_bitrate_mask(vif);
++	memset(&mvif->cap, -1, sizeof(mvif->cap));
+ 
+ 	mt7915_mcu_add_bss_info(phy, vif, true);
+ 	mt7915_mcu_add_sta(dev, vif, NULL, true);
+@@ -470,7 +471,8 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+ 		ieee80211_wake_queues(hw);
+ 	}
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_POWER) {
++	if (changed & (IEEE80211_CONF_CHANGE_POWER |
++		       IEEE80211_CONF_CHANGE_CHANNEL)) {
+ 		ret = mt7915_mcu_set_txpower_sku(phy);
+ 		if (ret)
+ 			return ret;
+@@ -599,6 +601,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ {
+ 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ 	struct mt7915_dev *dev = mt7915_hw_dev(hw);
++	int set_bss_info = -1, set_sta = -1;
+ 
+ 	mutex_lock(&dev->mt76.mutex);
+ 
+@@ -607,15 +610,18 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 	 * and then peer references bss_info_rfch to set bandwidth cap.
+ 	 */
+ 	if (changed & BSS_CHANGED_BSSID &&
+-	    vif->type == NL80211_IFTYPE_STATION) {
+-		bool join = !is_zero_ether_addr(info->bssid);
+-
+-		mt7915_mcu_add_bss_info(phy, vif, join);
+-		mt7915_mcu_add_sta(dev, vif, NULL, join);
+-	}
+-
++	    vif->type == NL80211_IFTYPE_STATION)
++		set_bss_info = set_sta = !is_zero_ether_addr(info->bssid);
+ 	if (changed & BSS_CHANGED_ASSOC)
+-		mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
++		set_bss_info = vif->cfg.assoc;
++	if (changed & BSS_CHANGED_BEACON_ENABLED &&
++	    vif->type != NL80211_IFTYPE_AP)
++		set_bss_info = set_sta = info->enable_beacon;
++
++	if (set_bss_info == 1)
++		mt7915_mcu_add_bss_info(phy, vif, true);
++	if (set_sta == 1)
++		mt7915_mcu_add_sta(dev, vif, NULL, true);
+ 
+ 	if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ 		mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
+@@ -629,11 +635,6 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ 
+-	if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
+-		mt7915_mcu_add_bss_info(phy, vif, true);
+-		mt7915_mcu_add_sta(dev, vif, NULL, true);
+-	}
+-
+ 	/* ensure that enable txcmd_mode after bss_info */
+ 	if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ 		mt7915_mcu_set_tx(dev, vif);
+@@ -650,6 +651,62 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 		       BSS_CHANGED_FILS_DISCOVERY))
+ 		mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
+ 
++	if (set_bss_info == 0)
++		mt7915_mcu_add_bss_info(phy, vif, false);
++	if (set_sta == 0)
++		mt7915_mcu_add_sta(dev, vif, NULL, false);
++
++	mutex_unlock(&dev->mt76.mutex);
++}
++
++static void
++mt7915_vif_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif)
++{
++	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
++	struct mt7915_vif_cap *vc = &mvif->cap;
++
++	vc->ht_ldpc = vif->bss_conf.ht_ldpc;
++	vc->vht_ldpc = vif->bss_conf.vht_ldpc;
++	vc->vht_su_ebfer = vif->bss_conf.vht_su_beamformer;
++	vc->vht_su_ebfee = vif->bss_conf.vht_su_beamformee;
++	vc->vht_mu_ebfer = vif->bss_conf.vht_mu_beamformer;
++	vc->vht_mu_ebfee = vif->bss_conf.vht_mu_beamformee;
++	vc->he_ldpc = vif->bss_conf.he_ldpc;
++	vc->he_su_ebfer = vif->bss_conf.he_su_beamformer;
++	vc->he_su_ebfee = vif->bss_conf.he_su_beamformee;
++	vc->he_mu_ebfer = vif->bss_conf.he_mu_beamformer;
++}
++
++static int
++mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++		struct ieee80211_bss_conf *link_conf)
++{
++	struct mt7915_phy *phy = mt7915_hw_phy(hw);
++	struct mt7915_dev *dev = mt7915_hw_dev(hw);
++	int err;
++
++	mutex_lock(&dev->mt76.mutex);
++
++	mt7915_vif_check_caps(phy, vif);
++
++	err = mt7915_mcu_add_bss_info(phy, vif, true);
++	if (err)
++		goto out;
++	err = mt7915_mcu_add_sta(dev, vif, NULL, true);
++out:
++	mutex_unlock(&dev->mt76.mutex);
++
++	return err;
++}
++
++static void
++mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++	       struct ieee80211_bss_conf *link_conf)
++{
++	struct mt7915_dev *dev = mt7915_hw_dev(hw);
++
++	mutex_lock(&dev->mt76.mutex);
++	mt7915_mcu_add_sta(dev, vif, NULL, false);
+ 	mutex_unlock(&dev->mt76.mutex);
+ }
+ 
+@@ -1042,8 +1099,10 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ 		sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+ 
+-		sinfo->tx_packets = msta->wcid.stats.tx_packets;
+-		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
++		if (!mt7915_mcu_wed_wa_tx_stats(phy->dev, msta->wcid.idx)) {
++			sinfo->tx_packets = msta->wcid.stats.tx_packets;
++			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
++		}
+ 
+ 		sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+@@ -1526,6 +1585,8 @@ const struct ieee80211_ops mt7915_ops = {
+ 	.conf_tx = mt7915_conf_tx,
+ 	.configure_filter = mt7915_configure_filter,
+ 	.bss_info_changed = mt7915_bss_info_changed,
++	.start_ap = mt7915_start_ap,
++	.stop_ap = mt7915_stop_ap,
+ 	.sta_add = mt7915_sta_add,
+ 	.sta_remove = mt7915_sta_remove,
+ 	.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 9fcb22fa1f97e..1a8611c6b684d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -164,7 +164,9 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 	}
+ 
+ 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+-	if (seq != rxd->seq)
++	if (seq != rxd->seq &&
++	    !(rxd->eid == MCU_CMD_EXT_CID &&
++	      rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
+ 		return -EAGAIN;
+ 
+ 	if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
+@@ -274,7 +276,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+ 
+ 	r = (struct mt7915_mcu_rdd_report *)skb->data;
+ 
+-	if (r->band_idx > MT_BAND1)
++	if (r->band_idx > MT_RX_SEL2)
+ 		return;
+ 
+ 	if ((r->band_idx && !dev->phy.mt76->band_idx) &&
+@@ -395,12 +397,14 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
+ 	struct mt76_connac2_mcu_rxd *rxd;
+ 
+ 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+-	if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+-	    rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+-	    rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+-	    rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+-	    rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
+-	    !rxd->seq)
++	if ((rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
++	     rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
++	     rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
++	     rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
++	     rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
++	     !rxd->seq) &&
++	     !(rxd->eid == MCU_CMD_EXT_CID &&
++	       rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
+ 		mt7915_mcu_rx_unsolicited_event(dev, skb);
+ 	else
+ 		mt76_mcu_rx_event(&dev->mt76, skb);
+@@ -706,6 +710,7 @@ static void
+ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ 		      struct ieee80211_vif *vif)
+ {
++	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ 	struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ 	struct ieee80211_he_mcs_nss_supp mcs_map;
+ 	struct sta_rec_he *he;
+@@ -739,7 +744,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ 	     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
+ 		cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
+ 
+-	if (vif->bss_conf.he_ldpc &&
++	if (mvif->cap.he_ldpc &&
+ 	    (elem->phy_cap_info[1] &
+ 	     IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ 		cap |= STA_REC_HE_CAP_LDPC;
+@@ -848,6 +853,7 @@ static void
+ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ 			struct ieee80211_sta *sta, struct ieee80211_vif *vif)
+ {
++	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ 	struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ 	struct sta_rec_muru *muru;
+ 	struct tlv *tlv;
+@@ -860,9 +866,9 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ 
+ 	muru = (struct sta_rec_muru *)tlv;
+ 
+-	muru->cfg.mimo_dl_en = vif->bss_conf.he_mu_beamformer ||
+-			       vif->bss_conf.vht_mu_beamformer ||
+-			       vif->bss_conf.vht_mu_beamformee;
++	muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
++			       mvif->cap.vht_mu_ebfer ||
++			       mvif->cap.vht_mu_ebfee;
+ 	if (!is_mt7915(&dev->mt76))
+ 		muru->cfg.mimo_ul_en = true;
+ 	muru->cfg.ofdma_dl_en = true;
+@@ -995,8 +1001,8 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ 	mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
+ 	if (sta)
+ 		mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+-					    wtbl_hdr, vif->bss_conf.ht_ldpc,
+-					    vif->bss_conf.vht_ldpc);
++					    wtbl_hdr, mvif->cap.ht_ldpc,
++					    mvif->cap.vht_ldpc);
+ 
+ 	return 0;
+ }
+@@ -1005,6 +1011,7 @@ static inline bool
+ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 			struct ieee80211_sta *sta, bool bfee)
+ {
++	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ 	int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ 
+ 	if (vif->type != NL80211_IFTYPE_STATION &&
+@@ -1018,10 +1025,10 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 		struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+ 
+ 		if (bfee)
+-			return vif->bss_conf.he_su_beamformee &&
++			return mvif->cap.he_su_ebfee &&
+ 			       HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ 		else
+-			return vif->bss_conf.he_su_beamformer &&
++			return mvif->cap.he_su_ebfer &&
+ 			       HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ 	}
+ 
+@@ -1029,10 +1036,10 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 		u32 cap = sta->deflink.vht_cap.cap;
+ 
+ 		if (bfee)
+-			return vif->bss_conf.vht_su_beamformee &&
++			return mvif->cap.vht_su_ebfee &&
+ 			       (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ 		else
+-			return vif->bss_conf.vht_su_beamformer &&
++			return mvif->cap.vht_su_ebfer &&
+ 			       (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ 	}
+ 
+@@ -1527,7 +1534,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
+ 			cap |= STA_CAP_TX_STBC;
+ 		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ 			cap |= STA_CAP_RX_STBC;
+-		if (vif->bss_conf.ht_ldpc &&
++		if (mvif->cap.ht_ldpc &&
+ 		    (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ 			cap |= STA_CAP_LDPC;
+ 
+@@ -1553,7 +1560,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
+ 			cap |= STA_CAP_VHT_TX_STBC;
+ 		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ 			cap |= STA_CAP_VHT_RX_STBC;
+-		if (vif->bss_conf.vht_ldpc &&
++		if (mvif->cap.vht_ldpc &&
+ 		    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ 			cap |= STA_CAP_VHT_LDPC;
+ 
+@@ -2993,7 +3000,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
+ 	}
+ 
+ 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
+-					req, sizeof(req), true, &skb);
++					req, len * sizeof(req[0]), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3733,6 +3740,62 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
+ 				 &req, sizeof(req), true);
+ }
+ 
++int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
++{
++	struct {
++		__le32 cmd;
++		__le32 num;
++		__le32 __rsv;
++		__le16 wlan_idx;
++	} req = {
++		.cmd = cpu_to_le32(0x15),
++		.num = cpu_to_le32(1),
++		.wlan_idx = cpu_to_le16(wlan_idx),
++	};
++	struct mt7915_mcu_wa_tx_stat {
++		__le16 wlan_idx;
++		u8 __rsv[2];
++
++		/* tx_bytes is deprecated since WA byte counter uses u32,
++		 * which easily leads to overflow.
++		 */
++		__le32 tx_bytes;
++		__le32 tx_packets;
++	} *res;
++	struct mt76_wcid *wcid;
++	struct sk_buff *skb;
++	int ret;
++
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY),
++					&req, sizeof(req), true, &skb);
++	if (ret)
++		return ret;
++
++	if (!is_mt7915(&dev->mt76))
++		skb_pull(skb, 4);
++
++	res = (struct mt7915_mcu_wa_tx_stat *)skb->data;
++
++	if (le16_to_cpu(res->wlan_idx) != wlan_idx) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	rcu_read_lock();
++
++	wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
++	if (wcid)
++		wcid->stats.tx_packets += le32_to_cpu(res->tx_packets);
++	else
++		ret = -EINVAL;
++
++	rcu_read_unlock();
++out:
++	dev_kfree_skb(skb);
++
++	return ret;
++}
++
+ int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
+ {
+ 	struct {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 45f3558bf31c1..2fa059af23ded 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -545,8 +545,6 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+ static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+ {
+ 	struct mt7915_dev *dev;
+-	struct mt7915_phy *phy;
+-	int ret;
+ 
+ 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ 
+@@ -554,43 +552,19 @@ static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+ 	dev->mt76.token_size = wed->wlan.token_start;
+ 	spin_unlock_bh(&dev->mt76.token_lock);
+ 
+-	ret = wait_event_timeout(dev->mt76.tx_wait,
+-				 !dev->mt76.wed_token_count, HZ);
+-	if (!ret)
+-		return -EAGAIN;
+-
+-	phy = &dev->phy;
+-	mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+-
+-	phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+-	if (phy)
+-		mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx),
+-			 MT_AGG_ACR_PPDU_TXS2H);
+-
+-	return 0;
++	return !wait_event_timeout(dev->mt76.tx_wait,
++				   !dev->mt76.wed_token_count, HZ);
+ }
+ 
+ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+ {
+ 	struct mt7915_dev *dev;
+-	struct mt7915_phy *phy;
+ 
+ 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ 
+ 	spin_lock_bh(&dev->mt76.token_lock);
+ 	dev->mt76.token_size = MT7915_TOKEN_SIZE;
+ 	spin_unlock_bh(&dev->mt76.token_lock);
+-
+-	/* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+-	 * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
+-	 */
+-	phy = &dev->phy;
+-	mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+-
+-	phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+-	if (phy)
+-		mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx),
+-			   MT_AGG_ACR_PPDU_TXS2H);
+ }
+ 
+ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index b3ead35307406..0f76733c9c1ac 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -147,9 +147,23 @@ struct mt7915_sta {
+ 	} twt;
+ };
+ 
++struct mt7915_vif_cap {
++	bool ht_ldpc:1;
++	bool vht_ldpc:1;
++	bool he_ldpc:1;
++	bool vht_su_ebfer:1;
++	bool vht_su_ebfee:1;
++	bool vht_mu_ebfer:1;
++	bool vht_mu_ebfee:1;
++	bool he_su_ebfer:1;
++	bool he_su_ebfee:1;
++	bool he_mu_ebfer:1;
++};
++
+ struct mt7915_vif {
+ 	struct mt76_vif mt76; /* must be first */
+ 
++	struct mt7915_vif_cap cap;
+ 	struct mt7915_sta sta;
+ 	struct mt7915_phy *phy;
+ 
+@@ -539,6 +553,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 			   struct ieee80211_sta *sta, struct rate_info *rate);
+ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ 				     struct cfg80211_chan_def *chandef);
++int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wcid);
+ int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set);
+ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
+ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index bf1da9fddfaba..f41975e37d06a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -113,7 +113,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
+ 	wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
+ 	wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
+ 	wiphy->max_sched_scan_reqs = 1;
+-	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
++	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
++			WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+ 	wiphy->reg_notifier = mt7921_regd_notifier;
+ 
+ 	wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+index 534143465d9b3..fbedaacffbba5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+@@ -293,7 +293,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 	/* event from WA */
+ 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ 			       MT_RXQ_ID(MT_RXQ_MCU_WA),
+-			       MT7996_RX_MCU_RING_SIZE,
++			       MT7996_RX_MCU_RING_SIZE_WA,
+ 			       MT_RX_BUF_SIZE,
+ 			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 9b0f6053e0fa6..25c5deb15d213 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -836,14 +836,19 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+ 		skb_pull(skb, hdr_gap);
+ 		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
+ 			pad_start = ieee80211_get_hdrlen_from_skb(skb);
+-		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR) &&
+-			   get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) {
++		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
+ 			/* When header translation failure is indicated,
+ 			 * the hardware will insert an extra 2-byte field
+ 			 * containing the data length after the protocol
+-			 * type field.
++			 * type field. This happens either when the LLC-SNAP
++			 * pattern did not match, or if a VLAN header was
++			 * detected.
+ 			 */
+-			pad_start = 16;
++			pad_start = 12;
++			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
++				pad_start += 4;
++			else
++				pad_start = 0;
+ 		}
+ 
+ 		if (pad_start) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 88e2f9d0e5130..62a02b03d83ba 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -339,7 +339,11 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+ 	if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ 		return;
+ 
+-	mphy = dev->mt76.phys[r->band_idx];
++	if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
++		mphy = dev->rdd2_phy->mt76;
++	else
++		mphy = dev->mt76.phys[r->band_idx];
++
+ 	if (!mphy)
+ 		return;
+ 
+@@ -712,6 +716,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
+ 	struct cfg80211_chan_def *chandef = &phy->chandef;
+ 	struct mt76_connac_bss_basic_tlv *bss;
+ 	u32 type = CONNECTION_INFRA_AP;
++	u16 sta_wlan_idx = wlan_idx;
+ 	struct tlv *tlv;
+ 	int idx;
+ 
+@@ -731,7 +736,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
+ 				struct mt76_wcid *wcid;
+ 
+ 				wcid = (struct mt76_wcid *)sta->drv_priv;
+-				wlan_idx = wcid->idx;
++				sta_wlan_idx = wcid->idx;
+ 			}
+ 			rcu_read_unlock();
+ 		}
+@@ -751,7 +756,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
+ 	bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ 	bss->dtim_period = vif->bss_conf.dtim_period;
+ 	bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
+-	bss->sta_idx = cpu_to_le16(wlan_idx);
++	bss->sta_idx = cpu_to_le16(sta_wlan_idx);
+ 	bss->conn_type = cpu_to_le32(type);
+ 	bss->omac_idx = mvif->omac_idx;
+ 	bss->band_idx = mvif->band_idx;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index 4d7dcb95a620a..b8bcad717d89f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -26,6 +26,7 @@
+ 
+ #define MT7996_RX_RING_SIZE		1536
+ #define MT7996_RX_MCU_RING_SIZE		512
++#define MT7996_RX_MCU_RING_SIZE_WA	1024
+ 
+ #define MT7996_FIRMWARE_WA		"mediatek/mt7996/mt7996_wa.bin"
+ #define MT7996_FIRMWARE_WM		"mediatek/mt7996/mt7996_wm.bin"
+diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
+index 0accc71a91c9a..4644dace9bb34 100644
+--- a/drivers/net/wireless/mediatek/mt76/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
+@@ -8,6 +8,7 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+ 	[MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
+ 	[MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
++	[MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 },
+ 	[MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 72b3ec715e47a..e9b9728458a9b 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -121,6 +121,7 @@ int
+ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ 		       struct sk_buff *skb)
+ {
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ 	int pid;
+@@ -134,8 +135,14 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ 		return MT_PACKET_ID_NO_ACK;
+ 
+ 	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+-			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
++			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
++		if (mtk_wed_device_active(&dev->mmio.wed) &&
++		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
++		     ieee80211_is_data(hdr->frame_control)))
++			return MT_PACKET_ID_WED;
++
+ 		return MT_PACKET_ID_NO_SKB;
++	}
+ 
+ 	spin_lock_bh(&dev->status_lock);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index a4bbac916e22b..ce5a9ac081457 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -3193,12 +3193,14 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
+ 	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+ 	bool btc_manual;
++	int ret;
+ 
+-	if (kstrtobool_from_user(user_buf, count, &btc_manual))
+-		goto out;
++	ret = kstrtobool_from_user(user_buf, count, &btc_manual);
++	if (ret)
++		return ret;
+ 
+ 	btc->ctrl.manual = btc_manual;
+-out:
++
+ 	return count;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 9637f5e48d842..d44628a900465 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -312,31 +312,17 @@ rtw89_early_fw_feature_recognize(struct device *device,
+ 				 struct rtw89_fw_info *early_fw,
+ 				 int *used_fw_format)
+ {
+-	union rtw89_compat_fw_hdr buf = {};
+ 	const struct firmware *firmware;
+-	bool full_req = false;
+ 	char fw_name[64];
+ 	int fw_format;
+ 	u32 ver_code;
+ 	int ret;
+ 
+-	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will
+-	 * be denied (-EPERM). Then, we don't get right firmware things as
+-	 * expected. So, in this case, we have to request full firmware here.
+-	 */
+-	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
+-		full_req = true;
+-
+ 	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
+ 		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
+ 				      chip->fw_basename, fw_format);
+ 
+-		if (full_req)
+-			ret = request_firmware(&firmware, fw_name, device);
+-		else
+-			ret = request_partial_firmware_into_buf(&firmware, fw_name,
+-								device, &buf, sizeof(buf),
+-								0);
++		ret = request_firmware(&firmware, fw_name, device);
+ 		if (!ret) {
+ 			dev_info(device, "loaded firmware %s\n", fw_name);
+ 			*used_fw_format = fw_format;
+@@ -349,10 +335,7 @@ rtw89_early_fw_feature_recognize(struct device *device,
+ 		return NULL;
+ 	}
+ 
+-	if (full_req)
+-		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
+-	else
+-		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);
++	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
+ 
+ 	if (!ver_code)
+ 		goto out;
+@@ -360,11 +343,7 @@ rtw89_early_fw_feature_recognize(struct device *device,
+ 	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
+ 
+ out:
+-	if (full_req)
+-		return firmware;
+-
+-	release_firmware(firmware);
+-	return NULL;
++	return firmware;
+ }
+ 
+ int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+index fa018e1f499b2..259df67836a0e 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+@@ -846,7 +846,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ 	case ID_NBTXK:
+ 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
+-		iqk_cmd = 0x308 | (1 << (4 + path));
++		iqk_cmd = 0x408 | (1 << (4 + path));
+ 		break;
+ 	case ID_NBRXK:
+ 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+@@ -1078,7 +1078,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8
+ {
+ 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ 	bool kfail;
+-	u8 gp = 0x3;
++	u8 gp = 0x2;
+ 
+ 	switch (iqk_info->iqk_band[path]) {
+ 	case RTW89_BAND_2G:
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index 2abd2235bbcab..9532108d2dce1 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -909,7 +909,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
+ 	return 0;
+ }
+ 
+-static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
++static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
+ {
+ 	qp->link_is_up = false;
+ 	qp->active = false;
+@@ -932,6 +932,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+ 	qp->tx_async = 0;
+ }
+ 
++static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
++{
++	ntb_qp_link_context_reset(qp);
++	if (qp->remote_rx_info)
++		qp->remote_rx_info->entry = qp->rx_max_entry - 1;
++}
++
+ static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
+ {
+ 	struct ntb_transport_ctx *nt = qp->transport;
+@@ -1174,7 +1181,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
+ 	qp->ndev = nt->ndev;
+ 	qp->client_ready = false;
+ 	qp->event_handler = NULL;
+-	ntb_qp_link_down_reset(qp);
++	ntb_qp_link_context_reset(qp);
+ 
+ 	if (mw_num < qp_count % mw_count)
+ 		num_qps_mw = qp_count / mw_count + 1;
+@@ -2276,9 +2283,13 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ 	struct ntb_queue_entry *entry;
+ 	int rc;
+ 
+-	if (!qp || !qp->link_is_up || !len)
++	if (!qp || !len)
+ 		return -EINVAL;
+ 
++	/* If the qp link is down already, just ignore. */
++	if (!qp->link_is_up)
++		return 0;
++
+ 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ 	if (!entry) {
+ 		qp->tx_err_no_buf++;
+@@ -2418,7 +2429,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+ 	unsigned int head = qp->tx_index;
+ 	unsigned int tail = qp->remote_rx_info->entry;
+ 
+-	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
++	return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
+ }
+ EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+ 
+diff --git a/drivers/nvdimm/nd_perf.c b/drivers/nvdimm/nd_perf.c
+index 433bbb68ae641..2b6dc80d8fb5b 100644
+--- a/drivers/nvdimm/nd_perf.c
++++ b/drivers/nvdimm/nd_perf.c
+@@ -308,8 +308,8 @@ int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
+ 
+ 	rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
+ 	if (rc) {
+-		kfree(nd_pmu->pmu.attr_groups);
+ 		nvdimm_pmu_free_hotplug_memory(nd_pmu);
++		kfree(nd_pmu->pmu.attr_groups);
+ 		return rc;
+ 	}
+ 
+@@ -324,6 +324,7 @@ void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
+ {
+ 	perf_pmu_unregister(&nd_pmu->pmu);
+ 	nvdimm_pmu_free_hotplug_memory(nd_pmu);
++	kfree(nd_pmu->pmu.attr_groups);
+ 	kfree(nd_pmu);
+ }
+ EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu);
+diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
+index c6a648fd8744a..1f8c667c6f1ee 100644
+--- a/drivers/nvdimm/nd_virtio.c
++++ b/drivers/nvdimm/nd_virtio.c
+@@ -105,7 +105,8 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
+ 	 * parent bio. Otherwise directly call nd_region flush.
+ 	 */
+ 	if (bio && bio->bi_iter.bi_sector != -1) {
+-		struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
++		struct bio *child = bio_alloc(bio->bi_bdev, 0,
++					      REQ_OP_WRITE | REQ_PREFLUSH,
+ 					      GFP_ATOMIC);
+ 
+ 		if (!child)
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 7feb643f13707..28b479afd506f 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -752,8 +752,6 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs)
+ 	if (!of_node_is_root(ovcs->overlay_root))
+ 		pr_debug("%s() ovcs->overlay_root is not root\n", __func__);
+ 
+-	of_changeset_init(&ovcs->cset);
+-
+ 	cnt = 0;
+ 
+ 	/* fragment nodes */
+@@ -1013,6 +1011,7 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ 
+ 	INIT_LIST_HEAD(&ovcs->ovcs_list);
+ 	list_add_tail(&ovcs->ovcs_list, &ovcs_list);
++	of_changeset_init(&ovcs->cset);
+ 
+ 	/*
+ 	 * Must create permanent copy of FDT because of_fdt_unflatten_tree()
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index ddc75cd50825e..cf8dacf3e3b84 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1266,6 +1266,7 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+ DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
++DEFINE_SIMPLE_PROP(panel, "panel", NULL)
+ DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+ DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+ 
+@@ -1354,6 +1355,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
+ 	{ .parse_prop = parse_resets, },
+ 	{ .parse_prop = parse_leds, },
+ 	{ .parse_prop = parse_backlight, },
++	{ .parse_prop = parse_panel, },
+ 	{ .parse_prop = parse_gpio_compat, },
+ 	{ .parse_prop = parse_interrupts, },
+ 	{ .parse_prop = parse_regulators, },
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index b545fcb22536d..f6784cce8369b 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -77,7 +77,7 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("/testcase-data");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data", name),
++	unittest(np && name && !strcmp("/testcase-data", name),
+ 		"find /testcase-data failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -88,14 +88,14 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
++	unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ 		"find /testcase-data/phandle-tests/consumer-a failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+ 
+ 	np = of_find_node_by_path("testcase-alias");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data", name),
++	unittest(np && name && !strcmp("/testcase-data", name),
+ 		"find testcase-alias failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -106,7 +106,7 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
++	unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ 		"find testcase-alias/phandle-tests/consumer-a failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -1533,6 +1533,8 @@ static void attach_node_and_children(struct device_node *np)
+ 	const char *full_name;
+ 
+ 	full_name = kasprintf(GFP_KERNEL, "%pOF", np);
++	if (!full_name)
++		return;
+ 
+ 	if (!strcmp(full_name, "/__local_fixups__") ||
+ 	    !strcmp(full_name, "/__fixups__")) {
+@@ -2180,7 +2182,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
+ 	of_unittest_untrack_overlay(save_ovcs_id);
+ 
+ 	/* unittest device must be again in before state */
+-	if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
++	if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
+ 		unittest(0, "%s with device @\"%s\" %s\n",
+ 				overlay_name_from_nr(overlay_nr),
+ 				unittest_path(unittest_nr, ovtype),
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 3f46e499d615f..ae359ed6a1611 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -227,20 +227,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+ unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ 					    unsigned int index)
+ {
+-	struct opp_table *opp_table = opp->opp_table;
+-
+ 	if (IS_ERR_OR_NULL(opp) || !opp->available ||
+-	    index >= opp_table->required_opp_count) {
++	    index >= opp->opp_table->required_opp_count) {
+ 		pr_err("%s: Invalid parameters\n", __func__);
+ 		return 0;
+ 	}
+ 
+ 	/* required-opps not fully initialized yet */
+-	if (lazy_linking_pending(opp_table))
++	if (lazy_linking_pending(opp->opp_table))
+ 		return 0;
+ 
+ 	/* The required OPP table must belong to a genpd */
+-	if (unlikely(!opp_table->required_opp_tables[index]->is_genpd)) {
++	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
+ 		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+ 		return 0;
+ 	}
+@@ -2379,7 +2377,7 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ 
+ 		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
+ 		if (IS_ERR_OR_NULL(virt_dev)) {
+-			ret = PTR_ERR(virt_dev) ? : -ENODEV;
++			ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
+ 			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
+ 			goto err;
+ 		}
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index 9bf652bd002cf..10e846286f4ef 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -71,8 +71,6 @@
+ #undef CCIO_COLLECT_STATS
+ #endif
+ 
+-#include <asm/runway.h>		/* for proc_runway_root */
+-
+ #ifdef DEBUG_CCIO_INIT
+ #define DBG_INIT(x...)  printk(x)
+ #else
+@@ -1567,10 +1565,15 @@ static int __init ccio_probe(struct parisc_device *dev)
+ 
+ #ifdef CONFIG_PROC_FS
+ 	if (ioc_count == 0) {
+-		proc_create_single(MODULE_NAME, 0, proc_runway_root,
++		struct proc_dir_entry *runway;
++
++		runway = proc_mkdir("bus/runway", NULL);
++		if (runway) {
++			proc_create_single(MODULE_NAME, 0, runway,
+ 				ccio_proc_info);
+-		proc_create_single(MODULE_NAME"-bitmap", 0, proc_runway_root,
++			proc_create_single(MODULE_NAME"-bitmap", 0, runway,
+ 				ccio_proc_bitmap_info);
++		}
+ 	}
+ #endif
+ 	ioc_count++;
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index 8b1dcd537020f..8f28f8696bf32 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -121,7 +121,7 @@ module_param(sba_reserve_agpgart, int, 0444);
+ MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
+ #endif
+ 
+-struct proc_dir_entry *proc_runway_root __ro_after_init;
++static struct proc_dir_entry *proc_runway_root __ro_after_init;
+ struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+ 
+ /************************************
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 3c230ca3de584..0b2e90d2f04f2 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -497,8 +497,8 @@ int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
+ }
+ EXPORT_SYMBOL(pcie_capability_write_dword);
+ 
+-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+-				       u16 clear, u16 set)
++int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
++						u16 clear, u16 set)
+ {
+ 	int ret;
+ 	u16 val;
+@@ -512,7 +512,21 @@ int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
++EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked);
++
++int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
++					      u16 clear, u16 set)
++{
++	unsigned long flags;
++	int ret;
++
++	spin_lock_irqsave(&dev->pcie_cap_lock, flags);
++	ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set);
++	spin_unlock_irqrestore(&dev->pcie_cap_lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked);
+ 
+ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ 					u32 clear, u32 set)
+diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+index de4c1758a6c33..19595e93dd4b6 100644
+--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
++++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+@@ -45,6 +45,7 @@ struct ls_pcie_ep {
+ 	struct pci_epc_features		*ls_epc;
+ 	const struct ls_pcie_ep_drvdata *drvdata;
+ 	int				irq;
++	u32				lnkcap;
+ 	bool				big_endian;
+ };
+ 
+@@ -73,6 +74,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+ 	struct ls_pcie_ep *pcie = dev_id;
+ 	struct dw_pcie *pci = pcie->pci;
+ 	u32 val, cfg;
++	u8 offset;
+ 
+ 	val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ 	ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+@@ -81,6 +83,19 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 
+ 	if (val & PEX_PF0_PME_MES_DR_LUD) {
++
++		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++
++		/*
++		 * The values of the Maximum Link Width and Supported Link
++		 * Speed from the Link Capabilities Register will be lost
++		 * during link down or hot reset. Restore initial value
++		 * that configured by the Reset Configuration Word (RCW).
++		 */
++		dw_pcie_dbi_ro_wr_en(pci);
++		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
++		dw_pcie_dbi_ro_wr_dis(pci);
++
+ 		cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+ 		cfg |= PEX_PF0_CFG_READY;
+ 		ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+@@ -215,6 +230,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+ 	struct ls_pcie_ep *pcie;
+ 	struct pci_epc_features *ls_epc;
+ 	struct resource *dbi_base;
++	u8 offset;
+ 	int ret;
+ 
+ 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+@@ -251,6 +267,9 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, pcie);
+ 
++	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++	pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
++
+ 	ret = dw_pcie_ep_init(&pci->ep);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 0fe7f06f21026..267e1247d548f 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -415,7 +415,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+ 	/* Gate Master AXI clock to MHI bus during L1SS */
+ 	val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	val &= ~PARF_MSTR_AXI_CLK_EN;
+-	val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
++	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 
+ 	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index e1db909f53ec9..ccff8cde5cff6 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -900,11 +900,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ 		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ 							      PCI_CAP_ID_EXP);
+ 
+-	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+-	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+-	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+-	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+-
+ 	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+ 	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+ 	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+@@ -1887,11 +1882,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ 	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ 						      PCI_CAP_ID_EXP);
+ 
+-	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+-	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+-	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+-	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+-
+ 	/* Clear Slot Clock Configuration bit if SRNS configuration */
+ 	if (pcie->enable_srns) {
+ 		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 2d93d0c4f10db..bed3cefdaf198 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -3983,6 +3983,9 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
+ 	struct msi_desc *entry;
+ 	int ret = 0;
+ 
++	if (!pdev->msi_enabled && !pdev->msix_enabled)
++		return 0;
++
+ 	msi_lock_descs(&pdev->dev);
+ 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
+ 		irq_data = irq_get_irq_data(entry->irq);
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index 66f37e403a09c..2340dab6cd5bd 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -783,6 +783,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ 	cfg->priv = pcie;
+ 	INIT_LIST_HEAD(&pcie->ports);
+ 
++	ret = apple_msi_init(pcie);
++	if (ret)
++		return ret;
++
+ 	for_each_child_of_node(dev->of_node, of_port) {
+ 		ret = apple_pcie_setup_port(pcie, of_port);
+ 		if (ret) {
+@@ -792,7 +796,7 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ 		}
+ 	}
+ 
+-	return apple_msi_init(pcie);
++	return 0;
+ }
+ 
+ static int apple_pcie_probe(struct platform_device *pdev)
+diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
+index 5e710e4854646..dd5245904c874 100644
+--- a/drivers/pci/controller/pcie-microchip-host.c
++++ b/drivers/pci/controller/pcie-microchip-host.c
+@@ -167,12 +167,12 @@
+ #define EVENT_PCIE_DLUP_EXIT			2
+ #define EVENT_SEC_TX_RAM_SEC_ERR		3
+ #define EVENT_SEC_RX_RAM_SEC_ERR		4
+-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR		5
+-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR		6
++#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR		5
++#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR		6
+ #define EVENT_DED_TX_RAM_DED_ERR		7
+ #define EVENT_DED_RX_RAM_DED_ERR		8
+-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR		9
+-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR		10
++#define EVENT_DED_PCIE2AXI_RAM_DED_ERR		9
++#define EVENT_DED_AXI2PCIE_RAM_DED_ERR		10
+ #define EVENT_LOCAL_DMA_END_ENGINE_0		11
+ #define EVENT_LOCAL_DMA_END_ENGINE_1		12
+ #define EVENT_LOCAL_DMA_ERROR_ENGINE_0		13
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index fe0333778fd93..6111de35f84ca 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -158,7 +158,9 @@
+ #define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+ #define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
+ 
+-#define PCIE_ADDR_MASK			0xffffff00
++#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
++#define MIN_AXI_ADDR_BITS_PASSED		8
++#define PCIE_ADDR_MASK			GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED)
+ #define PCIE_CORE_AXI_CONF_BASE		0xc00000
+ #define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
+ #define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
+@@ -185,8 +187,6 @@
+ #define AXI_WRAPPER_TYPE1_CFG			0xb
+ #define AXI_WRAPPER_NOR_MSG			0xc
+ 
+-#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
+-#define MIN_AXI_ADDR_BITS_PASSED		8
+ #define PCIE_RC_SEND_PME_OFF			0x11960
+ #define ROCKCHIP_VENDOR_ID			0x1d87
+ #define PCIE_LINK_IS_L2(x) \
+diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
+index 1b97a5ab71a96..e3aab5edaf706 100644
+--- a/drivers/pci/doe.c
++++ b/drivers/pci/doe.c
+@@ -293,8 +293,8 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ static void signal_task_complete(struct pci_doe_task *task, int rv)
+ {
+ 	task->rv = rv;
+-	task->complete(task);
+ 	destroy_work_on_stack(&task->work);
++	task->complete(task);
+ }
+ 
+ static void signal_task_abort(struct pci_doe_task *task, int rv)
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 8711325605f0a..fd713abdfb9f9 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -332,17 +332,11 @@ int pciehp_check_link_status(struct controller *ctrl)
+ static int __pciehp_link_set(struct controller *ctrl, bool enable)
+ {
+ 	struct pci_dev *pdev = ctrl_dev(ctrl);
+-	u16 lnk_ctrl;
+ 
+-	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
++	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_LD,
++					   enable ? 0 : PCI_EXP_LNKCTL_LD);
+ 
+-	if (enable)
+-		lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
+-	else
+-		lnk_ctrl |= PCI_EXP_LNKCTL_LD;
+-
+-	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
+-	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 60230da957e0c..702fe577089b4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1226,6 +1226,10 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+  *
+  * On success, return 0 or 1, depending on whether or not it is necessary to
+  * restore the device's BARs subsequently (1 is returned in that case).
++ *
++ * On failure, return a negative error code.  Always return failure if @dev
++ * lacks a Power Management Capability, even if the platform was able to
++ * put the device in D0 via non-PCI means.
+  */
+ int pci_power_up(struct pci_dev *dev)
+ {
+@@ -1242,9 +1246,6 @@ int pci_power_up(struct pci_dev *dev)
+ 		else
+ 			dev->current_state = state;
+ 
+-		if (state == PCI_D0)
+-			return 0;
+-
+ 		return -EIO;
+ 	}
+ 
+@@ -1302,8 +1303,12 @@ static int pci_set_full_power_state(struct pci_dev *dev)
+ 	int ret;
+ 
+ 	ret = pci_power_up(dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		if (dev->current_state == PCI_D0)
++			return 0;
++
+ 		return ret;
++	}
+ 
+ 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+@@ -4927,7 +4932,6 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
+ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
+ {
+ 	int rc;
+-	u16 lnkctl;
+ 
+ 	/*
+ 	 * Ensure the updated LNKCTL parameters are used during link
+@@ -4939,17 +4943,14 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
+ 	if (rc)
+ 		return rc;
+ 
+-	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
+-	lnkctl |= PCI_EXP_LNKCTL_RL;
+-	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
++	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
+ 	if (pdev->clear_retrain_link) {
+ 		/*
+ 		 * Due to an erratum in some devices the Retrain Link bit
+ 		 * needs to be cleared again manually to allow the link
+ 		 * training to succeed.
+ 		 */
+-		lnkctl &= ~PCI_EXP_LNKCTL_RL;
+-		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
++		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
+ 	}
+ 
+ 	return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 3dafba0b5f411..1bf6300592644 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -199,7 +199,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ {
+ 	int same_clock = 1;
+-	u16 reg16, parent_reg, child_reg[8];
++	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
+ 	struct pci_dev *child, *parent = link->pdev;
+ 	struct pci_bus *linkbus = parent->subordinate;
+ 	/*
+@@ -221,6 +221,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ 
+ 	/* Port might be already in common clock mode */
+ 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
++	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
+ 	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
+ 		bool consistent = true;
+ 
+@@ -237,34 +238,29 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ 		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
+ 	}
+ 
++	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
+ 	/* Configure downstream component, all functions */
+ 	list_for_each_entry(child, &linkbus->devices, bus_list) {
+ 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
+-		child_reg[PCI_FUNC(child->devfn)] = reg16;
+-		if (same_clock)
+-			reg16 |= PCI_EXP_LNKCTL_CCC;
+-		else
+-			reg16 &= ~PCI_EXP_LNKCTL_CCC;
+-		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
++		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
++		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_CCC, ccc);
+ 	}
+ 
+ 	/* Configure upstream component */
+-	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+-	parent_reg = reg16;
+-	if (same_clock)
+-		reg16 |= PCI_EXP_LNKCTL_CCC;
+-	else
+-		reg16 &= ~PCI_EXP_LNKCTL_CCC;
+-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
++	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_CCC, ccc);
+ 
+ 	if (pcie_retrain_link(link->pdev, true)) {
+ 
+ 		/* Training failed. Restore common clock configurations */
+ 		pci_err(parent, "ASPM: Could not configure common clock\n");
+ 		list_for_each_entry(child, &linkbus->devices, bus_list)
+-			pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+-					   child_reg[PCI_FUNC(child->devfn)]);
+-		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
++							   PCI_EXP_LNKCTL_CCC,
++							   child_old_ccc[PCI_FUNC(child->devfn)]);
++		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
+ 	}
+ }
+ 
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 8bac3ce02609c..24a83cf5ace8c 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -998,6 +998,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 		res = window->res;
+ 		if (!res->flags && !res->start && !res->end) {
+ 			release_resource(res);
++			resource_list_destroy_entry(window);
+ 			continue;
+ 		}
+ 
+@@ -2324,6 +2325,7 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
+ 		.end = -1,
+ 	};
+ 
++	spin_lock_init(&dev->pcie_cap_lock);
+ #ifdef CONFIG_PCI_MSI
+ 	raw_spin_lock_init(&dev->msi_lock);
+ #endif
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index 5222ba1e79d0e..c684aab407f86 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -101,6 +101,7 @@ struct ddr_pmu {
+ 	const struct fsl_ddr_devtype_data *devtype_data;
+ 	int irq;
+ 	int id;
++	int active_counter;
+ };
+ 
+ static ssize_t ddr_perf_identifier_show(struct device *dev,
+@@ -495,6 +496,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
+ 
+ 	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+ 
++	if (!pmu->active_counter++)
++		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
++			EVENT_CYCLES_COUNTER, true);
++
+ 	hwc->state = 0;
+ }
+ 
+@@ -548,6 +553,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
+ 	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
+ 	ddr_perf_event_update(event);
+ 
++	if (!--pmu->active_counter)
++		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
++			EVENT_CYCLES_COUNTER, false);
++
+ 	hwc->state |= PERF_HES_STOPPED;
+ }
+ 
+@@ -565,25 +574,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
+ 
+ static void ddr_perf_pmu_enable(struct pmu *pmu)
+ {
+-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+-
+-	/* enable cycle counter if cycle is not active event list */
+-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+-		ddr_perf_counter_enable(ddr_pmu,
+-				      EVENT_CYCLES_ID,
+-				      EVENT_CYCLES_COUNTER,
+-				      true);
+ }
+ 
+ static void ddr_perf_pmu_disable(struct pmu *pmu)
+ {
+-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+-
+-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+-		ddr_perf_counter_enable(ddr_pmu,
+-				      EVENT_CYCLES_ID,
+-				      EVENT_CYCLES_COUNTER,
+-				      false);
+ }
+ 
+ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+index 1e1563f5fffc4..fbdc23953b52e 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+@@ -745,10 +745,12 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
+ 		do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ 	}
+ 
+-	inno->pixclock = vco;
+-	dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
++	inno->pixclock = DIV_ROUND_CLOSEST((unsigned long)vco, 1000) * 1000;
+ 
+-	return vco;
++	dev_dbg(inno->dev, "%s rate %lu vco %llu\n",
++		__func__, inno->pixclock, vco);
++
++	return inno->pixclock;
+ }
+ 
+ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
+@@ -790,8 +792,8 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
+ 			 RK3328_PRE_PLL_POWER_DOWN);
+ 
+ 	/* Configure pre-pll */
+-	inno_update_bits(inno, 0xa0, RK3228_PCLK_VCO_DIV_5_MASK,
+-			 RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
++	inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
++			 RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ 	inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
+ 
+ 	val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
+@@ -1021,9 +1023,10 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ 
+ 	inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+ 	if (cfg->postdiv == 1) {
+-		inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ 		inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ 			   RK3328_POST_PLL_PRE_DIV(cfg->prediv));
++		inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS |
++			   RK3328_POST_PLL_POWER_DOWN);
+ 	} else {
+ 		v = (cfg->postdiv / 2) - 1;
+ 		v &= RK3328_POST_PLL_POST_DIV_MASK;
+@@ -1031,7 +1034,8 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ 		inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ 			   RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ 		inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
+-			   RK3328_POST_PLL_REFCLK_SEL_TMDS);
++			   RK3328_POST_PLL_REFCLK_SEL_TMDS |
++			   RK3328_POST_PLL_POWER_DOWN);
+ 	}
+ 
+ 	for (v = 0; v < 14; v++)
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+index 18abc57800111..0fd2c0c451f95 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+@@ -457,37 +457,15 @@ static const unsigned int mt7981_pull_type[] = {
+ 	MTK_PULL_PUPD_R1R0_TYPE,/*34*/ MTK_PULL_PUPD_R1R0_TYPE,/*35*/
+ 	MTK_PULL_PUPD_R1R0_TYPE,/*36*/ MTK_PULL_PUPD_R1R0_TYPE,/*37*/
+ 	MTK_PULL_PUPD_R1R0_TYPE,/*38*/ MTK_PULL_PUPD_R1R0_TYPE,/*39*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*40*/ MTK_PULL_PUPD_R1R0_TYPE,/*41*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*42*/ MTK_PULL_PUPD_R1R0_TYPE,/*43*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PUPD_R1R0_TYPE,/*57*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*58*/ MTK_PULL_PUPD_R1R0_TYPE,/*59*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PUPD_R1R0_TYPE,/*61*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*62*/ MTK_PULL_PUPD_R1R0_TYPE,/*63*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*64*/ MTK_PULL_PUPD_R1R0_TYPE,/*65*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+-	MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+-	MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+-	MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+-	MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+-	MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+-	MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+-	MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+-	MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+-	MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+-	MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+-	MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+-	MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+-	MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+-	MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+-	MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+-	MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+-	MTK_PULL_PU_PD_TYPE,/*100*/
++	MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
++	MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
++	MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PU_PD_TYPE,/*45*/
++	MTK_PULL_PU_PD_TYPE,/*46*/ MTK_PULL_PU_PD_TYPE,/*47*/
++	MTK_PULL_PU_PD_TYPE,/*48*/ MTK_PULL_PU_PD_TYPE,/*49*/
++	MTK_PULL_PU_PD_TYPE,/*50*/ MTK_PULL_PU_PD_TYPE,/*51*/
++	MTK_PULL_PU_PD_TYPE,/*52*/ MTK_PULL_PU_PD_TYPE,/*53*/
++	MTK_PULL_PU_PD_TYPE,/*54*/ MTK_PULL_PU_PD_TYPE,/*55*/
++	MTK_PULL_PU_PD_TYPE,/*56*/
+ };
+ 
+ static const struct mtk_pin_reg_calc mt7981_reg_cals[] = {
+@@ -1014,6 +992,10 @@ static struct mtk_pin_soc mt7981_data = {
+ 	.ies_present = false,
+ 	.base_names = mt7981_pinctrl_register_base_names,
+ 	.nbase_names = ARRAY_SIZE(mt7981_pinctrl_register_base_names),
++	.bias_disable_set = mtk_pinconf_bias_disable_set,
++	.bias_disable_get = mtk_pinconf_bias_disable_get,
++	.bias_set = mtk_pinconf_bias_set,
++	.bias_get = mtk_pinconf_bias_get,
+ 	.pull_type = mt7981_pull_type,
+ 	.bias_set_combo = mtk_pinconf_bias_set_combo,
+ 	.bias_get_combo = mtk_pinconf_bias_get_combo,
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+index aa0ccd67f4f4e..acaac9b38aa8a 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+@@ -922,6 +922,10 @@ static struct mtk_pin_soc mt7986a_data = {
+ 	.ies_present = false,
+ 	.base_names = mt7986_pinctrl_register_base_names,
+ 	.nbase_names = ARRAY_SIZE(mt7986_pinctrl_register_base_names),
++	.bias_disable_set = mtk_pinconf_bias_disable_set,
++	.bias_disable_get = mtk_pinconf_bias_disable_get,
++	.bias_set = mtk_pinconf_bias_set,
++	.bias_get = mtk_pinconf_bias_get,
+ 	.pull_type = mt7986_pull_type,
+ 	.bias_set_combo = mtk_pinconf_bias_set_combo,
+ 	.bias_get_combo = mtk_pinconf_bias_get_combo,
+@@ -944,6 +948,10 @@ static struct mtk_pin_soc mt7986b_data = {
+ 	.ies_present = false,
+ 	.base_names = mt7986_pinctrl_register_base_names,
+ 	.nbase_names = ARRAY_SIZE(mt7986_pinctrl_register_base_names),
++	.bias_disable_set = mtk_pinconf_bias_disable_set,
++	.bias_disable_get = mtk_pinconf_bias_disable_get,
++	.bias_set = mtk_pinconf_bias_set,
++	.bias_get = mtk_pinconf_bias_get,
+ 	.pull_type = mt7986_pull_type,
+ 	.bias_set_combo = mtk_pinconf_bias_set_combo,
+ 	.bias_get_combo = mtk_pinconf_bias_get_combo,
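Both MediaTek hunks follow the same recipe: trim the per-pin pull-type table so it covers exactly the pins the SoC exposes (indices 0-56 on MT7981, with the upper pins moved to the plain PU/PD scheme) and wire up the bias_disable_set/get and bias_set/get callbacks that the combo helpers fall back on. A userspace sketch of the table-driven dispatch such helpers rely on, with hypothetical pull types and print statements standing in for register writes:

#include <stdio.h>

enum pull_type { PULL_PUPD_R1R0, PULL_PU_PD };

static const enum pull_type pull_type_tbl[] = {
	PULL_PUPD_R1R0,	/* pin 0 */
	PULL_PUPD_R1R0,	/* pin 1 */
	PULL_PU_PD,	/* pin 2 */
};

static int bias_set_combo(unsigned int pin, int pullup)
{
	if (pin >= sizeof(pull_type_tbl) / sizeof(pull_type_tbl[0]))
		return -1;	/* the table must cover every real pin */

	switch (pull_type_tbl[pin]) {
	case PULL_PUPD_R1R0:
		printf("pin %u: PUPD/R1/R0 registers, pullup=%d\n", pin, pullup);
		return 0;
	case PULL_PU_PD:
		printf("pin %u: separate PU/PD registers, pullup=%d\n", pin, pullup);
		return 0;
	}
	return -1;
}

int main(void)
{
	bias_set_combo(1, 1);
	bias_set_combo(2, 0);
	return 0;
}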
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+index 9ae10318f6f35..ea059b9c5542e 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+@@ -91,18 +91,28 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ 		mcp->reg_shift = 0;
+ 		mcp->chip.ngpio = 8;
+ 		mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
++		if (!mcp->chip.label)
++			return -ENOMEM;
+ 
+ 		config = &mcp23x08_regmap;
+ 		name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
++		if (!name)
++			return -ENOMEM;
++
+ 		break;
+ 
+ 	case MCP_TYPE_S17:
+ 		mcp->reg_shift = 1;
+ 		mcp->chip.ngpio = 16;
+ 		mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
++		if (!mcp->chip.label)
++			return -ENOMEM;
+ 
+ 		config = &mcp23x17_regmap;
+ 		name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
++		if (!name)
++			return -ENOMEM;
++
+ 		break;
+ 
+ 	case MCP_TYPE_S18:
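The mcp23s08 hunk adds the missing failure checks: devm_kasprintf() returns NULL when the allocation fails, and both the GPIO chip label and the regmap name were previously used unchecked. A userspace analogue of the pattern, with asprintf() standing in for the devm helper:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* returns NULL on allocation failure, like devm_kasprintf() */
static char *make_label(const char *prefix, int addr)
{
	char *label = NULL;

	if (asprintf(&label, "%s.%d", prefix, addr) < 0)
		return NULL;	/* the driver maps this to -ENOMEM */
	return label;
}

int main(void)
{
	char *label = make_label("mcp23s08", 1);

	if (!label)
		return 1;
	puts(label);
	free(label);
	return 0;
}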
+diff --git a/drivers/platform/chrome/chromeos_acpi.c b/drivers/platform/chrome/chromeos_acpi.c
+index 50d8a4d4352d6..1312aaaa8750b 100644
+--- a/drivers/platform/chrome/chromeos_acpi.c
++++ b/drivers/platform/chrome/chromeos_acpi.c
+@@ -90,7 +90,36 @@ static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *o
+ 	case ACPI_TYPE_STRING:
+ 		return sysfs_emit(buf, "%s\n", element->string.pointer);
+ 	case ACPI_TYPE_BUFFER:
+-		return sysfs_emit(buf, "%s\n", element->buffer.pointer);
++		{
++			int i, r, at, room_left;
++			const int byte_per_line = 16;
++
++			at = 0;
++			room_left = PAGE_SIZE - 1;
++			for (i = 0; i < element->buffer.length && room_left; i += byte_per_line) {
++				r = hex_dump_to_buffer(element->buffer.pointer + i,
++						       element->buffer.length - i,
++						       byte_per_line, 1, buf + at, room_left,
++						       false);
++				if (r > room_left)
++					goto truncating;
++				at += r;
++				room_left -= r;
++
++				r = sysfs_emit_at(buf, at, "\n");
++				if (!r)
++					goto truncating;
++				at += r;
++				room_left -= r;
++			}
++
++			buf[at] = 0;
++			return at;
++truncating:
++			dev_info_once(dev, "truncating sysfs content for %s\n", name);
++			sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
++			return PAGE_SIZE - 1;
++		}
+ 	default:
+ 		dev_err(dev, "element type %d not supported\n", element->type);
+ 		return -EINVAL;
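Rather than printing an ACPI buffer object as if it were a NUL-terminated string, the new code hex-dumps it 16 bytes per line into the page-sized sysfs buffer and falls back to a ".." marker once the remaining room runs out. A simplified userspace sketch of the same bounded loop, with hex_dump_to_buffer() replaced by a plain per-byte snprintf():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static int dump_buffer(const unsigned char *data, int len, char *buf)
{
	const int per_line = 16;
	int at = 0, room_left = PAGE_SIZE - 1;
	int i, j, r;

	for (i = 0; i < len && room_left; i += per_line) {
		for (j = 0; j < per_line && i + j < len; j++) {
			r = snprintf(buf + at, room_left + 1, "%02x ", data[i + j]);
			if (r > room_left)
				goto truncating;
			at += r;
			room_left -= r;
		}
		if (room_left < 1)
			goto truncating;
		buf[at++] = '\n';
		room_left--;
	}
	buf[at] = '\0';
	return at;

truncating:
	memcpy(buf + PAGE_SIZE - 4, "..\n", 4);	/* 4 bytes include the NUL */
	return PAGE_SIZE - 1;
}

int main(void)
{
	unsigned char data[40];
	static char page[PAGE_SIZE];
	int i, n;

	for (i = 0; i < (int)sizeof(data); i++)
		data[i] = i;
	n = dump_buffer(data, sizeof(data), page);
	printf("%.*s(%d bytes)\n", n, page, n);
	return 0;
}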
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 57bf1a9f0e766..78ed3ee22555d 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -324,7 +324,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ 
+ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+ {
+-	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
++	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ 		power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ 		amd_pmf_deinit_sps(dev);
+ 	}
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+index b68dd11cb8924..b929b4f824205 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+@@ -393,6 +393,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	struct kobject *attr_name_kobj; //individual attribute names
+ 	union acpi_object *obj = NULL;
+ 	union acpi_object *elements;
++	struct kobject *duplicate;
+ 	struct kset *tmp_set;
+ 	int min_elements;
+ 
+@@ -451,9 +452,11 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 		else
+ 			tmp_set = wmi_priv.main_dir_kset;
+ 
+-		if (kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer)) {
+-			pr_debug("duplicate attribute name found - %s\n",
+-				elements[ATTR_NAME].string.pointer);
++		duplicate = kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer);
++		if (duplicate) {
++			pr_debug("Duplicate attribute name found - %s\n",
++				 elements[ATTR_NAME].string.pointer);
++			kobject_put(duplicate);
+ 			goto nextobj;
+ 		}
+ 
+diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
+index d16c5ee172496..cac89d233c388 100644
+--- a/drivers/power/supply/qcom_pmi8998_charger.c
++++ b/drivers/power/supply/qcom_pmi8998_charger.c
+@@ -556,7 +556,8 @@ static int smb2_set_current_limit(struct smb2_chip *chip, unsigned int val)
+ static void smb2_status_change_work(struct work_struct *work)
+ {
+ 	unsigned int charger_type, current_ua;
+-	int usb_online, count, rc;
++	int usb_online = 0;
++	int count, rc;
+ 	struct smb2_chip *chip;
+ 
+ 	chip = container_of(work, struct smb2_chip, status_change_work.work);
+diff --git a/drivers/powercap/arm_scmi_powercap.c b/drivers/powercap/arm_scmi_powercap.c
+index 5231f6d52ae3a..a081f177e702e 100644
+--- a/drivers/powercap/arm_scmi_powercap.c
++++ b/drivers/powercap/arm_scmi_powercap.c
+@@ -12,6 +12,7 @@
+ #include <linux/module.h>
+ #include <linux/powercap.h>
+ #include <linux/scmi_protocol.h>
++#include <linux/slab.h>
+ 
+ #define to_scmi_powercap_zone(z)		\
+ 	container_of(z, struct scmi_powercap_zone, zone)
+@@ -19,6 +20,8 @@
+ static const struct scmi_powercap_proto_ops *powercap_ops;
+ 
+ struct scmi_powercap_zone {
++	bool registered;
++	bool invalid;
+ 	unsigned int height;
+ 	struct device *dev;
+ 	struct scmi_protocol_handle *ph;
+@@ -32,6 +35,7 @@ struct scmi_powercap_root {
+ 	unsigned int num_zones;
+ 	struct scmi_powercap_zone *spzones;
+ 	struct list_head *registered_zones;
++	struct list_head scmi_zones;
+ };
+ 
+ static struct powercap_control_type *scmi_top_pcntrl;
+@@ -271,12 +275,6 @@ static void scmi_powercap_unregister_all_zones(struct scmi_powercap_root *pr)
+ 	}
+ }
+ 
+-static inline bool
+-scmi_powercap_is_zone_registered(struct scmi_powercap_zone *spz)
+-{
+-	return !list_empty(&spz->node);
+-}
+-
+ static inline unsigned int
+ scmi_powercap_get_zone_height(struct scmi_powercap_zone *spz)
+ {
+@@ -295,11 +293,46 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
+ 	return &spz->spzones[spz->info->parent_id];
+ }
+ 
++static int scmi_powercap_register_zone(struct scmi_powercap_root *pr,
++				       struct scmi_powercap_zone *spz,
++				       struct scmi_powercap_zone *parent)
++{
++	int ret = 0;
++	struct powercap_zone *z;
++
++	if (spz->invalid) {
++		list_del(&spz->node);
++		return -EINVAL;
++	}
++
++	z = powercap_register_zone(&spz->zone, scmi_top_pcntrl, spz->info->name,
++				   parent ? &parent->zone : NULL,
++				   &zone_ops, 1, &constraint_ops);
++	if (!IS_ERR(z)) {
++		spz->height = scmi_powercap_get_zone_height(spz);
++		spz->registered = true;
++		list_move(&spz->node, &pr->registered_zones[spz->height]);
++		dev_dbg(spz->dev, "Registered node %s - parent %s - height:%d\n",
++			spz->info->name, parent ? parent->info->name : "ROOT",
++			spz->height);
++	} else {
++		list_del(&spz->node);
++		ret = PTR_ERR(z);
++		dev_err(spz->dev,
++			"Error registering node:%s - parent:%s - h:%d - ret:%d\n",
++			spz->info->name,
++			parent ? parent->info->name : "ROOT",
++			spz->height, ret);
++	}
++
++	return ret;
++}
++
+ /**
+- * scmi_powercap_register_zone  - Register an SCMI powercap zone recursively
++ * scmi_zones_register- Register SCMI powercap zones starting from parent zones
+  *
++ * @dev: A reference to the SCMI device
+  * @pr: A reference to the root powercap zones descriptors
+- * @spz: A reference to the SCMI powercap zone to register
+  *
+  * When registering SCMI powercap zones with the powercap framework we should
+  * take care to always register zones starting from the root ones and to
+@@ -309,10 +342,10 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
+  * zones provided by the SCMI platform firmware is built to comply with such
+  * requirement.
+  *
+- * This function, given an SCMI powercap zone to register, takes care to walk
+- * the SCMI powercap zones tree up to the root looking recursively for
+- * unregistered parent zones before registering the provided zone; at the same
+- * time each registered zone height in such a tree is accounted for and each
++ * This function, given the set of SCMI powercap zones to register, takes care
++ * to walk the SCMI powercap zones trees up to the root registering any
++ * unregistered parent zone before registering the child zones; at the same
++ * time each registered-zone height in such a tree is accounted for and each
+  * zone, once registered, is stored in the @registered_zones array that is
+  * indexed by zone height: this way will be trivial, at unregister time, to walk
+  * the @registered_zones array backward and unregister all the zones starting
+@@ -330,57 +363,55 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
+  *
+  * Return: 0 on Success
+  */
+-static int scmi_powercap_register_zone(struct scmi_powercap_root *pr,
+-				       struct scmi_powercap_zone *spz)
++static int scmi_zones_register(struct device *dev,
++			       struct scmi_powercap_root *pr)
+ {
+ 	int ret = 0;
+-	struct scmi_powercap_zone *parent;
+-
+-	if (!spz->info)
+-		return ret;
++	unsigned int sp = 0, reg_zones = 0;
++	struct scmi_powercap_zone *spz, **zones_stack;
+ 
+-	parent = scmi_powercap_get_parent_zone(spz);
+-	if (parent && !scmi_powercap_is_zone_registered(parent)) {
+-		/*
+-		 * Bail out if a parent domain was marked as unsupported:
+-		 * only domains participating as leaves can be skipped.
+-		 */
+-		if (!parent->info)
+-			return -ENODEV;
++	zones_stack = kcalloc(pr->num_zones, sizeof(spz), GFP_KERNEL);
++	if (!zones_stack)
++		return -ENOMEM;
+ 
+-		ret = scmi_powercap_register_zone(pr, parent);
+-		if (ret)
+-			return ret;
+-	}
++	spz = list_first_entry_or_null(&pr->scmi_zones,
++				       struct scmi_powercap_zone, node);
++	while (spz) {
++		struct scmi_powercap_zone *parent;
+ 
+-	if (!scmi_powercap_is_zone_registered(spz)) {
+-		struct powercap_zone *z;
+-
+-		z = powercap_register_zone(&spz->zone,
+-					   scmi_top_pcntrl,
+-					   spz->info->name,
+-					   parent ? &parent->zone : NULL,
+-					   &zone_ops, 1, &constraint_ops);
+-		if (!IS_ERR(z)) {
+-			spz->height = scmi_powercap_get_zone_height(spz);
+-			list_add(&spz->node,
+-				 &pr->registered_zones[spz->height]);
+-			dev_dbg(spz->dev,
+-				"Registered node %s - parent %s - height:%d\n",
+-				spz->info->name,
+-				parent ? parent->info->name : "ROOT",
+-				spz->height);
+-			ret = 0;
++		parent = scmi_powercap_get_parent_zone(spz);
++		if (parent && !parent->registered) {
++			zones_stack[sp++] = spz;
++			spz = parent;
+ 		} else {
+-			ret = PTR_ERR(z);
+-			dev_err(spz->dev,
+-				"Error registering node:%s - parent:%s - h:%d - ret:%d\n",
+-				 spz->info->name,
+-				 parent ? parent->info->name : "ROOT",
+-				 spz->height, ret);
++			ret = scmi_powercap_register_zone(pr, spz, parent);
++			if (!ret) {
++				reg_zones++;
++			} else if (sp) {
++				/* Failed to register a non-leaf zone.
++				 * Bail-out.
++				 */
++				dev_err(dev,
++					"Failed to register non-leaf zone - ret:%d\n",
++					ret);
++				scmi_powercap_unregister_all_zones(pr);
++				reg_zones = 0;
++				goto out;
++			}
++			/* Pick next zone to process */
++			if (sp)
++				spz = zones_stack[--sp];
++			else
++				spz = list_first_entry_or_null(&pr->scmi_zones,
++							       struct scmi_powercap_zone,
++							       node);
+ 		}
+ 	}
+ 
++out:
++	kfree(zones_stack);
++	dev_info(dev, "Registered %d SCMI Powercap domains !\n", reg_zones);
++
+ 	return ret;
+ }
+ 
+@@ -424,6 +455,8 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
+ 	if (!pr->registered_zones)
+ 		return -ENOMEM;
+ 
++	INIT_LIST_HEAD(&pr->scmi_zones);
++
+ 	for (i = 0, spz = pr->spzones; i < pr->num_zones; i++, spz++) {
+ 		/*
+ 		 * Powercap domains are validate by the protocol layer, i.e.
+@@ -438,6 +471,7 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
+ 		INIT_LIST_HEAD(&spz->node);
+ 		INIT_LIST_HEAD(&pr->registered_zones[i]);
+ 
++		list_add_tail(&spz->node, &pr->scmi_zones);
+ 		/*
+ 		 * Forcibly skip powercap domains using an abstract scale.
+ 		 * Note that only leaves domains can be skipped, so this could
+@@ -448,7 +482,7 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
+ 			dev_warn(dev,
+ 				 "Abstract power scale not supported. Skip %s.\n",
+ 				 spz->info->name);
+-			spz->info = NULL;
++			spz->invalid = true;
+ 			continue;
+ 		}
+ 	}
+@@ -457,21 +491,12 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
+ 	 * Scan array of retrieved SCMI powercap domains and register them
+ 	 * recursively starting from the root domains.
+ 	 */
+-	for (i = 0, spz = pr->spzones; i < pr->num_zones; i++, spz++) {
+-		ret = scmi_powercap_register_zone(pr, spz);
+-		if (ret) {
+-			dev_err(dev,
+-				"Failed to register powercap zone %s - ret:%d\n",
+-				spz->info->name, ret);
+-			scmi_powercap_unregister_all_zones(pr);
+-			return ret;
+-		}
+-	}
++	ret = scmi_zones_register(dev, pr);
++	if (ret)
++		return ret;
+ 
+ 	dev_set_drvdata(dev, pr);
+ 
+-	dev_info(dev, "Registered %d SCMI Powercap domains !\n", pr->num_zones);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index 8fac57b28f8a3..e618ed5aa8caa 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -658,8 +658,6 @@ static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
+ 			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ 	[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ 			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+-	[PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
+-				RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
+ 	[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ 			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
+ 	[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+@@ -1458,7 +1456,7 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
+ 			}
+ 		}
+ 
+-		if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
++		if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
+ 			rd->rpl[i].name = NULL;
+ 	}
+ }
+diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
+index 6210babb0741a..8ebcddf91f7b7 100644
+--- a/drivers/pwm/Kconfig
++++ b/drivers/pwm/Kconfig
+@@ -505,7 +505,7 @@ config PWM_ROCKCHIP
+ 
+ config PWM_RZ_MTU3
+ 	tristate "Renesas RZ/G2L MTU3a PWM Timer support"
+-	depends on RZ_MTU3 || COMPILE_TEST
++	depends on RZ_MTU3
+ 	depends on HAS_IOMEM
+ 	help
+ 	  This driver exposes the MTU3a PWM Timer controller found in Renesas
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index cf073bac79f73..48dca3503fa45 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -921,7 +921,7 @@ static void stm32_rproc_remove(struct platform_device *pdev)
+ 	rproc_free(rproc);
+ }
+ 
+-static int __maybe_unused stm32_rproc_suspend(struct device *dev)
++static int stm32_rproc_suspend(struct device *dev)
+ {
+ 	struct rproc *rproc = dev_get_drvdata(dev);
+ 	struct stm32_rproc *ddata = rproc->priv;
+@@ -932,7 +932,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int __maybe_unused stm32_rproc_resume(struct device *dev)
++static int stm32_rproc_resume(struct device *dev)
+ {
+ 	struct rproc *rproc = dev_get_drvdata(dev);
+ 	struct stm32_rproc *ddata = rproc->priv;
+@@ -943,16 +943,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+-			 stm32_rproc_suspend, stm32_rproc_resume);
++static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
++				stm32_rproc_suspend, stm32_rproc_resume);
+ 
+ static struct platform_driver stm32_rproc_driver = {
+ 	.probe = stm32_rproc_probe,
+ 	.remove_new = stm32_rproc_remove,
+ 	.driver = {
+ 		.name = "stm32-rproc",
+-		.pm = &stm32_rproc_pm_ops,
+-		.of_match_table = of_match_ptr(stm32_rproc_match),
++		.pm = pm_ptr(&stm32_rproc_pm_ops),
++		.of_match_table = stm32_rproc_match,
+ 	},
+ };
+ module_platform_driver(stm32_rproc_driver);
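The stm32_rproc hunks move to DEFINE_SIMPLE_DEV_PM_OPS plus pm_ptr(), so the suspend/resume callbacks are always compiled (the __maybe_unused annotations can go) while the ops pointer itself evaluates to NULL when power management is configured out, letting the compiler drop the dead callbacks. A userspace sketch of the conditional-pointer trick, with IS_ENABLED(CONFIG_PM) reduced to a plain macro:

#include <stdio.h>

struct dev_pm_ops { int (*suspend)(void); int (*resume)(void); };

static int my_suspend(void) { puts("suspend"); return 0; }
static int my_resume(void)  { puts("resume");  return 0; }

static const struct dev_pm_ops my_pm_ops = { my_suspend, my_resume };

#define CONFIG_PM_ENABLED 1	/* stand-in for IS_ENABLED(CONFIG_PM) */
/* the ops expression stays referenced either way, so nothing is "unused" */
#define pm_ptr(ops) (CONFIG_PM_ENABLED ? (ops) : NULL)

int main(void)
{
	const struct dev_pm_ops *pm = pm_ptr(&my_pm_ops);

	if (pm)
		pm->suspend();
	return 0;
}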
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 1beb40a1d3df2..e4015db99899d 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -221,6 +221,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
+ 
+ 	channel->glink = glink;
+ 	channel->name = kstrdup(name, GFP_KERNEL);
++	if (!channel->name) {
++		kfree(channel);
++		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	init_completion(&channel->open_req);
+ 	init_completion(&channel->open_ack);
+diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
+index 620fab01b710b..c4e36650c4264 100644
+--- a/drivers/s390/block/dasd_devmap.c
++++ b/drivers/s390/block/dasd_devmap.c
+@@ -1378,16 +1378,12 @@ static ssize_t dasd_vendor_show(struct device *dev,
+ 
+ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
+ 
+-#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +\
+-		     /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +\
+-		     /* vduit */ 32 + 1)
+-
+ static ssize_t
+ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
++	char uid_string[DASD_UID_STRLEN];
+ 	struct dasd_device *device;
+ 	struct dasd_uid uid;
+-	char uid_string[UID_STRLEN];
+ 	char ua_string[3];
+ 
+ 	device = dasd_device_from_cdev(to_ccwdev(dev));
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 8587e423169ec..bd89b032968a4 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -1079,12 +1079,12 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ 
+ 	create_uid(conf, &uid);
+ 	if (strlen(uid.vduit) > 0)
+-		snprintf(print_uid, sizeof(*print_uid),
++		snprintf(print_uid, DASD_UID_STRLEN,
+ 			 "%s.%s.%04x.%02x.%s",
+ 			 uid.vendor, uid.serial, uid.ssid,
+ 			 uid.real_unit_addr, uid.vduit);
+ 	else
+-		snprintf(print_uid, sizeof(*print_uid),
++		snprintf(print_uid, DASD_UID_STRLEN,
+ 			 "%s.%s.%04x.%02x",
+ 			 uid.vendor, uid.serial, uid.ssid,
+ 			 uid.real_unit_addr);
+@@ -1093,8 +1093,8 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ static int dasd_eckd_check_cabling(struct dasd_device *device,
+ 				   void *conf_data, __u8 lpm)
+ {
++	char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
+ 	struct dasd_eckd_private *private = device->private;
+-	char print_path_uid[60], print_device_uid[60];
+ 	struct dasd_conf path_conf;
+ 
+ 	path_conf.data = conf_data;
+@@ -1293,9 +1293,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
+ 	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
+ 	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
+ 	struct dasd_conf_data *conf_data;
++	char print_uid[DASD_UID_STRLEN];
+ 	struct dasd_conf path_conf;
+ 	unsigned long flags;
+-	char print_uid[60];
+ 	int rc, pos;
+ 
+ 	opm = 0;
+@@ -5855,8 +5855,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
+ static int dasd_eckd_reload_device(struct dasd_device *device)
+ {
+ 	struct dasd_eckd_private *private = device->private;
++	char print_uid[DASD_UID_STRLEN];
+ 	int rc, old_base;
+-	char print_uid[60];
+ 	struct dasd_uid uid;
+ 	unsigned long flags;
+ 
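Besides centralizing the buffer size, the DASD_UID_STRLEN conversion fixes a real bug in dasd_eckd_get_uid_string(): print_uid arrives there as a char * parameter, so the old sizeof(*print_uid) evaluated to 1 and snprintf() could never write more than the terminating NUL. A minimal demonstration of the pitfall (60 matches the arithmetic in the dasd_int.h hunk below):

#include <stdio.h>

#define DASD_UID_STRLEN 60

static void fill_uid(char *buf)
{
	/* BROKEN: buf is a pointer here, so sizeof(*buf) == 1 */
	snprintf(buf, sizeof(*buf), "%s.%s", "IBM", "12345");
	printf("with sizeof(*buf):    \"%s\"\n", buf);

	/* FIXED: pass the real capacity explicitly */
	snprintf(buf, DASD_UID_STRLEN, "%s.%s", "IBM", "12345");
	printf("with DASD_UID_STRLEN: \"%s\"\n", buf);
}

int main(void)
{
	char uid[DASD_UID_STRLEN];

	fill_uid(uid);
	return 0;
}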
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index 0aa56351da720..8a4dbe9d77411 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -259,6 +259,10 @@ struct dasd_uid {
+ 	char vduit[33];
+ };
+ 
++#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +	\
++			  /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +	\
++			  /* vduit */ 32 + 1)
++
+ /*
+  * PPRC Status data
+  */
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index 09acf3853a77e..4abd8f9ec2527 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -412,6 +412,7 @@ removeseg:
+ 	}
+ 	list_del(&dev_info->lh);
+ 
++	dax_remove_host(dev_info->gd);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ 	del_gendisk(dev_info->gd);
+@@ -707,9 +708,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
+ 	goto out;
+ 
+ out_dax_host:
++	put_device(&dev_info->dev);
+ 	dax_remove_host(dev_info->gd);
+ out_dax:
+-	put_device(&dev_info->dev);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ put_dev:
+@@ -789,6 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
+ 	}
+ 
+ 	list_del(&dev_info->lh);
++	dax_remove_host(dev_info->gd);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ 	del_gendisk(dev_info->gd);
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index e58bfd2253231..396a159afdf5b 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -272,7 +272,8 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ 		card = apqns[i] >> 16;
+ 		dom = apqns[i] & 0xFFFF;
+ 		rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+-				      0, clrkey, keybuf, keybuflen);
++				      0, clrkey, keybuf, keybuflen,
++				      PKEY_TYPE_EP11);
+ 		if (rc == 0)
+ 			break;
+ 	}
+@@ -287,10 +288,9 @@ out:
+ /*
+  * Find card and transform EP11 secure key into protected key.
+  */
+-static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
+-			     u32 *protkeylen, u32 *protkeytype)
++static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
++			     u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ {
+-	struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ 	u32 nr_apqns, *apqns = NULL;
+ 	u16 card, dom;
+ 	int i, rc;
+@@ -299,7 +299,8 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
+ 
+ 	/* build a list of apqns suitable for this key */
+ 	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+-			    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
++			    ZCRYPT_CEX7, EP11_API_V,
++			    ep11_kb_wkvp(key, keylen));
+ 	if (rc)
+ 		goto out;
+ 
+@@ -307,7 +308,7 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
+ 	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ 		card = apqns[i] >> 16;
+ 		dom = apqns[i] & 0xFFFF;
+-		rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
++		rc = ep11_kblob2protkey(card, dom, key, keylen,
+ 					protkey, protkeylen, protkeytype);
+ 		if (rc == 0)
+ 			break;
+@@ -495,7 +496,7 @@ try_via_ep11:
+ 			      tmpbuf, &tmpbuflen);
+ 	if (rc)
+ 		goto failure;
+-	rc = pkey_ep11key2pkey(tmpbuf,
++	rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
+ 			       protkey, protkeylen, protkeytype);
+ 	if (!rc)
+ 		goto out;
+@@ -611,7 +612,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
+ 		rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ 		if (rc)
+ 			goto out;
+-		rc = pkey_ep11key2pkey(key,
++		rc = pkey_ep11key2pkey(key, keylen,
+ 				       protkey, protkeylen, protkeytype);
+ 		break;
+ 	}
+@@ -620,7 +621,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
+ 		rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ 		if (rc)
+ 			goto out;
+-		rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
++		rc = pkey_ep11key2pkey(key, keylen,
+ 				       protkey, protkeylen, protkeytype);
+ 		break;
+ 	default:
+@@ -713,6 +714,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ 			return -EINVAL;
+ 		break;
++	case PKEY_TYPE_EP11_AES:
++		if (*keybufsize < (sizeof(struct ep11kblob_header) +
++				   MINEP11AESKEYBLOBSIZE))
++			return -EINVAL;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -729,9 +735,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i].card;
+ 		dom = apqns[i].domain;
+-		if (ktype == PKEY_TYPE_EP11) {
++		if (ktype == PKEY_TYPE_EP11 ||
++		    ktype == PKEY_TYPE_EP11_AES) {
+ 			rc = ep11_genaeskey(card, dom, ksize, kflags,
+-					    keybuf, keybufsize);
++					    keybuf, keybufsize, ktype);
+ 		} else if (ktype == PKEY_TYPE_CCA_DATA) {
+ 			rc = cca_genseckey(card, dom, ksize, keybuf);
+ 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+@@ -769,6 +776,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ 			return -EINVAL;
+ 		break;
++	case PKEY_TYPE_EP11_AES:
++		if (*keybufsize < (sizeof(struct ep11kblob_header) +
++				   MINEP11AESKEYBLOBSIZE))
++			return -EINVAL;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -787,9 +799,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i].card;
+ 		dom = apqns[i].domain;
+-		if (ktype == PKEY_TYPE_EP11) {
++		if (ktype == PKEY_TYPE_EP11 ||
++		    ktype == PKEY_TYPE_EP11_AES) {
+ 			rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+-					      clrkey, keybuf, keybufsize);
++					      clrkey, keybuf, keybufsize,
++					      ktype);
+ 		} else if (ktype == PKEY_TYPE_CCA_DATA) {
+ 			rc = cca_clr2seckey(card, dom, ksize,
+ 					    clrkey, keybuf);
+@@ -895,10 +909,11 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
+ 		if (ktype)
+ 			*ktype = PKEY_TYPE_EP11;
+ 		if (ksize)
+-			*ksize = kb->head.keybitlen;
++			*ksize = kb->head.bitlen;
+ 
+ 		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+-				    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
++				    ZCRYPT_CEX7, EP11_API_V,
++				    ep11_kb_wkvp(key, keylen));
+ 		if (rc)
+ 			goto out;
+ 
+@@ -908,6 +923,30 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
+ 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
+ 		*domain = ((struct pkey_apqn *)_apqns)->domain;
+ 
++	} else if (hdr->type == TOKTYPE_NON_CCA &&
++		   hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
++		struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
++
++		rc = ep11_check_aes_key_with_hdr(debug_info, 3,
++						 key, keylen, 1);
++		if (rc)
++			goto out;
++		if (ktype)
++			*ktype = PKEY_TYPE_EP11_AES;
++		if (ksize)
++			*ksize = kh->bitlen;
++
++		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
++				    ZCRYPT_CEX7, EP11_API_V,
++				    ep11_kb_wkvp(key, keylen));
++		if (rc)
++			goto out;
++
++		if (flags)
++			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
++
++		*cardnr = ((struct pkey_apqn *)_apqns)->card;
++		*domain = ((struct pkey_apqn *)_apqns)->domain;
+ 	} else {
+ 		rc = -EINVAL;
+ 	}
+@@ -949,10 +988,12 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		}
+ 	} else if (hdr->type == TOKTYPE_NON_CCA) {
+ 		if (hdr->version == TOKVER_EP11_AES) {
+-			if (keylen < sizeof(struct ep11keyblob))
+-				return -EINVAL;
+ 			if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ 				return -EINVAL;
++		} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
++			if (ep11_check_aes_key_with_hdr(debug_info, 3,
++							key, keylen, 1))
++				return -EINVAL;
+ 		} else {
+ 			return pkey_nonccatok2pkey(key, keylen,
+ 						   protkey, protkeylen,
+@@ -980,10 +1021,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 						protkey, protkeylen,
+ 						protkeytype);
+ 		} else {
+-			/* EP11 AES secure key blob */
+-			struct ep11keyblob *kb = (struct ep11keyblob *)key;
+-
+-			rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
++			rc = ep11_kblob2protkey(card, dom, key, keylen,
+ 						protkey, protkeylen,
+ 						protkeytype);
+ 		}
+@@ -1243,12 +1281,14 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		     hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ 		    is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+ 			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+-						protkey, protkeylen, protkeytype);
++						protkey, protkeylen,
++						protkeytype);
+ 		else if (hdr->type == TOKTYPE_NON_CCA &&
+ 			 hdr->version == TOKVER_EP11_AES &&
+ 			 is_ep11_keyblob(key))
+ 			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+-						protkey, protkeylen, protkeytype);
++						protkey, protkeylen,
++						protkeytype);
+ 		else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ 			 hdr->version == TOKVER_CCA_AES)
+ 			rc = cca_sec2protkey(card, dom, key, protkey,
+@@ -1466,7 +1506,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ 		if (IS_ERR(apqns))
+ 			return PTR_ERR(apqns);
+-		kkey = kmalloc(klen, GFP_KERNEL);
++		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
+ 			return -ENOMEM;
+@@ -1508,7 +1548,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ 		if (IS_ERR(apqns))
+ 			return PTR_ERR(apqns);
+-		kkey = kmalloc(klen, GFP_KERNEL);
++		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
+ 			return -ENOMEM;
+@@ -2102,7 +2142,7 @@ static struct attribute_group ccacipher_attr_group = {
+  * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+  * This function and the sysfs attributes using it provide EP11 key blobs
+  * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+- * 320 bytes.
++ * 336 bytes.
+  */
+ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 				       bool is_xts, char *buf, loff_t off,
+@@ -2130,7 +2170,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i] >> 16;
+ 		dom = apqns[i] & 0xFFFF;
+-		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
++		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
++				    PKEY_TYPE_EP11_AES);
+ 		if (rc == 0)
+ 			break;
+ 	}
+@@ -2140,7 +2181,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 	if (is_xts) {
+ 		keysize = MAXEP11AESKEYBLOBSIZE;
+ 		buf += MAXEP11AESKEYBLOBSIZE;
+-		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
++		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
++				    PKEY_TYPE_EP11_AES);
+ 		if (rc == 0)
+ 			return 2 * MAXEP11AESKEYBLOBSIZE;
+ 	}
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
+index 958f5ee47f1b0..669ad6f5d5b07 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.c
++++ b/drivers/s390/crypto/zcrypt_ep11misc.c
+@@ -113,6 +113,109 @@ static void __exit card_cache_free(void)
+ 	spin_unlock_bh(&card_list_lock);
+ }
+ 
++static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
++			 struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
++			 u8 **kbpl, size_t *kbplsize)
++{
++	struct ep11kblob_header *hdr = NULL;
++	size_t hdrsize, plsize = 0;
++	int rc = -EINVAL;
++	u8 *pl = NULL;
++
++	if (kblen < sizeof(struct ep11kblob_header))
++		goto out;
++	hdr = (struct ep11kblob_header *)kb;
++
++	switch (kbver) {
++	case TOKVER_EP11_AES:
++		/* header overlays the payload */
++		hdrsize = 0;
++		break;
++	case TOKVER_EP11_ECC_WITH_HEADER:
++	case TOKVER_EP11_AES_WITH_HEADER:
++		/* payload starts after the header */
++		hdrsize = sizeof(struct ep11kblob_header);
++		break;
++	default:
++		goto out;
++	}
++
++	plsize = kblen - hdrsize;
++	pl = (u8 *)kb + hdrsize;
++
++	if (kbhdr)
++		*kbhdr = hdr;
++	if (kbhdrsize)
++		*kbhdrsize = hdrsize;
++	if (kbpl)
++		*kbpl = pl;
++	if (kbplsize)
++		*kbplsize = plsize;
++
++	rc = 0;
++out:
++	return rc;
++}
++
++static int ep11_kb_decode(const u8 *kb, size_t kblen,
++			  struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
++			  struct ep11keyblob **kbpl, size_t *kbplsize)
++{
++	struct ep11kblob_header *tmph, *hdr = NULL;
++	size_t hdrsize = 0, plsize = 0;
++	struct ep11keyblob *pl = NULL;
++	int rc = -EINVAL;
++	u8 *tmpp;
++
++	if (kblen < sizeof(struct ep11kblob_header))
++		goto out;
++	tmph = (struct ep11kblob_header *)kb;
++
++	if (tmph->type != TOKTYPE_NON_CCA &&
++	    tmph->len > kblen)
++		goto out;
++
++	if (ep11_kb_split(kb, kblen, tmph->version,
++			  &hdr, &hdrsize, &tmpp, &plsize))
++		goto out;
++
++	if (plsize < sizeof(struct ep11keyblob))
++		goto out;
++
++	if (!is_ep11_keyblob(tmpp))
++		goto out;
++
++	pl = (struct ep11keyblob *)tmpp;
++	plsize = hdr->len - hdrsize;
++
++	if (kbhdr)
++		*kbhdr = hdr;
++	if (kbhdrsize)
++		*kbhdrsize = hdrsize;
++	if (kbpl)
++		*kbpl = pl;
++	if (kbplsize)
++		*kbplsize = plsize;
++
++	rc = 0;
++out:
++	return rc;
++}
++
++/*
++ * For valid ep11 keyblobs, returns a reference to the wrappingkey verification
++ * pattern. Otherwise NULL.
++ */
++const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
++{
++	struct ep11keyblob *kb;
++
++	if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL))
++		return NULL;
++	return kb->wkvp;
++}
++EXPORT_SYMBOL(ep11_kb_wkvp);
++
+ /*
+  * Simple check if the key blob is a valid EP11 AES key blob with header.
+  */
+@@ -664,8 +767,9 @@ EXPORT_SYMBOL(ep11_get_domain_info);
+  */
+ #define KEY_ATTR_DEFAULTS 0x00200c00
+ 
+-int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		   u8 *keybuf, size_t *keybufsize)
++static int _ep11_genaeskey(u16 card, u16 domain,
++			   u32 keybitsize, u32 keygenflags,
++			   u8 *keybuf, size_t *keybufsize)
+ {
+ 	struct keygen_req_pl {
+ 		struct pl_head head;
+@@ -701,7 +805,6 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	struct ep11_cprb *req = NULL, *rep = NULL;
+ 	struct ep11_target_dev target;
+ 	struct ep11_urb *urb = NULL;
+-	struct ep11keyblob *kb;
+ 	int api, rc = -ENOMEM;
+ 
+ 	switch (keybitsize) {
+@@ -780,14 +883,9 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 		goto out;
+ 	}
+ 
+-	/* copy key blob and set header values */
++	/* copy key blob */
+ 	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ 	*keybufsize = rep_pl->data_len;
+-	kb = (struct ep11keyblob *)keybuf;
+-	kb->head.type = TOKTYPE_NON_CCA;
+-	kb->head.len = rep_pl->data_len;
+-	kb->head.version = TOKVER_EP11_AES;
+-	kb->head.keybitlen = keybitsize;
+ 
+ out:
+ 	kfree(req);
+@@ -795,6 +893,43 @@ out:
+ 	kfree(urb);
+ 	return rc;
+ }
++
++int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
++		   u8 *keybuf, size_t *keybufsize, u32 keybufver)
++{
++	struct ep11kblob_header *hdr;
++	size_t hdr_size, pl_size;
++	u8 *pl;
++	int rc;
++
++	switch (keybufver) {
++	case TOKVER_EP11_AES:
++	case TOKVER_EP11_AES_WITH_HEADER:
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
++			   &hdr, &hdr_size, &pl, &pl_size);
++	if (rc)
++		return rc;
++
++	rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
++			     pl, &pl_size);
++	if (rc)
++		return rc;
++
++	*keybufsize = hdr_size + pl_size;
++
++	/* update header information */
++	hdr->type = TOKTYPE_NON_CCA;
++	hdr->len = *keybufsize;
++	hdr->version = keybufver;
++	hdr->bitlen = keybitsize;
++
++	return 0;
++}
+ EXPORT_SYMBOL(ep11_genaeskey);
+ 
+ static int ep11_cryptsingle(u16 card, u16 domain,
+@@ -924,12 +1059,12 @@ out:
+ 	return rc;
+ }
+ 
+-static int ep11_unwrapkey(u16 card, u16 domain,
+-			  const u8 *kek, size_t keksize,
+-			  const u8 *enckey, size_t enckeysize,
+-			  u32 mech, const u8 *iv,
+-			  u32 keybitsize, u32 keygenflags,
+-			  u8 *keybuf, size_t *keybufsize)
++static int _ep11_unwrapkey(u16 card, u16 domain,
++			   const u8 *kek, size_t keksize,
++			   const u8 *enckey, size_t enckeysize,
++			   u32 mech, const u8 *iv,
++			   u32 keybitsize, u32 keygenflags,
++			   u8 *keybuf, size_t *keybufsize)
+ {
+ 	struct uw_req_pl {
+ 		struct pl_head head;
+@@ -966,7 +1101,6 @@ static int ep11_unwrapkey(u16 card, u16 domain,
+ 	struct ep11_cprb *req = NULL, *rep = NULL;
+ 	struct ep11_target_dev target;
+ 	struct ep11_urb *urb = NULL;
+-	struct ep11keyblob *kb;
+ 	size_t req_pl_size;
+ 	int api, rc = -ENOMEM;
+ 	u8 *p;
+@@ -1048,14 +1182,9 @@ static int ep11_unwrapkey(u16 card, u16 domain,
+ 		goto out;
+ 	}
+ 
+-	/* copy key blob and set header values */
++	/* copy key blob */
+ 	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ 	*keybufsize = rep_pl->data_len;
+-	kb = (struct ep11keyblob *)keybuf;
+-	kb->head.type = TOKTYPE_NON_CCA;
+-	kb->head.len = rep_pl->data_len;
+-	kb->head.version = TOKVER_EP11_AES;
+-	kb->head.keybitlen = keybitsize;
+ 
+ out:
+ 	kfree(req);
+@@ -1064,10 +1193,46 @@ out:
+ 	return rc;
+ }
+ 
+-static int ep11_wrapkey(u16 card, u16 domain,
+-			const u8 *key, size_t keysize,
+-			u32 mech, const u8 *iv,
+-			u8 *databuf, size_t *datasize)
++static int ep11_unwrapkey(u16 card, u16 domain,
++			  const u8 *kek, size_t keksize,
++			  const u8 *enckey, size_t enckeysize,
++			  u32 mech, const u8 *iv,
++			  u32 keybitsize, u32 keygenflags,
++			  u8 *keybuf, size_t *keybufsize,
++			  u8 keybufver)
++{
++	struct ep11kblob_header *hdr;
++	size_t hdr_size, pl_size;
++	u8 *pl;
++	int rc;
++
++	rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
++			   &hdr, &hdr_size, &pl, &pl_size);
++	if (rc)
++		return rc;
++
++	rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
++			     mech, iv, keybitsize, keygenflags,
++			     pl, &pl_size);
++	if (rc)
++		return rc;
++
++	*keybufsize = hdr_size + pl_size;
++
++	/* update header information */
++	hdr = (struct ep11kblob_header *)keybuf;
++	hdr->type = TOKTYPE_NON_CCA;
++	hdr->len = *keybufsize;
++	hdr->version = keybufver;
++	hdr->bitlen = keybitsize;
++
++	return 0;
++}
++
++static int _ep11_wrapkey(u16 card, u16 domain,
++			 const u8 *key, size_t keysize,
++			 u32 mech, const u8 *iv,
++			 u8 *databuf, size_t *datasize)
+ {
+ 	struct wk_req_pl {
+ 		struct pl_head head;
+@@ -1097,20 +1262,10 @@ static int ep11_wrapkey(u16 card, u16 domain,
+ 	struct ep11_cprb *req = NULL, *rep = NULL;
+ 	struct ep11_target_dev target;
+ 	struct ep11_urb *urb = NULL;
+-	struct ep11keyblob *kb;
+ 	size_t req_pl_size;
+ 	int api, rc = -ENOMEM;
+-	bool has_header = false;
+ 	u8 *p;
+ 
+-	/* maybe the session field holds a header with key info */
+-	kb = (struct ep11keyblob *)key;
+-	if (kb->head.type == TOKTYPE_NON_CCA &&
+-	    kb->head.version == TOKVER_EP11_AES) {
+-		has_header = true;
+-		keysize = min_t(size_t, kb->head.len, keysize);
+-	}
+-
+ 	/* request cprb and payload */
+ 	req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ 		+ ASN1TAGLEN(keysize) + 4;
+@@ -1135,11 +1290,6 @@ static int ep11_wrapkey(u16 card, u16 domain,
+ 	}
+ 	/* key blob */
+ 	p += asn1tag_write(p, 0x04, key, keysize);
+-	/* maybe the key argument needs the head data cleaned out */
+-	if (has_header) {
+-		kb = (struct ep11keyblob *)(p - keysize);
+-		memset(&kb->head, 0, sizeof(kb->head));
+-	}
+ 	/* empty kek tag */
+ 	*p++ = 0x04;
+ 	*p++ = 0;
+@@ -1198,10 +1348,10 @@ out:
+ }
+ 
+ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
++		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
++		     u32 keytype)
+ {
+ 	int rc;
+-	struct ep11keyblob *kb;
+ 	u8 encbuf[64], *kek = NULL;
+ 	size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+ 
+@@ -1223,17 +1373,15 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	}
+ 
+ 	/* Step 1: generate AES 256 bit random kek key */
+-	rc = ep11_genaeskey(card, domain, 256,
+-			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+-			    kek, &keklen);
++	rc = _ep11_genaeskey(card, domain, 256,
++			     0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
++			     kek, &keklen);
+ 	if (rc) {
+ 		DEBUG_ERR(
+ 			"%s generate kek key failed, rc=%d\n",
+ 			__func__, rc);
+ 		goto out;
+ 	}
+-	kb = (struct ep11keyblob *)kek;
+-	memset(&kb->head, 0, sizeof(kb->head));
+ 
+ 	/* Step 2: encrypt clear key value with the kek key */
+ 	rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+@@ -1248,7 +1396,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	/* Step 3: import the encrypted key value as a new key */
+ 	rc = ep11_unwrapkey(card, domain, kek, keklen,
+ 			    encbuf, encbuflen, 0, def_iv,
+-			    keybitsize, 0, keybuf, keybufsize);
++			    keybitsize, 0, keybuf, keybufsize, keytype);
+ 	if (rc) {
+ 		DEBUG_ERR(
+ 			"%s importing key value as new key failed,, rc=%d\n",
+@@ -1262,11 +1410,12 @@ out:
+ }
+ EXPORT_SYMBOL(ep11_clr2keyblob);
+ 
+-int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
++int ep11_kblob2protkey(u16 card, u16 dom,
++		       const u8 *keyblob, size_t keybloblen,
+ 		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ {
+-	int rc = -EIO;
+-	u8 *wkbuf = NULL;
++	struct ep11kblob_header *hdr;
++	struct ep11keyblob *key;
+ 	size_t wkbuflen, keylen;
+ 	struct wk_info {
+ 		u16 version;
+@@ -1277,31 +1426,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+ 		u8  res2[8];
+ 		u8  pkey[];
+ 	} __packed * wki;
+-	const u8 *key;
+-	struct ep11kblob_header *hdr;
++	u8 *wkbuf = NULL;
++	int rc = -EIO;
+ 
+-	/* key with or without header ? */
+-	hdr = (struct ep11kblob_header *)keyblob;
+-	if (hdr->type == TOKTYPE_NON_CCA &&
+-	    (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+-	     hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+-	    is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+-		/* EP11 AES or ECC key with header */
+-		key = keyblob + sizeof(struct ep11kblob_header);
+-		keylen = hdr->len - sizeof(struct ep11kblob_header);
+-	} else if (hdr->type == TOKTYPE_NON_CCA &&
+-		   hdr->version == TOKVER_EP11_AES &&
+-		   is_ep11_keyblob(keyblob)) {
+-		/* EP11 AES key (old style) */
+-		key = keyblob;
+-		keylen = hdr->len;
+-	} else if (is_ep11_keyblob(keyblob)) {
+-		/* raw EP11 key blob */
+-		key = keyblob;
+-		keylen = keybloblen;
+-	} else {
++	if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen))
+ 		return -EINVAL;
++
++	if (hdr->version == TOKVER_EP11_AES) {
++		/* wipe overlayed header */
++		memset(hdr, 0, sizeof(*hdr));
+ 	}
++	/* !!! hdr is no longer a valid header !!! */
+ 
+ 	/* alloc temp working buffer */
+ 	wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
+@@ -1310,8 +1445,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+ 		return -ENOMEM;
+ 
+ 	/* ep11 secure key -> protected key + info */
+-	rc = ep11_wrapkey(card, dom, key, keylen,
+-			  0, def_iv, wkbuf, &wkbuflen);
++	rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
++			   0, def_iv, wkbuf, &wkbuflen);
+ 	if (rc) {
+ 		DEBUG_ERR(
+ 			"%s rewrapping ep11 key to pkey failed, rc=%d\n",
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
+index a3eddf51242da..a0de1cccebbe0 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.h
++++ b/drivers/s390/crypto/zcrypt_ep11misc.h
+@@ -29,14 +29,7 @@ struct ep11keyblob {
+ 	union {
+ 		u8 session[32];
+ 		/* only used for PKEY_TYPE_EP11: */
+-		struct {
+-			u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+-			u8  res0;      /* unused */
+-			u16 len;       /* total length in bytes of this blob */
+-			u8  version;   /* 0x03 (TOKVER_EP11_AES) */
+-			u8  res1;      /* unused */
+-			u16 keybitlen; /* clear key bit len, 0 for unknown */
+-		} head;
++		struct ep11kblob_header head;
+ 	};
+ 	u8  wkvp[16];  /* wrapping key verification pattern */
+ 	u64 attr;      /* boolean key attributes */
+@@ -55,6 +48,12 @@ static inline bool is_ep11_keyblob(const u8 *key)
+ 	return (kb->version == EP11_STRUCT_MAGIC);
+ }
+ 
++/*
++ * For valid ep11 keyblobs, returns a reference to the wrappingkey verification
++ * pattern. Otherwise NULL.
++ */
++const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
++
+ /*
+  * Simple check if the key blob is a valid EP11 AES key blob with header.
+  * If checkcpacfexport is enabled, the key is also checked for the
+@@ -114,13 +113,14 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+  * Generate (random) EP11 AES secure key.
+  */
+ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		   u8 *keybuf, size_t *keybufsize);
++		   u8 *keybuf, size_t *keybufsize, u32 keybufver);
+ 
+ /*
+  * Generate EP11 AES secure key with given clear key value.
+  */
+ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
++		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
++		     u32 keytype);
+ 
+ /*
+  * Build a list of ep11 apqns meeting the following constrains:
+diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
+index 8aeaddc93b167..8d374ae863ba2 100644
+--- a/drivers/scsi/be2iscsi/be_iscsi.c
++++ b/drivers/scsi/be2iscsi/be_iscsi.c
+@@ -450,6 +450,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
+ 	}
+ 
+ 	nla_for_each_attr(attrib, data, dt_len, rm_len) {
++		/* ignore nla_type as it is never used */
++		if (nla_len(attrib) < sizeof(*iface_param))
++			return -EINVAL;
++
+ 		iface_param = nla_data(attrib);
+ 
+ 		if (iface_param->param_type != ISCSI_NET_PARAM)
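The added length check matters because nla_data() is immediately cast to a parameter structure: a malformed message with a short attribute payload would otherwise be read past its end. A userspace sketch of the rule with the netlink attribute framing mocked and the structure reduced to a stand-in:

#include <stdio.h>
#include <stdint.h>

struct mock_attr { uint16_t len; /* payload bytes follow */ };
struct iface_param { uint8_t param_type, param, value[2]; };

static const struct iface_param *attr_payload(const struct mock_attr *a)
{
	/* reject short payloads instead of reading past them */
	if (a->len < sizeof(struct iface_param))
		return NULL;
	return (const struct iface_param *)(a + 1);
}

int main(void)
{
	struct {
		struct mock_attr a;
		struct iface_param p;
	} msg = { { sizeof(struct iface_param) }, { 1, 2, { 3, 4 } } };
	const struct iface_param *p = attr_payload(&msg.a);

	printf("%s\n", p ? "payload accepted" : "payload too short");
	return 0;
}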
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 5c8d1ba3f8f3c..19eee108db021 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *sel;
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 
+ 	kfree_skb(fip->flogi_req);
+ 	fip->flogi_req = NULL;
+ 	list_for_each_entry(fcf, &fip->fcfs, list)
+ 		fcf->flogi_sent = 0;
+ 
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 	sel = fip->sel_fcf;
+ 
+ 	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ {
+ 	struct fc_frame *fp;
+ 	struct fc_frame_header *fh;
++	unsigned long flags;
+ 	u16 old_xid;
+ 	u8 op;
+ 	u8 mac[ETH_ALEN];
+@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ 		op = FIP_DT_FLOGI;
+ 		if (fip->mode == FIP_MODE_VN2VN)
+ 			break;
+-		spin_lock_bh(&fip->ctlr_lock);
++		spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 		kfree_skb(fip->flogi_req);
+ 		fip->flogi_req = skb;
+ 		fip->flogi_req_send = 1;
+-		spin_unlock_bh(&fip->ctlr_lock);
++		spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 		schedule_work(&fip->timer_work);
+ 		return -EINPROGRESS;
+ 	case ELS_FDISC:
+@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 	int error;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ 	fcf = fcoe_ctlr_select(fip);
+ 	if (!fcf || fcf->flogi_sent) {
+@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ 		fcoe_ctlr_solicit(fip, NULL);
+ 		error = fcoe_ctlr_flogi_send_locked(fip);
+ 	}
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 	mutex_unlock(&fip->ctlr_mutex);
+ 	return error;
+ }
+@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 	fcf = fip->sel_fcf;
+ 	if (!fcf || !fip->flogi_req_send)
+ 		goto unlock;
+@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ 	} else /* XXX */
+ 		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ }
+ 
+ /**
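All four fcoe_ctlr hunks convert ctlr_lock from the _bh spinlock variants to spin_lock_irqsave()/spin_unlock_irqrestore(). Disabling bottom halves only keeps softirqs out; if the same lock can also be taken from a context running with interrupts disabled, the _bh variants leave a deadlock window, and saving and restoring the IRQ state in a local flags word closes it without assuming anything about the caller. A compile-only userspace sketch, with the kernel primitives stubbed out, just to show the save/restore discipline:

#include <stdio.h>

/* stubs: real spinlocks and IRQ masking need the kernel */
typedef struct { int locked; } spinlock_t;
static int irqs_enabled = 1;

static void spin_lock_irqsave_stub(spinlock_t *l, unsigned long *flags)
{
	*flags = irqs_enabled;	/* remember the caller's IRQ state... */
	irqs_enabled = 0;	/* ...then mask interrupts and take the lock */
	l->locked = 1;
}

static void spin_unlock_irqrestore_stub(spinlock_t *l, unsigned long flags)
{
	l->locked = 0;
	irqs_enabled = (int)flags;	/* restore, never blindly re-enable */
}

int main(void)
{
	spinlock_t ctlr_lock = { 0 };
	unsigned long flags;

	spin_lock_irqsave_stub(&ctlr_lock, &flags);
	puts("critical section: flogi_req handed over");
	spin_unlock_irqrestore_stub(&ctlr_lock, flags);
	printf("irqs_enabled restored to %d\n", irqs_enabled);
	return 0;
}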
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index 87d8e408ccd1c..404aa7e179cba 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2026,6 +2026,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ 	u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ 	u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ 	u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
++	struct hisi_sas_complete_v2_hdr *complete_queue =
++			hisi_hba->complete_hdr[slot->cmplt_queue];
++	struct hisi_sas_complete_v2_hdr *complete_hdr =
++			&complete_queue[slot->cmplt_queue_slot];
++	u32 dw0 = le32_to_cpu(complete_hdr->dw0);
+ 	int error = -1;
+ 
+ 	if (err_phase == 1) {
+@@ -2310,7 +2315,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ 			break;
+ 		}
+ 		}
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 	}
+ 		break;
+ 	default:
+@@ -2443,7 +2449,8 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ 	{
+ 		ts->stat = SAS_SAM_STAT_GOOD;
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	}
+ 	default:
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 20e1607c62828..2f33e6b4a92fb 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2257,7 +2257,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ 			ts->stat = SAS_OPEN_REJECT;
+ 			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		}
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	case SAS_PROTOCOL_SMP:
+ 		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
+@@ -2384,7 +2385,8 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ 	case SAS_PROTOCOL_STP:
+ 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ 		ts->stat = SAS_SAM_STAT_GOOD;
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	default:
+ 		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 198edf03f9297..d7f51b84f3c78 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -537,7 +537,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
+ static int __scsi_host_match(struct device *dev, const void *data)
+ {
+ 	struct Scsi_Host *p;
+-	const unsigned short *hostnum = data;
++	const unsigned int *hostnum = data;
+ 
+ 	p = class_to_shost(dev);
+ 	return p->host_no == *hostnum;
+@@ -554,7 +554,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
+  *	that scsi_host_get() took. The put_device() below dropped
+  *	the reference from class_find_device().
+  **/
+-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
++struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
+ {
+ 	struct device *cdev;
+ 	struct Scsi_Host *shost = NULL;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 53f5492579cb7..5284584e4cd2b 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -138,6 +138,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+ static void
+ _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
+ 
++static u32
++_base_readl_ext_retry(const volatile void __iomem *addr);
++
+ /**
+  * mpt3sas_base_check_cmd_timeout - Function
+  *		to check timeout and command termination due
+@@ -213,6 +216,20 @@ _base_readl_aero(const volatile void __iomem *addr)
+ 	return ret_val;
+ }
+ 
++static u32
++_base_readl_ext_retry(const volatile void __iomem *addr)
++{
++	u32 i, ret_val;
++
++	for (i = 0 ; i < 30 ; i++) {
++		ret_val = readl(addr);
++		if (ret_val == 0)
++			continue;
++	}
++
++	return ret_val;
++}
++
+ static inline u32
+ _base_readl(const volatile void __iomem *addr)
+ {
+@@ -940,7 +957,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+ 
+ 	dump_stack();
+ 
+-	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
++	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ 		mpt3sas_print_fault_code(ioc, doorbell &
+ 		    MPI2_DOORBELL_DATA_MASK);
+@@ -6686,7 +6703,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+ {
+ 	u32 s, sc;
+ 
+-	s = ioc->base_readl(&ioc->chip->Doorbell);
++	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 	sc = s & MPI2_IOC_STATE_MASK;
+ 	return cooked ? sc : s;
+ }
+@@ -6831,7 +6848,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ 					   __func__, count, timeout));
+ 			return 0;
+ 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+-			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
++			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ 			    MPI2_IOC_STATE_FAULT) {
+ 				mpt3sas_print_fault_code(ioc, doorbell);
+@@ -6871,7 +6888,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ 	count = 0;
+ 	cntdn = 1000 * timeout;
+ 	do {
+-		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
++		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ 			dhsprintk(ioc,
+ 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+@@ -7019,7 +7036,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 	__le32 *mfp;
+ 
+ 	/* make sure doorbell is not in use */
+-	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
++	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ 		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
+ 		return -EFAULT;
+ 	}
+@@ -7068,7 +7085,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 	}
+ 
+ 	/* read the first two 16-bits, it gives the total length of the reply */
+-	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
++	reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 	    & MPI2_DOORBELL_DATA_MASK);
+ 	writel(0, &ioc->chip->HostInterruptStatus);
+ 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
+@@ -7076,7 +7093,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 			__LINE__);
+ 		return -EFAULT;
+ 	}
+-	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
++	reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 	    & MPI2_DOORBELL_DATA_MASK);
+ 	writel(0, &ioc->chip->HostInterruptStatus);
+ 
+@@ -7087,10 +7104,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 			return -EFAULT;
+ 		}
+ 		if (i >=  reply_bytes/2) /* overflow case */
+-			ioc->base_readl(&ioc->chip->Doorbell);
++			ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 		else
+ 			reply[i] = le16_to_cpu(
+-			    ioc->base_readl(&ioc->chip->Doorbell)
++			    ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 			    & MPI2_DOORBELL_DATA_MASK);
+ 		writel(0, &ioc->chip->HostInterruptStatus);
+ 	}
+@@ -7949,7 +7966,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 			goto out;
+ 		}
+ 
+-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
++		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ 		drsprintk(ioc,
+ 			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ 				   count, host_diagnostic));
+@@ -7969,7 +7986,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 	for (count = 0; count < (300000000 /
+ 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+ 
+-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
++		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ 
+ 		if (host_diagnostic == 0xFFFFFFFF) {
+ 			ioc_info(ioc,
+@@ -8359,10 +8376,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->rdpq_array_enable_assigned = 0;
+ 	ioc->use_32bit_dma = false;
+ 	ioc->dma_mask = 64;
+-	if (ioc->is_aero_ioc)
++	if (ioc->is_aero_ioc) {
+ 		ioc->base_readl = &_base_readl_aero;
+-	else
++		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
++	} else {
+ 		ioc->base_readl = &_base_readl;
++		ioc->base_readl_ext_retry = &_base_readl;
++	}
+ 	r = mpt3sas_base_map_resources(ioc);
+ 	if (r)
+ 		goto out_free_resources;
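
The helper added above retries a doorbell read that can transiently return 0
on Aero/Sea controllers. As quoted, the loop body only continues on 0 and
never breaks, so it always performs all 30 reads and returns the last value;
whether an early exit was intended is not stated in the patch. A sketch of
the early-exit variant, under that assumption (readl_retry_sketch is an
illustrative name, not a driver symbol):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Sketch: retry an MMIO read up to 30 times, stopping at the
	 * first non-zero value. */
	static u32 readl_retry_sketch(const volatile void __iomem *addr)
	{
		u32 ret_val = 0;
		int i;

		for (i = 0; i < 30; i++) {
			ret_val = readl(addr);
			if (ret_val != 0)
				break;
		}
		return ret_val;
	}
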
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 05364aa15ecdb..10055c7e4a9f7 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
+ 	u8		diag_trigger_active;
+ 	u8		atomic_desc_capable;
+ 	BASE_READ_REG	base_readl;
++	BASE_READ_REG	base_readl_ext_retry;
+ 	struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ 	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ 	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
+index f4d81127239eb..5ec2b817c694a 100644
+--- a/drivers/scsi/qedf/qedf_dbg.h
++++ b/drivers/scsi/qedf/qedf_dbg.h
+@@ -59,6 +59,8 @@ extern uint qedf_debug;
+ #define QEDF_LOG_NOTICE	0x40000000	/* Notice logs */
+ #define QEDF_LOG_WARN		0x80000000	/* Warning logs */
+ 
++#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
++
+ /* Debug context structure */
+ struct qedf_dbg_ctx {
+ 	unsigned int host_no;
+diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
+index a3ed681c8ce3f..451fd236bfd05 100644
+--- a/drivers/scsi/qedf/qedf_debugfs.c
++++ b/drivers/scsi/qedf/qedf_debugfs.c
+@@ -8,6 +8,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/debugfs.h>
+ #include <linux/module.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qedf.h"
+ #include "qedf_dbg.h"
+@@ -98,7 +99,9 @@ static ssize_t
+ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 			 loff_t *ppos)
+ {
++	ssize_t ret;
+ 	size_t cnt = 0;
++	char *cbuf;
+ 	int id;
+ 	struct qedf_fastpath *fp = NULL;
+ 	struct qedf_dbg_ctx *qedf_dbg =
+@@ -108,19 +111,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+ 
+-	cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
++	cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
++	if (!cbuf)
++		return 0;
++
++	cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
+ 
+ 	for (id = 0; id < qedf->num_queues; id++) {
+ 		fp = &(qedf->fp_array[id]);
+ 		if (fp->sb_id == QEDF_SB_ID_NULL)
+ 			continue;
+-		cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
+-			       fp->completions);
++		cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
++				 "#%d: %lu\n", id, fp->completions);
+ 	}
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
++
++	vfree(cbuf);
++
++	return ret;
+ }
+ 
+ static ssize_t
+@@ -138,15 +147,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 			loff_t *ppos)
+ {
+ 	int cnt;
++	char cbuf[32];
+ 	struct qedf_dbg_ctx *qedf_dbg =
+ 				(struct qedf_dbg_ctx *)filp->private_data;
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
+-	cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
++	cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+ }
+ 
+ static ssize_t
+@@ -185,18 +193,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
+ 				   size_t count, loff_t *ppos)
+ {
+ 	int cnt;
++	char cbuf[7];
+ 	struct qedf_dbg_ctx *qedf_dbg =
+ 				(struct qedf_dbg_ctx *)filp->private_data;
+ 	struct qedf_ctx *qedf = container_of(qedf_dbg,
+ 	    struct qedf_ctx, dbg_ctx);
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+-	cnt = sprintf(buffer, "%s\n",
++	cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
+ 	    qedf->stop_io_on_error ? "true" : "false");
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+ }
+ 
+ static ssize_t
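
The qedf conversions above replace sprintf() into the raw __user pointer
(which also mishandled *ppos) with formatting into a kernel buffer followed
by simple_read_from_buffer(), which bounds the copy by count and advances
*ppos correctly. A minimal sketch of that pattern (demo_value and demo_read
are illustrative names, not driver symbols):

	#include <linux/fs.h>
	#include <linux/kernel.h>

	static unsigned int demo_value = 0xdead;	/* stand-in for driver state */

	static ssize_t demo_read(struct file *filp, char __user *buffer,
				 size_t count, loff_t *ppos)
	{
		char buf[32];
		int len;

		/* format into kernel memory first ... */
		len = scnprintf(buf, sizeof(buf), "value = 0x%x\n", demo_value);
		/* ... then let the helper honour count and *ppos */
		return simple_read_from_buffer(buffer, count, ppos, buf, len);
	}
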
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 367fba27fe699..33d4914e19fa6 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -5549,7 +5549,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
+ 	__be32 *q;
+ 
+ 	memset(ha->init_cb, 0, ha->init_cb_size);
+-	sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
++	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+ 	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ 					    ha->init_cb, sz);
+ 	if (rval != QLA_SUCCESS) {
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index b2a3988e1e159..675332e49a7b0 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -968,6 +968,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+ 	memset(&chap_rec, 0, sizeof(chap_rec));
+ 
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*param_info)) {
++			rc = -EINVAL;
++			goto exit_set_chap;
++		}
++
+ 		param_info = nla_data(attr);
+ 
+ 		switch (param_info->param) {
+@@ -2750,6 +2755,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
+ 	}
+ 
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*iface_param)) {
++			rval = -EINVAL;
++			goto exit_init_fw_cb;
++		}
++
+ 		iface_param = nla_data(attr);
+ 
+ 		if (iface_param->param_type == ISCSI_NET_PARAM) {
+@@ -8104,6 +8114,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ 
+ 	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*fnode_param)) {
++			rc = -EINVAL;
++			goto exit_set_param;
++		}
++
+ 		fnode_param = nla_data(attr);
+ 
+ 		switch (fnode_param->param) {
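
All three qla4xxx hunks add the same guard: verify that each netlink
attribute payload is at least as large as the structure about to be read
from it before dereferencing nla_data(). A condensed sketch of the guarded
loop (struct demo_param and demo_parse are illustrative stand-ins for the
iscsi parameter structures):

	#include <net/netlink.h>

	struct demo_param {
		u32 param;
		u32 len;
		u8  value[];
	};

	static int demo_parse(struct nlattr *data, int len)
	{
		struct demo_param *p;
		struct nlattr *attr;
		int rem;

		nla_for_each_attr(attr, data, len, rem) {
			if (nla_len(attr) < sizeof(*p))
				return -EINVAL;	/* truncated attribute */
			p = nla_data(attr);
			/* p->param and p->len are now safe to read */
		}
		return 0;
	}
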
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e527ece12453a..3075b2ddf7a69 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3014,14 +3014,15 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
+ }
+ 
+ static int
+-iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	char *data = (char*)ev + sizeof(*ev);
+ 	struct iscsi_cls_conn *conn;
+ 	struct iscsi_cls_session *session;
+ 	int err = 0, value = 0, state;
+ 
+-	if (ev->u.set_param.len > PAGE_SIZE)
++	if (ev->u.set_param.len > rlen ||
++	    ev->u.set_param.len > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	session = iscsi_session_lookup(ev->u.set_param.sid);
+@@ -3029,6 +3030,10 @@ iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 	if (!conn || !session)
+ 		return -EINVAL;
+ 
++	/* data will be regarded as NULL-ended string, do length check */
++	if (strlen(data) > ev->u.set_param.len)
++		return -EINVAL;
++
+ 	switch (ev->u.set_param.param) {
+ 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
+ 		sscanf(data, "%d", &value);
+@@ -3118,7 +3123,7 @@ put_ep:
+ 
+ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+-		      struct iscsi_uevent *ev, int msg_type)
++		      struct iscsi_uevent *ev, int msg_type, u32 rlen)
+ {
+ 	struct iscsi_endpoint *ep;
+ 	int rc = 0;
+@@ -3126,7 +3131,10 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 	switch (msg_type) {
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
+-		rc = iscsi_if_ep_connect(transport, ev, msg_type);
++		if (rlen < sizeof(struct sockaddr))
++			rc = -EINVAL;
++		else
++			rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ 		break;
+ 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ 		if (!transport->ep_poll)
+@@ -3150,12 +3158,15 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 
+ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+-		struct iscsi_uevent *ev)
++		struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct sockaddr *dst_addr;
+ 	int err;
+ 
++	if (rlen < sizeof(*dst_addr))
++		return -EINVAL;
++
+ 	if (!transport->tgt_dscvr)
+ 		return -EINVAL;
+ 
+@@ -3176,7 +3187,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ 
+ static int
+ iscsi_set_host_param(struct iscsi_transport *transport,
+-		     struct iscsi_uevent *ev)
++		     struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	char *data = (char*)ev + sizeof(*ev);
+ 	struct Scsi_Host *shost;
+@@ -3185,7 +3196,8 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 	if (!transport->set_host_param)
+ 		return -ENOSYS;
+ 
+-	if (ev->u.set_host_param.len > PAGE_SIZE)
++	if (ev->u.set_host_param.len > rlen ||
++	    ev->u.set_host_param.len > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+@@ -3195,6 +3207,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 		return -ENODEV;
+ 	}
+ 
++	/* see similar check in iscsi_if_set_param() */
++	if (strlen(data) > ev->u.set_host_param.len)
++		return -EINVAL;
++
+ 	err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ 					data, ev->u.set_host_param.len);
+ 	scsi_host_put(shost);
+@@ -3202,12 +3218,15 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ }
+ 
+ static int
+-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct iscsi_path *params;
+ 	int err;
+ 
++	if (rlen < sizeof(*params))
++		return -EINVAL;
++
+ 	if (!transport->set_path)
+ 		return -ENOSYS;
+ 
+@@ -3267,12 +3286,15 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
+ }
+ 
+ static int
+-iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct sockaddr *dst_addr;
+ 	int err;
+ 
++	if (rlen < sizeof(*dst_addr))
++		return -EINVAL;
++
+ 	if (!transport->send_ping)
+ 		return -ENOSYS;
+ 
+@@ -3770,13 +3792,12 @@ exit_host_stats:
+ }
+ 
+ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+-				   struct nlmsghdr *nlh)
++				   struct nlmsghdr *nlh, u32 pdu_len)
+ {
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
+ 	struct iscsi_cls_session *session;
+ 	struct iscsi_cls_conn *conn = NULL;
+ 	struct iscsi_endpoint *ep;
+-	uint32_t pdu_len;
+ 	int err = 0;
+ 
+ 	switch (nlh->nlmsg_type) {
+@@ -3861,8 +3882,6 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ 
+ 		break;
+ 	case ISCSI_UEVENT_SEND_PDU:
+-		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+-
+ 		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ 		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ 			err = -EINVAL;
+@@ -3892,6 +3911,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	struct iscsi_internal *priv;
+ 	struct iscsi_cls_session *session;
+ 	struct iscsi_endpoint *ep = NULL;
++	u32 rlen;
+ 
+ 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -3911,6 +3931,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 
+ 	portid = NETLINK_CB(skb).portid;
+ 
++	/*
++	 * Even though the remaining payload may not be regarded as nlattr,
++	 * (like address or something else), calculate the remaining length
++	 * here to ease following length checks.
++	 */
++	rlen = nlmsg_attrlen(nlh, sizeof(*ev));
++
+ 	switch (nlh->nlmsg_type) {
+ 	case ISCSI_UEVENT_CREATE_SESSION:
+ 		err = iscsi_if_create_session(priv, ep, ev,
+@@ -3967,7 +3994,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 			err = -EINVAL;
+ 		break;
+ 	case ISCSI_UEVENT_SET_PARAM:
+-		err = iscsi_if_set_param(transport, ev);
++		err = iscsi_if_set_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_CREATE_CONN:
+ 	case ISCSI_UEVENT_DESTROY_CONN:
+@@ -3975,7 +4002,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	case ISCSI_UEVENT_START_CONN:
+ 	case ISCSI_UEVENT_BIND_CONN:
+ 	case ISCSI_UEVENT_SEND_PDU:
+-		err = iscsi_if_transport_conn(transport, nlh);
++		err = iscsi_if_transport_conn(transport, nlh, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_STATS:
+ 		err = iscsi_if_get_stats(transport, nlh);
+@@ -3984,23 +4011,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+-		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
++		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_TGT_DSCVR:
+-		err = iscsi_tgt_dscvr(transport, ev);
++		err = iscsi_tgt_dscvr(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_SET_HOST_PARAM:
+-		err = iscsi_set_host_param(transport, ev);
++		err = iscsi_set_host_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_PATH_UPDATE:
+-		err = iscsi_set_path(transport, ev);
++		err = iscsi_set_path(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_SET_IFACE_PARAMS:
+-		err = iscsi_set_iface_params(transport, ev,
+-					     nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_set_iface_params(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_PING:
+-		err = iscsi_send_ping(transport, ev);
++		err = iscsi_send_ping(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_CHAP:
+ 		err = iscsi_get_chap(transport, nlh);
+@@ -4009,13 +4035,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		err = iscsi_delete_chap(transport, ev);
+ 		break;
+ 	case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
+-		err = iscsi_set_flashnode_param(transport, ev,
+-						nlmsg_attrlen(nlh,
+-							      sizeof(*ev)));
++		err = iscsi_set_flashnode_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_NEW_FLASHNODE:
+-		err = iscsi_new_flashnode(transport, ev,
+-					  nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_new_flashnode(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_DEL_FLASHNODE:
+ 		err = iscsi_del_flashnode(transport, ev);
+@@ -4030,8 +4053,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		err = iscsi_logout_flashnode_sid(transport, ev);
+ 		break;
+ 	case ISCSI_UEVENT_SET_CHAP:
+-		err = iscsi_set_chap(transport, ev,
+-				     nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_set_chap(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_HOST_STATS:
+ 		err = iscsi_get_host_stats(transport, nlh);
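
The scsi_transport_iscsi changes compute the payload length remaining after
the iscsi_uevent header once (rlen) and thread it into every handler, so
each one can reject a claimed length larger than what the netlink message
actually carries, and can bound strlen() on payloads treated as strings. A
condensed sketch of the two checks (demo_set_param and claimed_len are
illustrative; the real handlers take the lengths from ev->u.*):

	#include <linux/string.h>
	#include <net/netlink.h>

	static int demo_set_param(struct nlmsghdr *nlh, size_t ev_size,
				  u32 claimed_len)
	{
		u32 rlen = nlmsg_attrlen(nlh, ev_size);
		char *data = (char *)nlmsg_data(nlh) + ev_size;

		if (claimed_len > rlen || claimed_len > PAGE_SIZE)
			return -EINVAL;	/* claims more than was sent */

		/* downstream code treats data as a NUL-terminated string */
		if (strlen(data) > claimed_len)
			return -EINVAL;

		return 0;
	}
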
+diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
+index aaddc3cc53b7f..ef7c1748242ac 100644
+--- a/drivers/soc/qcom/ocmem.c
++++ b/drivers/soc/qcom/ocmem.c
+@@ -80,8 +80,8 @@ struct ocmem {
+ #define OCMEM_HW_VERSION_MINOR(val)		FIELD_GET(GENMASK(27, 16), val)
+ #define OCMEM_HW_VERSION_STEP(val)		FIELD_GET(GENMASK(15, 0), val)
+ 
+-#define OCMEM_HW_PROFILE_NUM_PORTS(val)		FIELD_PREP(0x0000000f, (val))
+-#define OCMEM_HW_PROFILE_NUM_MACROS(val)	FIELD_PREP(0x00003f00, (val))
++#define OCMEM_HW_PROFILE_NUM_PORTS(val)		FIELD_GET(0x0000000f, (val))
++#define OCMEM_HW_PROFILE_NUM_MACROS(val)	FIELD_GET(0x00003f00, (val))
+ 
+ #define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE	0x00010000
+ #define OCMEM_HW_PROFILE_INTERLEAVING		0x00020000
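
FIELD_PREP() packs a value into a register field while FIELD_GET() extracts
one, so using FIELD_PREP() to decode the HW_PROFILE readout, as the old
macros did, shifts the raw register value the wrong way. A small sketch of
the two directions (demo and reg are illustrative):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	static void demo(void)
	{
		u32 reg = 0x230f;	/* pretend HW_PROFILE readout */

		u32 ports  = FIELD_GET(GENMASK(3, 0), reg);	/* 0xf */
		u32 macros = FIELD_GET(GENMASK(13, 8), reg);	/* 0x23 */

		/* the inverse direction: pack a value back into the field */
		u32 packed = FIELD_PREP(GENMASK(13, 8), macros); /* 0x2300 */

		(void)ports; (void)packed;
	}
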
+diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
+index b0d59e815c3b7..a516b8b5efac9 100644
+--- a/drivers/soc/qcom/smem.c
++++ b/drivers/soc/qcom/smem.c
+@@ -724,7 +724,7 @@ EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
+ 
+ static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+ {
+-	return base && (addr >= base && addr < base + size);
++	return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
+ }
+ 
+ /**
+diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
+index 99aeef28a4774..5cecca1bef026 100644
+--- a/drivers/spi/spi-mpc512x-psc.c
++++ b/drivers/spi/spi-mpc512x-psc.c
+@@ -53,7 +53,7 @@ struct mpc512x_psc_spi {
+ 	int type;
+ 	void __iomem *psc;
+ 	struct mpc512x_psc_fifo __iomem *fifo;
+-	unsigned int irq;
++	int irq;
+ 	u8 bits_per_word;
+ 	u32 mclk_rate;
+ 
+diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
+index 4286310628a2b..0c5507473f972 100644
+--- a/drivers/spi/spi-tegra20-sflash.c
++++ b/drivers/spi/spi-tegra20-sflash.c
+@@ -455,7 +455,11 @@ static int tegra_sflash_probe(struct platform_device *pdev)
+ 		goto exit_free_master;
+ 	}
+ 
+-	tsd->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto exit_free_master;
++	tsd->irq = ret;
++
+ 	ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ 			dev_name(&pdev->dev), tsd);
+ 	if (ret < 0) {
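
platform_get_irq() returns a negative errno (including -EPROBE_DEFER) on
failure, so the result must be checked before being stored and handed to
request_irq(); the old code stored it unchecked. A sketch of the idiom the
fix adopts (demo_get_irq is an illustrative name):

	#include <linux/platform_device.h>

	static int demo_get_irq(struct platform_device *pdev, int *irq_out)
	{
		int ret = platform_get_irq(pdev, 0);

		if (ret < 0)
			return ret;	/* propagate -EPROBE_DEFER etc. */
		*irq_out = ret;
		return 0;
	}
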
+diff --git a/drivers/staging/media/av7110/sp8870.c b/drivers/staging/media/av7110/sp8870.c
+index 9767159aeb9b2..abf5c72607b64 100644
+--- a/drivers/staging/media/av7110/sp8870.c
++++ b/drivers/staging/media/av7110/sp8870.c
+@@ -606,4 +606,4 @@ MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Juergen Peitz");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(sp8870_attach);
++EXPORT_SYMBOL_GPL(sp8870_attach);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 134e2b9fa7d9a..84a41792cb4b8 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -120,7 +120,7 @@ static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
+ 			.max_width = 4096,
+ 			.step_width = 16,
+ 			.min_height = 48,
+-			.max_height = 2304,
++			.max_height = 2560,
+ 			.step_height = 16,
+ 		},
+ 		.ctrls = &rkvdec_h264_ctrls,
+diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
+index d4b40869c7d7b..dd474166ca671 100644
+--- a/drivers/thermal/imx8mm_thermal.c
++++ b/drivers/thermal/imx8mm_thermal.c
+@@ -179,10 +179,8 @@ static int imx8mm_tmu_probe_set_calib_v1(struct platform_device *pdev,
+ 	int ret;
+ 
+ 	ret = nvmem_cell_read_u32(&pdev->dev, "calib", &ana0);
+-	if (ret) {
+-		dev_warn(dev, "Failed to read OCOTP nvmem cell (%d).\n", ret);
+-		return ret;
+-	}
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to read OCOTP nvmem cell\n");
+ 
+ 	writel(FIELD_PREP(TASR_BUF_VREF_MASK,
+ 			  FIELD_GET(ANA0_BUF_VREF_MASK, ana0)) |
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index b693fac2d6779..b0d71b74a928e 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -65,7 +65,12 @@
+ #define LVTS_HW_FILTER				0x2
+ #define LVTS_TSSEL_CONF				0x13121110
+ #define LVTS_CALSCALE_CONF			0x300
+-#define LVTS_MONINT_CONF			0x9FBF7BDE
++#define LVTS_MONINT_CONF			0x8300318C
++
++#define LVTS_MONINT_OFFSET_SENSOR0		0xC
++#define LVTS_MONINT_OFFSET_SENSOR1		0x180
++#define LVTS_MONINT_OFFSET_SENSOR2		0x3000
++#define LVTS_MONINT_OFFSET_SENSOR3		0x3000000
+ 
+ #define LVTS_INT_SENSOR0			0x0009001F
+ #define LVTS_INT_SENSOR1			0x001203E0
+@@ -83,6 +88,8 @@
+ 
+ #define LVTS_HW_SHUTDOWN_MT8195		105000
+ 
++#define LVTS_MINIMUM_THRESHOLD		20000
++
+ static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
+ static int coeff_b = LVTS_COEFF_B;
+ 
+@@ -110,6 +117,8 @@ struct lvts_sensor {
+ 	void __iomem *base;
+ 	int id;
+ 	int dt_id;
++	int low_thresh;
++	int high_thresh;
+ };
+ 
+ struct lvts_ctrl {
+@@ -119,6 +128,8 @@ struct lvts_ctrl {
+ 	int num_lvts_sensor;
+ 	int mode;
+ 	void __iomem *base;
++	int low_thresh;
++	int high_thresh;
+ };
+ 
+ struct lvts_domain {
+@@ -290,32 +301,84 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
+ 	return 0;
+ }
+ 
++static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
++{
++	u32 masks[] = {
++		LVTS_MONINT_OFFSET_SENSOR0,
++		LVTS_MONINT_OFFSET_SENSOR1,
++		LVTS_MONINT_OFFSET_SENSOR2,
++		LVTS_MONINT_OFFSET_SENSOR3,
++	};
++	u32 value = 0;
++	int i;
++
++	value = readl(LVTS_MONINT(lvts_ctrl->base));
++
++	for (i = 0; i < ARRAY_SIZE(masks); i++) {
++		if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
++		    && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
++			value |= masks[i];
++		else
++			value &= ~masks[i];
++	}
++
++	writel(value, LVTS_MONINT(lvts_ctrl->base));
++}
++
++static bool lvts_should_update_thresh(struct lvts_ctrl *lvts_ctrl, int high)
++{
++	int i;
++
++	if (high > lvts_ctrl->high_thresh)
++		return true;
++
++	for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++)
++		if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
++		    && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
++			return false;
++
++	return true;
++}
++
+ static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
+ {
+ 	struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
++	struct lvts_ctrl *lvts_ctrl = container_of(lvts_sensor, struct lvts_ctrl, sensors[lvts_sensor->id]);
+ 	void __iomem *base = lvts_sensor->base;
+-	u32 raw_low = lvts_temp_to_raw(low);
++	u32 raw_low = lvts_temp_to_raw(low != -INT_MAX ? low : LVTS_MINIMUM_THRESHOLD);
+ 	u32 raw_high = lvts_temp_to_raw(high);
++	bool should_update_thresh;
++
++	lvts_sensor->low_thresh = low;
++	lvts_sensor->high_thresh = high;
++
++	should_update_thresh = lvts_should_update_thresh(lvts_ctrl, high);
++	if (should_update_thresh) {
++		lvts_ctrl->high_thresh = high;
++		lvts_ctrl->low_thresh = low;
++	}
++	lvts_update_irq_mask(lvts_ctrl);
++
++	if (!should_update_thresh)
++		return 0;
+ 
+ 	/*
+-	 * Hot to normal temperature threshold
++	 * Low offset temperature threshold
+ 	 *
+-	 * LVTS_H2NTHRE
++	 * LVTS_OFFSETL
+ 	 *
+ 	 * Bits:
+ 	 *
+ 	 * 14-0 : Raw temperature for threshold
+ 	 */
+-	if (low != -INT_MAX) {
+-		pr_debug("%s: Setting low limit temperature interrupt: %d\n",
+-			 thermal_zone_device_type(tz), low);
+-		writel(raw_low, LVTS_H2NTHRE(base));
+-	}
++	pr_debug("%s: Setting low limit temperature interrupt: %d\n",
++		 thermal_zone_device_type(tz), low);
++	writel(raw_low, LVTS_OFFSETL(base));
+ 
+ 	/*
+-	 * Hot temperature threshold
++	 * High offset temperature threshold
+ 	 *
+-	 * LVTS_HTHRE
++	 * LVTS_OFFSETH
+ 	 *
+ 	 * Bits:
+ 	 *
+@@ -323,7 +386,7 @@ static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
+ 	 */
+ 	pr_debug("%s: Setting high limit temperature interrupt: %d\n",
+ 		 thermal_zone_device_type(tz), high);
+-	writel(raw_high, LVTS_HTHRE(base));
++	writel(raw_high, LVTS_OFFSETH(base));
+ 
+ 	return 0;
+ }
+@@ -451,7 +514,7 @@ static irqreturn_t lvts_irq_handler(int irq, void *data)
+ 
+ 	for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+ 
+-		aux = lvts_ctrl_irq_handler(lvts_td->lvts_ctrl);
++		aux = lvts_ctrl_irq_handler(&lvts_td->lvts_ctrl[i]);
+ 		if (aux != IRQ_HANDLED)
+ 			continue;
+ 
+@@ -521,6 +584,9 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
+ 		 */
+ 		lvts_sensor[i].msr = lvts_ctrl_data->mode == LVTS_MSR_IMMEDIATE_MODE ?
+ 			imm_regs[i] : msr_regs[i];
++
++		lvts_sensor[i].low_thresh = INT_MIN;
++		lvts_sensor[i].high_thresh = INT_MIN;
+ 	};
+ 
+ 	lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
+@@ -688,6 +754,9 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
+ 		 */
+ 		lvts_ctrl[i].hw_tshut_raw_temp =
+ 			lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp);
++
++		lvts_ctrl[i].low_thresh = INT_MIN;
++		lvts_ctrl[i].high_thresh = INT_MIN;
+ 	}
+ 
+ 	/*
+@@ -896,24 +965,6 @@ static int lvts_ctrl_configure(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ 			LVTS_HW_FILTER << 3 | LVTS_HW_FILTER;
+ 	writel(value, LVTS_MSRCTL0(lvts_ctrl->base));
+ 
+-	/*
+-	 * LVTS_MSRCTL1 : Measurement control
+-	 *
+-	 * Bits:
+-	 *
+-	 * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
+-	 * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
+-	 * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
+-	 * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
+-	 *
+-	 * That configuration will ignore the filtering and the delays
+-	 * introduced below in MONCTL1 and MONCTL2
+-	 */
+-	if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
+-		value = BIT(9) | BIT(6) | BIT(5) | BIT(4);
+-		writel(value, LVTS_MSRCTL1(lvts_ctrl->base));
+-	}
+-
+ 	/*
+ 	 * LVTS_MONCTL1 : Period unit and group interval configuration
+ 	 *
+@@ -979,6 +1030,15 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ 	struct thermal_zone_device *tz;
+ 	u32 sensor_map = 0;
+ 	int i;
++	/*
++	 * Bitmaps to enable each sensor on immediate and filtered modes, as
++	 * described in MSRCTL1 and MONCTL0 registers below, respectively.
++	 */
++	u32 sensor_imm_bitmap[] = { BIT(4), BIT(5), BIT(6), BIT(9) };
++	u32 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
++
++	u32 *sensor_bitmap = lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE ?
++			     sensor_imm_bitmap : sensor_filt_bitmap;
+ 
+ 	for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
+ 
+@@ -1016,20 +1076,38 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ 		 * map, so we can enable the temperature monitoring in
+ 		 * the hardware thermal controller.
+ 		 */
+-		sensor_map |= BIT(i);
++		sensor_map |= sensor_bitmap[i];
+ 	}
+ 
+ 	/*
+-	 * Bits:
+-	 *      9: Single point access flow
+-	 *    0-3: Enable sensing point 0-3
+-	 *
+ 	 * The initialization of the thermal zones give us
+ 	 * which sensor point to enable. If any thermal zone
+ 	 * was not described in the device tree, it won't be
+ 	 * enabled here in the sensor map.
+ 	 */
+-	writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
++	if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
++		/*
++		 * LVTS_MSRCTL1 : Measurement control
++		 *
++		 * Bits:
++		 *
++		 * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
++		 * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
++		 * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
++		 * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
++		 *
++		 * That configuration will ignore the filtering and the delays
++		 * introduced in MONCTL1 and MONCTL2
++		 */
++		writel(sensor_map, LVTS_MSRCTL1(lvts_ctrl->base));
++	} else {
++		/*
++		 * Bits:
++		 *      9: Single point access flow
++		 *    0-3: Enable sensing point 0-3
++		 */
++		writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
++	}
+ 
+ 	return 0;
+ }
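
The set_trips() rework above keeps one low/high pair per controller and only
reprograms LVTS_OFFSETL/LVTS_OFFSETH when a sensor raises the
controller-wide maximum or when no sensor still matches the programmed pair;
the per-sensor interrupt mask then selects which sensors may fire. A
condensed sketch of the update rule (the demo_* types are illustrative; the
comparison mirrors lvts_should_update_thresh() in the patch):

	#include <linux/types.h>

	struct demo_sensor { int low, high; };

	struct demo_ctrl {
		struct demo_sensor s[4];
		int nsensors;
		int low, high;		/* currently programmed pair */
	};

	static bool demo_should_update(struct demo_ctrl *c, int new_high)
	{
		int i;

		if (new_high > c->high)
			return true;	/* must widen the window */
		for (i = 0; i < c->nsensors; i++)
			if (c->s[i].high == c->high && c->s[i].low == c->low)
				return false;	/* pair still in use */
		return true;
	}
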
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index bc07ae1c284cf..22272f9c5934a 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -292,13 +292,13 @@ static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_i
+ 	ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ 					 index, &cooling_spec);
+ 
+-	of_node_put(cooling_spec.np);
+-
+ 	if (ret < 0) {
+ 		pr_err("Invalid cooling-device entry\n");
+ 		return ret;
+ 	}
+ 
++	of_node_put(cooling_spec.np);
++
+ 	if (cooling_spec.args_count < 2) {
+ 		pr_err("wrong reference to cooling device, missing limits\n");
+ 		return -EINVAL;
+@@ -325,13 +325,13 @@ static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
+ 	ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ 					 index, &cooling_spec);
+ 
+-	of_node_put(cooling_spec.np);
+-
+ 	if (ret < 0) {
+ 		pr_err("Invalid cooling-device entry\n");
+ 		return ret;
+ 	}
+ 
++	of_node_put(cooling_spec.np);
++
+ 	if (cooling_spec.args_count < 2) {
+ 		pr_err("wrong reference to cooling device, missing limits\n");
+ 		return -EINVAL;
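
Both thermal_of.c hunks fix the same ordering problem: when
of_parse_phandle_with_args() fails, cooling_spec.np has not been populated
with a valid reference, so of_node_put() should only run on the success
path. A sketch of the corrected shape (demo_parse_cooling is an
illustrative name):

	#include <linux/of.h>

	static int demo_parse_cooling(struct device_node *map_np, int index)
	{
		struct of_phandle_args spec;
		int ret;

		ret = of_parse_phandle_with_args(map_np, "cooling-device",
						 "#cooling-cells", index,
						 &spec);
		if (ret < 0)
			return ret;	/* nothing valid to put */

		of_node_put(spec.np);	/* drop the reference we now hold */
		return 0;
	}
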
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 54b22cbc0fcef..67484c062edd1 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -592,7 +592,6 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
+ {
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport);
+ 	bool done;
+-	u32 m_irq_en;
+ 
+ 	if (!qcom_geni_serial_main_active(uport))
+ 		return;
+@@ -604,12 +603,10 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
+ 		port->tx_remaining = 0;
+ 	}
+ 
+-	m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+-	writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ 	geni_se_cancel_m_cmd(&port->se);
+ 
+-	done = qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+-					 S_CMD_CANCEL_EN, true);
++	done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
++					 M_CMD_CANCEL_EN, true);
+ 	if (!done) {
+ 		geni_se_abort_m_cmd(&port->se);
+ 		done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index faeb3dc371c05..289ca7d4e5669 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -236,7 +236,8 @@
+ 
+ /* IOControl register bits (Only 750/760) */
+ #define SC16IS7XX_IOCONTROL_LATCH_BIT	(1 << 0) /* Enable input latching */
+-#define SC16IS7XX_IOCONTROL_MODEM_BIT	(1 << 1) /* Enable GPIO[7:4] as modem pins */
++#define SC16IS7XX_IOCONTROL_MODEM_A_BIT	(1 << 1) /* Enable GPIO[7:4] as modem A pins */
++#define SC16IS7XX_IOCONTROL_MODEM_B_BIT	(1 << 2) /* Enable GPIO[3:0] as modem B pins */
+ #define SC16IS7XX_IOCONTROL_SRESET_BIT	(1 << 3) /* Software Reset */
+ 
+ /* EFCR register bits */
+@@ -301,12 +302,12 @@
+ /* Misc definitions */
+ #define SC16IS7XX_FIFO_SIZE		(64)
+ #define SC16IS7XX_REG_SHIFT		2
++#define SC16IS7XX_GPIOS_PER_BANK	4
+ 
+ struct sc16is7xx_devtype {
+ 	char	name[10];
+ 	int	nr_gpio;
+ 	int	nr_uart;
+-	int	has_mctrl;
+ };
+ 
+ #define SC16IS7XX_RECONF_MD		(1 << 0)
+@@ -336,7 +337,9 @@ struct sc16is7xx_port {
+ 	struct clk			*clk;
+ #ifdef CONFIG_GPIOLIB
+ 	struct gpio_chip		gpio;
++	unsigned long			gpio_valid_mask;
+ #endif
++	u8				mctrl_mask;
+ 	unsigned char			buf[SC16IS7XX_FIFO_SIZE];
+ 	struct kthread_worker		kworker;
+ 	struct task_struct		*kworker_task;
+@@ -447,35 +450,30 @@ static const struct sc16is7xx_devtype sc16is74x_devtype = {
+ 	.name		= "SC16IS74X",
+ 	.nr_gpio	= 0,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 0,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is750_devtype = {
+ 	.name		= "SC16IS750",
+-	.nr_gpio	= 4,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is752_devtype = {
+ 	.name		= "SC16IS752",
+-	.nr_gpio	= 0,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 2,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is760_devtype = {
+ 	.name		= "SC16IS760",
+-	.nr_gpio	= 4,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is762_devtype = {
+ 	.name		= "SC16IS762",
+-	.nr_gpio	= 0,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 2,
+-	.has_mctrl	= 1,
+ };
+ 
+ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+@@ -1357,8 +1355,98 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
+ 
+ 	return 0;
+ }
++
++static int sc16is7xx_gpio_init_valid_mask(struct gpio_chip *chip,
++					  unsigned long *valid_mask,
++					  unsigned int ngpios)
++{
++	struct sc16is7xx_port *s = gpiochip_get_data(chip);
++
++	*valid_mask = s->gpio_valid_mask;
++
++	return 0;
++}
++
++static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
++{
++	struct device *dev = s->p[0].port.dev;
++
++	if (!s->devtype->nr_gpio)
++		return 0;
++
++	switch (s->mctrl_mask) {
++	case 0:
++		s->gpio_valid_mask = GENMASK(7, 0);
++		break;
++	case SC16IS7XX_IOCONTROL_MODEM_A_BIT:
++		s->gpio_valid_mask = GENMASK(3, 0);
++		break;
++	case SC16IS7XX_IOCONTROL_MODEM_B_BIT:
++		s->gpio_valid_mask = GENMASK(7, 4);
++		break;
++	default:
++		break;
++	}
++
++	if (s->gpio_valid_mask == 0)
++		return 0;
++
++	s->gpio.owner		 = THIS_MODULE;
++	s->gpio.parent		 = dev;
++	s->gpio.label		 = dev_name(dev);
++	s->gpio.init_valid_mask	 = sc16is7xx_gpio_init_valid_mask;
++	s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
++	s->gpio.get		 = sc16is7xx_gpio_get;
++	s->gpio.direction_output = sc16is7xx_gpio_direction_output;
++	s->gpio.set		 = sc16is7xx_gpio_set;
++	s->gpio.base		 = -1;
++	s->gpio.ngpio		 = s->devtype->nr_gpio;
++	s->gpio.can_sleep	 = 1;
++
++	return gpiochip_add_data(&s->gpio, s);
++}
+ #endif
+ 
++/*
++ * Configure ports designated to operate as modem control lines.
++ */
++static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
++{
++	int i;
++	int ret;
++	int count;
++	u32 mctrl_port[2];
++	struct device *dev = s->p[0].port.dev;
++
++	count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
++	if (count < 0 || count > ARRAY_SIZE(mctrl_port))
++		return 0;
++
++	ret = device_property_read_u32_array(dev, "nxp,modem-control-line-ports",
++					     mctrl_port, count);
++	if (ret)
++		return ret;
++
++	s->mctrl_mask = 0;
++
++	for (i = 0; i < count; i++) {
++		/* Use GPIO lines as modem control lines */
++		if (mctrl_port[i] == 0)
++			s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_A_BIT;
++		else if (mctrl_port[i] == 1)
++			s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_B_BIT;
++	}
++
++	if (s->mctrl_mask)
++		regmap_update_bits(
++			s->regmap,
++			SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
++			SC16IS7XX_IOCONTROL_MODEM_A_BIT |
++			SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
++
++	return 0;
++}
++
+ static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ 	.delay_rts_before_send = 1,
+@@ -1471,12 +1559,6 @@ static int sc16is7xx_probe(struct device *dev,
+ 				     SC16IS7XX_EFCR_RXDISABLE_BIT |
+ 				     SC16IS7XX_EFCR_TXDISABLE_BIT);
+ 
+-		/* Use GPIO lines as modem status registers */
+-		if (devtype->has_mctrl)
+-			sc16is7xx_port_write(&s->p[i].port,
+-					     SC16IS7XX_IOCONTROL_REG,
+-					     SC16IS7XX_IOCONTROL_MODEM_BIT);
+-
+ 		/* Initialize kthread work structs */
+ 		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ 		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+@@ -1514,23 +1596,14 @@ static int sc16is7xx_probe(struct device *dev,
+ 				s->p[u].irda_mode = true;
+ 	}
+ 
++	ret = sc16is7xx_setup_mctrl_ports(s);
++	if (ret)
++		goto out_ports;
++
+ #ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio) {
+-		/* Setup GPIO cotroller */
+-		s->gpio.owner		 = THIS_MODULE;
+-		s->gpio.parent		 = dev;
+-		s->gpio.label		 = dev_name(dev);
+-		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
+-		s->gpio.get		 = sc16is7xx_gpio_get;
+-		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+-		s->gpio.set		 = sc16is7xx_gpio_set;
+-		s->gpio.base		 = -1;
+-		s->gpio.ngpio		 = devtype->nr_gpio;
+-		s->gpio.can_sleep	 = 1;
+-		ret = gpiochip_add_data(&s->gpio, s);
+-		if (ret)
+-			goto out_thread;
+-	}
++	ret = sc16is7xx_setup_gpio_chip(s);
++	if (ret)
++		goto out_ports;
+ #endif
+ 
+ 	/*
+@@ -1553,10 +1626,8 @@ static int sc16is7xx_probe(struct device *dev,
+ 		return 0;
+ 
+ #ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio)
++	if (s->gpio_valid_mask)
+ 		gpiochip_remove(&s->gpio);
+-
+-out_thread:
+ #endif
+ 
+ out_ports:
+@@ -1579,7 +1650,7 @@ static void sc16is7xx_remove(struct device *dev)
+ 	int i;
+ 
+ #ifdef CONFIG_GPIOLIB
+-	if (s->devtype->nr_gpio)
++	if (s->gpio_valid_mask)
+ 		gpiochip_remove(&s->gpio);
+ #endif
+ 
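The sc16is7xx rework replaces the per-chip has_mctrl flag with the explicit
"nxp,modem-control-line-ports" property: each listed port claims its bank of
four shared GPIO lines for modem control, and the remaining lines are
published through gpio_valid_mask. A condensed sketch of turning the
property into a bitmask (demo_mctrl_mask is an illustrative name; the
IOCONTROL bit macros are the ones defined in the patch, and the real code
checks port indices 0 and 1 explicitly):

	#include <linux/kernel.h>
	#include <linux/property.h>

	static u8 demo_mctrl_mask(struct device *dev)
	{
		u32 ports[2];
		int count, i;
		u8 mask = 0;

		count = device_property_count_u32(dev,
					"nxp,modem-control-line-ports");
		if (count < 0 || count > ARRAY_SIZE(ports))
			return 0;
		if (device_property_read_u32_array(dev,
					"nxp,modem-control-line-ports",
					ports, count))
			return 0;

		for (i = 0; i < count; i++)
			mask |= ports[i] ? SC16IS7XX_IOCONTROL_MODEM_B_BIT
					 : SC16IS7XX_IOCONTROL_MODEM_A_BIT;
		return mask;
	}
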
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index 1cf08b33456c9..37e1e05bc87e6 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -998,7 +998,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	tup->ier_shadow = 0;
+ 	tup->current_baud = 0;
+ 
+-	clk_prepare_enable(tup->uart_clk);
++	ret = clk_prepare_enable(tup->uart_clk);
++	if (ret) {
++		dev_err(tup->uport.dev, "could not enable clk\n");
++		return ret;
++	}
+ 
+ 	/* Reset the UART controller to clear all previous status.*/
+ 	reset_control_assert(tup->rst);
+diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
+index b58f51296ace2..99da964e8bd44 100644
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -364,7 +364,7 @@ static void sprd_rx_free_buf(struct sprd_uart_port *sp)
+ 	if (sp->rx_dma.virt)
+ 		dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
+ 				  sp->rx_dma.virt, sp->rx_dma.phys_addr);
+-
++	sp->rx_dma.virt = NULL;
+ }
+ 
+ static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
+@@ -1106,7 +1106,7 @@ static bool sprd_uart_is_console(struct uart_port *uport)
+ static int sprd_clk_init(struct uart_port *uport)
+ {
+ 	struct clk *clk_uart, *clk_parent;
+-	struct sprd_uart_port *u = sprd_port[uport->line];
++	struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port);
+ 
+ 	clk_uart = devm_clk_get(uport->dev, "uart");
+ 	if (IS_ERR(clk_uart)) {
+@@ -1149,22 +1149,22 @@ static int sprd_probe(struct platform_device *pdev)
+ {
+ 	struct resource *res;
+ 	struct uart_port *up;
++	struct sprd_uart_port *sport;
+ 	int irq;
+ 	int index;
+ 	int ret;
+ 
+ 	index = of_alias_get_id(pdev->dev.of_node, "serial");
+-	if (index < 0 || index >= ARRAY_SIZE(sprd_port)) {
++	if (index < 0 || index >= UART_NR_MAX) {
+ 		dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
+ 		return -EINVAL;
+ 	}
+ 
+-	sprd_port[index] = devm_kzalloc(&pdev->dev, sizeof(*sprd_port[index]),
+-					GFP_KERNEL);
+-	if (!sprd_port[index])
++	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
++	if (!sport)
+ 		return -ENOMEM;
+ 
+-	up = &sprd_port[index]->port;
++	up = &sport->port;
+ 	up->dev = &pdev->dev;
+ 	up->line = index;
+ 	up->type = PORT_SPRD;
+@@ -1195,7 +1195,7 @@ static int sprd_probe(struct platform_device *pdev)
+ 	 * Allocate one dma buffer to prepare for receive transfer, in case
+ 	 * memory allocation failure at runtime.
+ 	 */
+-	ret = sprd_rx_alloc_buf(sprd_port[index]);
++	ret = sprd_rx_alloc_buf(sport);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1203,17 +1203,27 @@ static int sprd_probe(struct platform_device *pdev)
+ 		ret = uart_register_driver(&sprd_uart_driver);
+ 		if (ret < 0) {
+ 			pr_err("Failed to register SPRD-UART driver\n");
+-			return ret;
++			goto free_rx_buf;
+ 		}
+ 	}
++
+ 	sprd_ports_num++;
++	sprd_port[index] = sport;
+ 
+ 	ret = uart_add_one_port(&sprd_uart_driver, up);
+ 	if (ret)
+-		sprd_remove(pdev);
++		goto clean_port;
+ 
+ 	platform_set_drvdata(pdev, up);
+ 
++	return 0;
++
++clean_port:
++	sprd_port[index] = NULL;
++	if (--sprd_ports_num == 0)
++		uart_unregister_driver(&sprd_uart_driver);
++free_rx_buf:
++	sprd_rx_free_buf(sport);
+ 	return ret;
+ }
+ 
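The sprd_probe() rework turns the tail of the function into a conventional
goto unwind ladder: later steps are undone in reverse order, the driver is
unregistered again if this port was the only user, and the global
sprd_port[] slot is published only while registration can still be rolled
back cleanly. A skeleton of that shape, with hypothetical helper names
standing in for the real steps:

	/* all helpers below are hypothetical stand-ins */
	static int alloc_rx_buf(void)			{ return 0; }
	static void free_rx_buf_stub(void)		{ }
	static int register_driver_once(void)		{ return 0; }
	static void unregister_driver_last(void)	{ }
	static void publish_port(void)			{ }
	static void unpublish_port(void)		{ }
	static int add_one_port(void)			{ return 0; }

	static int demo_probe(void)
	{
		int ret;

		ret = alloc_rx_buf();
		if (ret)
			return ret;
		ret = register_driver_once();
		if (ret)
			goto free_rx_buf;
		publish_port();
		ret = add_one_port();
		if (ret)
			goto clean_port;
		return 0;

	clean_port:
		unpublish_port();
		unregister_driver_last();
	free_rx_buf:
		free_rx_buf_stub();
		return ret;
	}
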
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 1294467757964..fa18806e80b61 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5251,9 +5251,17 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ 	int result = 0;
+ 	int scsi_status;
+ 	enum utp_ocs ocs;
++	u8 upiu_flags;
++	u32 resid;
+ 
+-	scsi_set_resid(lrbp->cmd,
+-		be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count));
++	upiu_flags = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_0) >> 16;
++	resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
++	/*
++	 * Test !overflow instead of underflow to support UFS devices that do
++	 * not set either flag.
++	 */
++	if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
++		scsi_set_resid(lrbp->cmd, resid);
+ 
+ 	/* overall command status of utrd */
+ 	ocs = ufshcd_get_tr_ocs(lrbp, cqe);
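
The ufshcd change stops applying the residual count unconditionally: it is
used unless the device set the OVERFLOW flag, and testing !overflow rather
than underflow also covers devices that set neither flag. A condensed sketch
of the rule (demo_set_resid is an illustrative name; UPIU_RSP_FLAG_OVERFLOW
is the flag tested in the patch):

	#include <scsi/scsi_cmnd.h>
	#include <ufs/ufs.h>

	static void demo_set_resid(struct scsi_cmnd *cmd, u32 resid, u8 flags)
	{
		if (resid && !(flags & UPIU_RSP_FLAG_OVERFLOW))
			scsi_set_resid(cmd, resid);
	}
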
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 8300baedafd20..6af0a31ff1475 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -983,6 +983,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ {
+ 	struct device *parent_dev = hcd->self.controller;
+ 	struct usb_device *usb_dev = hcd->self.root_hub;
++	struct usb_device_descriptor *descr;
+ 	const int devnum = 1;
+ 	int retval;
+ 
+@@ -994,13 +995,16 @@ static int register_root_hub(struct usb_hcd *hcd)
+ 	mutex_lock(&usb_bus_idr_lock);
+ 
+ 	usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-	retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
+-	if (retval != sizeof usb_dev->descriptor) {
++	descr = usb_get_device_descriptor(usb_dev);
++	if (IS_ERR(descr)) {
++		retval = PTR_ERR(descr);
+ 		mutex_unlock(&usb_bus_idr_lock);
+ 		dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
+ 				dev_name(&usb_dev->dev), retval);
+-		return (retval < 0) ? retval : -EMSGSIZE;
++		return retval;
+ 	}
++	usb_dev->descriptor = *descr;
++	kfree(descr);
+ 
+ 	if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
+ 		retval = usb_get_bos_descriptor(usb_dev);
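
This series changes usb_get_device_descriptor() to allocate and return the
descriptor (or an ERR_PTR) instead of writing into udev->descriptor, so a
failed or partial read can no longer leave a registered device with a
half-updated descriptor. Every caller follows the same copy-then-free shape;
a condensed sketch (demo_refresh_descriptor is an illustrative name, and the
helper itself is usbcore-internal, so the include list is illustrative too):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	static int demo_refresh_descriptor(struct usb_device *udev)
	{
		struct usb_device_descriptor *descr;

		descr = usb_get_device_descriptor(udev);
		if (IS_ERR(descr))
			return PTR_ERR(descr);

		udev->descriptor = *descr;	/* copy under our control */
		kfree(descr);
		return 0;
	}
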
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a739403a9e455..26a27ff504085 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2671,12 +2671,17 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ 	}
+ 
+ 	if (usb_dev->wusb) {
+-		result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
+-		if (result < 0) {
++		struct usb_device_descriptor *descr;
++
++		descr = usb_get_device_descriptor(usb_dev);
++		if (IS_ERR(descr)) {
++			result = PTR_ERR(descr);
+ 			dev_err(&usb_dev->dev, "can't re-read device descriptor for "
+ 				"authorization: %d\n", result);
+ 			goto error_device_descriptor;
+ 		}
++		usb_dev->descriptor = *descr;
++		kfree(descr);
+ 	}
+ 
+ 	usb_dev->authorized = 1;
+@@ -4718,6 +4723,67 @@ static int hub_enable_device(struct usb_device *udev)
+ 	return hcd->driver->enable_device(hcd, udev);
+ }
+ 
++/*
++ * Get the bMaxPacketSize0 value during initialization by reading the
++ * device's device descriptor.  Since we don't already know this value,
++ * the transfer is unsafe and it ignores I/O errors, only testing for
++ * reasonable received values.
++ *
++ * For "old scheme" initialization, size will be 8 so we read just the
++ * start of the device descriptor, which should work okay regardless of
++ * the actual bMaxPacketSize0 value.  For "new scheme" initialization,
++ * size will be 64 (and buf will point to a sufficiently large buffer),
++ * which might not be kosher according to the USB spec but it's what
++ * Windows does and what many devices expect.
++ *
++ * Returns: bMaxPacketSize0 or a negative error code.
++ */
++static int get_bMaxPacketSize0(struct usb_device *udev,
++		struct usb_device_descriptor *buf, int size, bool first_time)
++{
++	int i, rc;
++
++	/*
++	 * Retry on all errors; some devices are flakey.
++	 * 255 is for WUSB devices, we actually need to use
++	 * 512 (WUSB1.0[4.8.1]).
++	 */
++	for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
++		/* Start with invalid values in case the transfer fails */
++		buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
++		rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
++				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
++				USB_DT_DEVICE << 8, 0,
++				buf, size,
++				initial_descriptor_timeout);
++		switch (buf->bMaxPacketSize0) {
++		case 8: case 16: case 32: case 64: case 9:
++			if (buf->bDescriptorType == USB_DT_DEVICE) {
++				rc = buf->bMaxPacketSize0;
++				break;
++			}
++			fallthrough;
++		default:
++			if (rc >= 0)
++				rc = -EPROTO;
++			break;
++		}
++
++		/*
++		 * Some devices time out if they are powered on
++		 * when already connected. They need a second
++		 * reset, so return early. But only on the first
++		 * attempt, lest we get into a time-out/reset loop.
++		 */
++		if (rc > 0 || (rc == -ETIMEDOUT && first_time &&
++				udev->speed > USB_SPEED_FULL))
++			break;
++	}
++	return rc;
++}
++
++#define GET_DESCRIPTOR_BUFSIZE	64
++
+ /* Reset device, (re)assign address, get device descriptor.
+  * Device connection must be stable, no more debouncing needed.
+  * Returns device in USB_STATE_ADDRESS, except on error.
+@@ -4727,10 +4793,17 @@ static int hub_enable_device(struct usb_device *udev)
+  * the port lock.  For a newly detected device that is not accessible
+  * through any global pointers, it's not necessary to lock the device,
+  * but it is still necessary to lock the port.
++ *
++ * For a newly detected device, @dev_descr must be NULL.  The device
++ * descriptor retrieved from the device will then be stored in
++ * @udev->descriptor.  For an already existing device, @dev_descr
++ * must be non-NULL.  The device descriptor will be stored there,
++ * not in @udev->descriptor, because descriptors for registered
++ * devices are meant to be immutable.
+  */
+ static int
+ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+-		int retry_counter)
++		int retry_counter, struct usb_device_descriptor *dev_descr)
+ {
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+@@ -4742,6 +4815,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	int			devnum = udev->devnum;
+ 	const char		*driver_name;
+ 	bool			do_new_scheme;
++	const bool		initial = !dev_descr;
++	int			maxp0;
++	struct usb_device_descriptor	*buf, *descr;
++
++	buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
+ 
+ 	/* root hub ports have a slightly longer reset period
+ 	 * (from USB 2.0 spec, section 7.1.7.5)
+@@ -4774,32 +4854,34 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	}
+ 	oldspeed = udev->speed;
+ 
+-	/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
+-	 * it's fixed size except for full speed devices.
+-	 * For Wireless USB devices, ep0 max packet is always 512 (tho
+-	 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
+-	 */
+-	switch (udev->speed) {
+-	case USB_SPEED_SUPER_PLUS:
+-	case USB_SPEED_SUPER:
+-	case USB_SPEED_WIRELESS:	/* fixed at 512 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
+-		break;
+-	case USB_SPEED_HIGH:		/* fixed at 64 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-		break;
+-	case USB_SPEED_FULL:		/* 8, 16, 32, or 64 */
+-		/* to determine the ep0 maxpacket size, try to read
+-		 * the device descriptor to get bMaxPacketSize0 and
+-		 * then correct our initial guess.
++	if (initial) {
++		/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
++		 * it's fixed size except for full speed devices.
++		 * For Wireless USB devices, ep0 max packet is always 512 (tho
++		 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
+ 		 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-		break;
+-	case USB_SPEED_LOW:		/* fixed at 8 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
+-		break;
+-	default:
+-		goto fail;
++		switch (udev->speed) {
++		case USB_SPEED_SUPER_PLUS:
++		case USB_SPEED_SUPER:
++		case USB_SPEED_WIRELESS:	/* fixed at 512 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
++			break;
++		case USB_SPEED_HIGH:		/* fixed at 64 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++			break;
++		case USB_SPEED_FULL:		/* 8, 16, 32, or 64 */
++			/* to determine the ep0 maxpacket size, try to read
++			 * the device descriptor to get bMaxPacketSize0 and
++			 * then correct our initial guess.
++			 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++			break;
++		case USB_SPEED_LOW:		/* fixed at 8 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
++			break;
++		default:
++			goto fail;
++		}
+ 	}
+ 
+ 	if (udev->speed == USB_SPEED_WIRELESS)
+@@ -4822,22 +4904,24 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	if (udev->speed < USB_SPEED_SUPER)
+ 		dev_info(&udev->dev,
+ 				"%s %s USB device number %d using %s\n",
+-				(udev->config) ? "reset" : "new", speed,
++				(initial ? "new" : "reset"), speed,
+ 				devnum, driver_name);
+ 
+-	/* Set up TT records, if needed  */
+-	if (hdev->tt) {
+-		udev->tt = hdev->tt;
+-		udev->ttport = hdev->ttport;
+-	} else if (udev->speed != USB_SPEED_HIGH
+-			&& hdev->speed == USB_SPEED_HIGH) {
+-		if (!hub->tt.hub) {
+-			dev_err(&udev->dev, "parent hub has no TT\n");
+-			retval = -EINVAL;
+-			goto fail;
++	if (initial) {
++		/* Set up TT records, if needed  */
++		if (hdev->tt) {
++			udev->tt = hdev->tt;
++			udev->ttport = hdev->ttport;
++		} else if (udev->speed != USB_SPEED_HIGH
++				&& hdev->speed == USB_SPEED_HIGH) {
++			if (!hub->tt.hub) {
++				dev_err(&udev->dev, "parent hub has no TT\n");
++				retval = -EINVAL;
++				goto fail;
++			}
++			udev->tt = &hub->tt;
++			udev->ttport = port1;
+ 		}
+-		udev->tt = &hub->tt;
+-		udev->ttport = port1;
+ 	}
+ 
+ 	/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
+@@ -4861,9 +4945,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		}
+ 
+ 		if (do_new_scheme) {
+-			struct usb_device_descriptor *buf;
+-			int r = 0;
+-
+ 			retval = hub_enable_device(udev);
+ 			if (retval < 0) {
+ 				dev_err(&udev->dev,
+@@ -4872,52 +4953,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				goto fail;
+ 			}
+ 
+-#define GET_DESCRIPTOR_BUFSIZE	64
+-			buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
+-			if (!buf) {
+-				retval = -ENOMEM;
+-				continue;
+-			}
+-
+-			/* Retry on all errors; some devices are flakey.
+-			 * 255 is for WUSB devices, we actually need to use
+-			 * 512 (WUSB1.0[4.8.1]).
+-			 */
+-			for (operations = 0; operations < GET_MAXPACKET0_TRIES;
+-					++operations) {
+-				buf->bMaxPacketSize0 = 0;
+-				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+-					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+-					USB_DT_DEVICE << 8, 0,
+-					buf, GET_DESCRIPTOR_BUFSIZE,
+-					initial_descriptor_timeout);
+-				switch (buf->bMaxPacketSize0) {
+-				case 8: case 16: case 32: case 64: case 255:
+-					if (buf->bDescriptorType ==
+-							USB_DT_DEVICE) {
+-						r = 0;
+-						break;
+-					}
+-					fallthrough;
+-				default:
+-					if (r == 0)
+-						r = -EPROTO;
+-					break;
+-				}
+-				/*
+-				 * Some devices time out if they are powered on
+-				 * when already connected. They need a second
+-				 * reset. But only on the first attempt,
+-				 * lest we get into a time out/reset loop
+-				 */
+-				if (r == 0 || (r == -ETIMEDOUT &&
+-						retries == 0 &&
+-						udev->speed > USB_SPEED_FULL))
+-					break;
++			maxp0 = get_bMaxPacketSize0(udev, buf,
++					GET_DESCRIPTOR_BUFSIZE, retries == 0);
++			if (maxp0 > 0 && !initial &&
++					maxp0 != udev->descriptor.bMaxPacketSize0) {
++				dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
++				retval = -ENODEV;
++				goto fail;
+ 			}
+-			udev->descriptor.bMaxPacketSize0 =
+-					buf->bMaxPacketSize0;
+-			kfree(buf);
+ 
+ 			retval = hub_port_reset(hub, port1, udev, delay, false);
+ 			if (retval < 0)		/* error or disconnect */
+@@ -4928,14 +4971,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				retval = -ENODEV;
+ 				goto fail;
+ 			}
+-			if (r) {
+-				if (r != -ENODEV)
++			if (maxp0 < 0) {
++				if (maxp0 != -ENODEV)
+ 					dev_err(&udev->dev, "device descriptor read/64, error %d\n",
+-							r);
+-				retval = -EMSGSIZE;
++							maxp0);
++				retval = maxp0;
+ 				continue;
+ 			}
+-#undef GET_DESCRIPTOR_BUFSIZE
+ 		}
+ 
+ 		/*
+@@ -4981,18 +5023,22 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				break;
+ 		}
+ 
+-		retval = usb_get_device_descriptor(udev, 8);
+-		if (retval < 8) {
++		/* !do_new_scheme || wusb */
++		maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0);
++		if (maxp0 < 0) {
++			retval = maxp0;
+ 			if (retval != -ENODEV)
+ 				dev_err(&udev->dev,
+ 					"device descriptor read/8, error %d\n",
+ 					retval);
+-			if (retval >= 0)
+-				retval = -EMSGSIZE;
+ 		} else {
+ 			u32 delay;
+ 
+-			retval = 0;
++			if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) {
++				dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
++				retval = -ENODEV;
++				goto fail;
++			}
+ 
+ 			delay = udev->parent->hub_delay;
+ 			udev->hub_delay = min_t(u32, delay,
+@@ -5011,48 +5057,61 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		goto fail;
+ 
+ 	/*
+-	 * Some superspeed devices have finished the link training process
+-	 * and attached to a superspeed hub port, but the device descriptor
+-	 * got from those devices show they aren't superspeed devices. Warm
+-	 * reset the port attached by the devices can fix them.
++	 * Check the ep0 maxpacket guess and correct it if necessary.
++	 * maxp0 is the value stored in the device descriptor;
++	 * i is the value it encodes (logarithmic for SuperSpeed or greater).
+ 	 */
+-	if ((udev->speed >= USB_SPEED_SUPER) &&
+-			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+-		dev_err(&udev->dev, "got a wrong device descriptor, "
+-				"warm reset device\n");
+-		hub_port_reset(hub, port1, udev,
+-				HUB_BH_RESET_TIME, true);
+-		retval = -EINVAL;
+-		goto fail;
+-	}
+-
+-	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+-			udev->speed >= USB_SPEED_SUPER)
+-		i = 512;
+-	else
+-		i = udev->descriptor.bMaxPacketSize0;
+-	if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
+-		if (udev->speed == USB_SPEED_LOW ||
+-				!(i == 8 || i == 16 || i == 32 || i == 64)) {
+-			dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
+-			retval = -EMSGSIZE;
+-			goto fail;
+-		}
++	i = maxp0;
++	if (udev->speed >= USB_SPEED_SUPER) {
++		if (maxp0 <= 16)
++			i = 1 << maxp0;
++		else
++			i = 0;		/* Invalid */
++	}
++	if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
++		;	/* Initial ep0 maxpacket guess is right */
++	} else if ((udev->speed == USB_SPEED_FULL ||
++				udev->speed == USB_SPEED_HIGH) &&
++			(i == 8 || i == 16 || i == 32 || i == 64)) {
++		/* Initial guess is wrong; use the descriptor's value */
+ 		if (udev->speed == USB_SPEED_FULL)
+ 			dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ 		else
+ 			dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
+ 		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
+ 		usb_ep0_reinit(udev);
++	} else {
++		/* Initial guess is wrong and descriptor's value is invalid */
++		dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0);
++		retval = -EMSGSIZE;
++		goto fail;
+ 	}
+ 
+-	retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
+-	if (retval < (signed)sizeof(udev->descriptor)) {
++	descr = usb_get_device_descriptor(udev);
++	if (IS_ERR(descr)) {
++		retval = PTR_ERR(descr);
+ 		if (retval != -ENODEV)
+ 			dev_err(&udev->dev, "device descriptor read/all, error %d\n",
+ 					retval);
+-		if (retval >= 0)
+-			retval = -ENOMSG;
++		goto fail;
++	}
++	if (initial)
++		udev->descriptor = *descr;
++	else
++		*dev_descr = *descr;
++	kfree(descr);
++
++	/*
++	 * Some superspeed devices have finished the link training process
++	 * and attached to a superspeed hub port, but the device descriptor
++	 * retrieved from those devices shows they aren't superspeed devices.
++	 * Warm resetting the port they are attached to can fix them.
++	 */
++	if ((udev->speed >= USB_SPEED_SUPER) &&
++			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
++		dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n");
++		hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true);
++		retval = -EINVAL;
+ 		goto fail;
+ 	}
+ 
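For reference, the hunk above decodes bMaxPacketSize0 in two different ways: at SuperSpeed and above the field is logarithmic (the stored value is an exponent, so 9 encodes 1 << 9 = 512 bytes), while at lower speeds it is the byte count itself. A minimal standalone sketch of that decoding (decode_ep0_maxp is a hypothetical name, not a kernel function):

    #include <stdio.h>

    /* Illustrative decoder for bMaxPacketSize0, mirroring the hunk above. */
    static int decode_ep0_maxp(int maxp0, int superspeed)
    {
        if (superspeed) {
            if (maxp0 <= 16)
                return 1 << maxp0;      /* logarithmic: 9 -> 512 */
            return 0;                   /* invalid exponent */
        }
        return maxp0;                   /* plain byte count */
    }

    int main(void)
    {
        printf("%d\n", decode_ep0_maxp(9, 1));   /* 512 */
        printf("%d\n", decode_ep0_maxp(64, 0));  /* 64 */
        return 0;
    }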
+@@ -5078,6 +5137,7 @@ fail:
+ 		hub_port_disable(hub, port1, 0);
+ 		update_devnum(udev, devnum);	/* for disconnect processing */
+ 	}
++	kfree(buf);
+ 	return retval;
+ }
+ 
+@@ -5158,7 +5218,7 @@ hub_power_remaining(struct usb_hub *hub)
+ 
+ 
+ static int descriptors_changed(struct usb_device *udev,
+-		struct usb_device_descriptor *old_device_descriptor,
++		struct usb_device_descriptor *new_device_descriptor,
+ 		struct usb_host_bos *old_bos)
+ {
+ 	int		changed = 0;
+@@ -5169,8 +5229,8 @@ static int descriptors_changed(struct usb_device *udev,
+ 	int		length;
+ 	char		*buf;
+ 
+-	if (memcmp(&udev->descriptor, old_device_descriptor,
+-			sizeof(*old_device_descriptor)) != 0)
++	if (memcmp(&udev->descriptor, new_device_descriptor,
++			sizeof(*new_device_descriptor)) != 0)
+ 		return 1;
+ 
+ 	if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+@@ -5348,7 +5408,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 		}
+ 
+ 		/* reset (non-USB 3.0 devices) and get descriptor */
+-		status = hub_port_init(hub, udev, port1, i);
++		status = hub_port_init(hub, udev, port1, i, NULL);
+ 		if (status < 0)
+ 			goto loop;
+ 
+@@ -5495,9 +5555,8 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ {
+ 	struct usb_port *port_dev = hub->ports[port1 - 1];
+ 	struct usb_device *udev = port_dev->child;
+-	struct usb_device_descriptor descriptor;
++	struct usb_device_descriptor *descr;
+ 	int status = -ENODEV;
+-	int retval;
+ 
+ 	dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
+ 			portchange, portspeed(hub, portstatus));
+@@ -5524,23 +5583,20 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ 			 * changed device descriptors before resuscitating the
+ 			 * device.
+ 			 */
+-			descriptor = udev->descriptor;
+-			retval = usb_get_device_descriptor(udev,
+-					sizeof(udev->descriptor));
+-			if (retval < 0) {
++			descr = usb_get_device_descriptor(udev);
++			if (IS_ERR(descr)) {
+ 				dev_dbg(&udev->dev,
+-						"can't read device descriptor %d\n",
+-						retval);
++						"can't read device descriptor %ld\n",
++						PTR_ERR(descr));
+ 			} else {
+-				if (descriptors_changed(udev, &descriptor,
++				if (descriptors_changed(udev, descr,
+ 						udev->bos)) {
+ 					dev_dbg(&udev->dev,
+ 							"device descriptor has changed\n");
+-					/* for disconnect() calls */
+-					udev->descriptor = descriptor;
+ 				} else {
+ 					status = 0; /* Nothing to do */
+ 				}
++				kfree(descr);
+ 			}
+ #ifdef CONFIG_PM
+ 		} else if (udev->state == USB_STATE_SUSPENDED &&
+@@ -5982,7 +6038,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	struct usb_device		*parent_hdev = udev->parent;
+ 	struct usb_hub			*parent_hub;
+ 	struct usb_hcd			*hcd = bus_to_hcd(udev->bus);
+-	struct usb_device_descriptor	descriptor = udev->descriptor;
++	struct usb_device_descriptor	descriptor;
+ 	struct usb_host_bos		*bos;
+ 	int				i, j, ret = 0;
+ 	int				port1 = udev->portnum;
+@@ -6018,7 +6074,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 		/* ep0 maxpacket size may change; let the HCD know about it.
+ 		 * Other endpoints will be handled by re-enumeration. */
+ 		usb_ep0_reinit(udev);
+-		ret = hub_port_init(parent_hub, udev, port1, i);
++		ret = hub_port_init(parent_hub, udev, port1, i, &descriptor);
+ 		if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
+ 			break;
+ 	}
+@@ -6030,7 +6086,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	/* Device might have changed firmware (DFU or similar) */
+ 	if (descriptors_changed(udev, &descriptor, bos)) {
+ 		dev_info(&udev->dev, "device firmware changed\n");
+-		udev->descriptor = descriptor;	/* for disconnect() calls */
+ 		goto re_enumerate;
+ 	}
+ 
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index b5811620f1de1..1da8e7ff39830 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1040,40 +1040,35 @@ char *usb_cache_string(struct usb_device *udev, int index)
+ EXPORT_SYMBOL_GPL(usb_cache_string);
+ 
+ /*
+- * usb_get_device_descriptor - (re)reads the device descriptor (usbcore)
+- * @dev: the device whose device descriptor is being updated
+- * @size: how much of the descriptor to read
++ * usb_get_device_descriptor - read the device descriptor
++ * @udev: the device whose device descriptor should be read
+  *
+  * Context: task context, might sleep.
+  *
+- * Updates the copy of the device descriptor stored in the device structure,
+- * which dedicates space for this purpose.
+- *
+  * Not exported, only for use by the core.  If drivers really want to read
+  * the device descriptor directly, they can call usb_get_descriptor() with
+  * type = USB_DT_DEVICE and index = 0.
+  *
+- * This call is synchronous, and may not be used in an interrupt context.
+- *
+- * Return: The number of bytes received on success, or else the status code
+- * returned by the underlying usb_control_msg() call.
++ * Returns: a pointer to a dynamically allocated usb_device_descriptor
++ * structure (which the caller must deallocate), or an ERR_PTR value.
+  */
+-int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
++struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev)
+ {
+ 	struct usb_device_descriptor *desc;
+ 	int ret;
+ 
+-	if (size > sizeof(*desc))
+-		return -EINVAL;
+ 	desc = kmalloc(sizeof(*desc), GFP_NOIO);
+ 	if (!desc)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
++
++	ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc));
++	if (ret == sizeof(*desc))
++		return desc;
+ 
+-	ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size);
+ 	if (ret >= 0)
+-		memcpy(&dev->descriptor, desc, size);
++		ret = -EMSGSIZE;
+ 	kfree(desc);
+-	return ret;
++	return ERR_PTR(ret);
+ }
+ 
+ /*
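usb_get_device_descriptor() now returns either a valid pointer or an error encoded in the pointer itself; callers unpack it with IS_ERR()/PTR_ERR() and must kfree() the result on success. The kernel packs negative errno values into the top page of the address space. A self-contained userspace re-implementation of the idiom, as a sketch that assumes (as the kernel guarantees for itself) the last page is never a valid allocation:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical stand-in for a descriptor read that can fail. */
    static char *get_descriptor(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : strdup("descriptor");
    }

    int main(void)
    {
        char *d = get_descriptor(0);

        if (IS_ERR(d))
            return (int)-PTR_ERR(d);
        puts(d);
        free(d);    /* the caller owns the allocation, as in the patch */
        return 0;
    }

The same encoding is why the fs/namei.c hunk later in this patch tests IS_ERR_OR_NULL(): d_hash_and_lookup() can return NULL, a valid dentry, or an ERR_PTR, and a bare NULL check lets error pointers through.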
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index ffe3f6818e9cf..4a16d559d3bff 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -43,8 +43,8 @@ extern bool usb_endpoint_is_ignored(struct usb_device *udev,
+ 		struct usb_endpoint_descriptor *epd);
+ extern int usb_remove_device(struct usb_device *udev);
+ 
+-extern int usb_get_device_descriptor(struct usb_device *dev,
+-		unsigned int size);
++extern struct usb_device_descriptor *usb_get_device_descriptor(
++		struct usb_device *udev);
+ extern int usb_set_isoch_delay(struct usb_device *dev);
+ extern int usb_get_bos_descriptor(struct usb_device *dev);
+ extern void usb_release_bos_descriptor(struct usb_device *dev);
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index da07e45ae6df5..722a3ab2b3379 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -927,7 +927,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
+ {
+ 	struct file	*filp = curlun->filp;
+ 	struct inode	*inode = file_inode(filp);
+-	unsigned long	rc;
++	unsigned long __maybe_unused	rc;
+ 
+ 	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+ 	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 7d49d8a0b00c2..7166d1117742a 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -40,6 +40,7 @@ static const struct bus_type gadget_bus_type;
+  * @allow_connect: Indicates whether UDC is allowed to be pulled up.
+  * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
+  * unbound.
++ * @vbus_work: work routine to handle VBUS status change notifications.
+  * @connect_lock: protects udc->started, gadget->connect,
+  * gadget->allow_connect and gadget->deactivate. The routines
+  * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index e1a2b2ea098b5..cceabb9d37e98 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -388,14 +388,8 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+ 
+ static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+ {
+-	void __iomem *base = mxs_phy->phy.io_priv;
+-	u32 phyctrl = readl(base + HW_USBPHY_CTRL);
+-
+-	if (IS_ENABLED(CONFIG_USB_OTG) &&
+-			!(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE))
+-		return true;
+-
+-	return false;
++	return IS_ENABLED(CONFIG_USB_OTG) &&
++		mxs_phy->phy.last_event == USB_EVENT_ID;
+ }
+ 
+ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index fe5b9a2e61f58..e95ec7e382bb7 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -183,12 +183,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
+  *
+  * Notifies the partner of @adev about Attention command.
+  */
+-void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
++int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
+ {
+-	struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
++	struct altmode *partner = to_altmode(adev)->partner;
++	struct typec_altmode *pdev;
++
++	if (!partner)
++		return -ENODEV;
++
++	pdev = &partner->adev;
+ 
+ 	if (pdev->ops && pdev->ops->attention)
+ 		pdev->ops->attention(pdev, vdo);
++
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(typec_altmode_attention);
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index bf97b81ff5b07..1596afee6c86f 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1877,7 +1877,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
+ 			}
+ 			break;
+ 		case ADEV_ATTENTION:
+-			typec_altmode_attention(adev, p[1]);
++			if (typec_altmode_attention(adev, p[1]))
++				tcpm_log(port, "typec_altmode_attention no port partner altmode");
+ 			break;
+ 		}
+ 	}
+@@ -3935,6 +3936,29 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
+ 	}
+ }
+ 
++static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
++{
++	switch (port->negotiated_rev) {
++	case PD_REV30:
++		break;
++	/*
++	 * 6.4.4.2.3 Structured VDM Version
++	 * 2.0 states "At this time, there is only one version (1.0) defined.
++	 * This field Shall be set to zero to indicate Version 1.0."
++	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
++	 * To ensure that we follow the Power Delivery revision we are currently
++	 * operating on, downgrade the SVDM version to the highest one supported
++	 * by the Power Delivery revision.
++	 */
++	case PD_REV20:
++		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
++		break;
++	default:
++		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
++		break;
++	}
++}
++
+ static void run_state_machine(struct tcpm_port *port)
+ {
+ 	int ret;
+@@ -4172,10 +4196,12 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ 		 * port->explicit_contract to decide whether to send the command.
+ 		 */
+-		if (port->explicit_contract)
++		if (port->explicit_contract) {
++			tcpm_set_initial_svdm_version(port);
+ 			mod_send_discover_delayed_work(port, 0);
+-		else
++		} else {
+ 			port->send_discover = false;
++		}
+ 
+ 		/*
+ 		 * 6.3.5
+@@ -4462,10 +4488,12 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ 		 * port->explicit_contract.
+ 		 */
+-		if (port->explicit_contract)
++		if (port->explicit_contract) {
++			tcpm_set_initial_svdm_version(port);
+ 			mod_send_discover_delayed_work(port, 0);
+-		else
++		} else {
+ 			port->send_discover = false;
++		}
+ 
+ 		power_supply_changed(port->psy);
+ 		break;
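The new tcpm_set_initial_svdm_version() pins the Structured VDM version to what the negotiated Power Delivery revision can carry before any DISCOVER_IDENTITY goes out: PD 2.0 only defines SVDM 1.0, while PD 3.0 allows SVDM 2.0. A compact restatement of the mapping (the enum values here are made up for the sketch; the kernel has its own definitions):

    enum pd_rev   { PD_REV20, PD_REV30 };
    enum svdm_ver { SVDM_VER_1_0, SVDM_VER_2_0 };

    /* Highest SVDM version a given PD revision permits. */
    static enum svdm_ver max_svdm_for(enum pd_rev rev)
    {
        switch (rev) {
        case PD_REV30:
            return SVDM_VER_2_0;    /* PD 3.0: "Shall be set to 01b" */
        case PD_REV20:
        default:
            return SVDM_VER_1_0;    /* PD 2.0 defines only version 1.0 */
        }
    }

Note the patch itself only ever downgrades: PD 3.0 partners are left untouched, since SVDM 2.0 may legitimately be in use there.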
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index ebe0ad31d0b03..d662aa9d1b4b6 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -2732,7 +2732,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
+ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
+ 					   struct vfio_info_cap *caps)
+ {
+-	struct vfio_iommu_type1_info_cap_migration cap_mig;
++	struct vfio_iommu_type1_info_cap_migration cap_mig = {};
+ 
+ 	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
+ 	cap_mig.header.version = 1;
+diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
+index 7df25faa07a59..40979ee7133aa 100644
+--- a/drivers/video/backlight/bd6107.c
++++ b/drivers/video/backlight/bd6107.c
+@@ -104,7 +104,7 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
+ {
+ 	struct bd6107 *bd = bl_get_data(backlight);
+ 
+-	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
++	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops bd6107_backlight_ops = {
+diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
+index 6f78d928f054a..5c5c99f7979e3 100644
+--- a/drivers/video/backlight/gpio_backlight.c
++++ b/drivers/video/backlight/gpio_backlight.c
+@@ -35,7 +35,7 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
+ {
+ 	struct gpio_backlight *gbl = bl_get_data(bl);
+ 
+-	return gbl->fbdev == NULL || gbl->fbdev == info->dev;
++	return gbl->fbdev == NULL || gbl->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops gpio_backlight_ops = {
+diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
+index 56695ce67e480..dce2983315444 100644
+--- a/drivers/video/backlight/lv5207lp.c
++++ b/drivers/video/backlight/lv5207lp.c
+@@ -67,7 +67,7 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
+ {
+ 	struct lv5207lp *lv = bl_get_data(backlight);
+ 
+-	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
++	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops lv5207lp_backlight_ops = {
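All three backlight fixes above swap info->dev for info->device. In the fbdev core's struct fb_info those are different things: dev is the fbN class device that the framebuffer layer itself registers, while device points at the underlying hardware (parent) device, which is what these drivers' platform data refers to. In rough outline (not the full kernel definition):

    /* Sketch of the two easily-confused fields in struct fb_info. */
    struct fb_info_like {
        struct device *dev;     /* fbN device created by the fbdev core */
        struct device *device;  /* parent hardware device (from probe) */
    };

Comparing against the wrong field meant the check could never match the intended framebuffer when platform data supplied the hardware device.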
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index c5310eaf8b468..da1150d127c24 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1461,7 +1461,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
+ 		}
+ 	}
+ 
+-	if (i < head)
++	if (i <= head)
+ 		vq->packed.avail_wrap_counter ^= 1;
+ 
+ 	/* We're using some buffers from the free list. */
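The one-character virtio_ring fix matters when a descriptor chain exactly fills the packed ring: the next-free index wraps all the way around and lands back on the chain's own head, and the avail wrap counter still has to flip, so the comparison must be i <= head rather than i < head. A toy model of the bookkeeping:

    #include <stdio.h>

    int main(void)
    {
        unsigned ring = 4, head = 0, n = 4;  /* chain fills the ring */
        unsigned i = head + n;
        int wrap = 1;

        if (i >= ring)
            i -= ring;          /* index wrapped past the ring end */
        if (i <= head)          /* "<" would miss the i == head case */
            wrap ^= 1;

        printf("next free %u, wrap %d\n", i, wrap);  /* 0, 0 */
        return 0;
    }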
+diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
+index 961161da59000..06ce6d8c2e004 100644
+--- a/drivers/virtio/virtio_vdpa.c
++++ b/drivers/virtio/virtio_vdpa.c
+@@ -366,11 +366,14 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ 	struct irq_affinity default_affd = { 0 };
+ 	struct cpumask *masks;
+ 	struct vdpa_callback cb;
++	bool has_affinity = desc && ops->set_vq_affinity;
+ 	int i, err, queue_idx = 0;
+ 
+-	masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
+-	if (!masks)
+-		return -ENOMEM;
++	if (has_affinity) {
++		masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
++		if (!masks)
++			return -ENOMEM;
++	}
+ 
+ 	for (i = 0; i < nvqs; ++i) {
+ 		if (!names[i]) {
+@@ -386,20 +389,22 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ 			goto err_setup_vq;
+ 		}
+ 
+-		if (ops->set_vq_affinity)
++		if (has_affinity)
+ 			ops->set_vq_affinity(vdpa, i, &masks[i]);
+ 	}
+ 
+ 	cb.callback = virtio_vdpa_config_cb;
+ 	cb.private = vd_dev;
+ 	ops->set_config_cb(vdpa, &cb);
+-	kfree(masks);
++	if (has_affinity)
++		kfree(masks);
+ 
+ 	return 0;
+ 
+ err_setup_vq:
+ 	virtio_vdpa_del_vqs(vdev);
+-	kfree(masks);
++	if (has_affinity)
++		kfree(masks);
+ 	return err;
+ }
+ 
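The virtio_vdpa change allocates the affinity masks only when a caller supplied an irq_affinity descriptor and the backend can honor it, and mirrors that predicate at every kfree() so the error path never frees an uninitialized pointer. The shape of the pattern, as a standalone sketch:

    #include <stdlib.h>

    /* One predicate decides both the allocation and every matching free. */
    static int setup(int want_affinity)
    {
        int *masks;    /* only valid while want_affinity is true */

        if (want_affinity) {
            masks = calloc(16, sizeof(*masks));
            if (!masks)
                return -1;    /* -ENOMEM in the kernel */
        }

        /* ... touch masks only under the same predicate ... */

        if (want_affinity)
            free(masks);
        return 0;
    }

    int main(void) { return setup(1); }

Initializing the pointer to NULL and freeing unconditionally would also be safe; the patch keeps the guard explicit instead.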
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 72b90bc19a191..2490301350015 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1707,10 +1707,21 @@ void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
+ {
+ 	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+-	struct btrfs_ordered_sum *sum =
+-		list_first_entry(&ordered->list, typeof(*sum), list);
+-	u64 logical = sum->logical;
+-	u64 len = sum->len;
++	struct btrfs_ordered_sum *sum;
++	u64 logical, len;
++
++	/*
++	 * A write to a pre-allocated region is for data relocation, so it
++	 * should use the WRITE operation. No split/rewrite is necessary.
++	 */
++	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
++		return;
++
++	ASSERT(!list_empty(&ordered->list));
++	/* The ordered->list can be empty in the above pre-alloc case. */
++	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
++	logical = sum->logical;
++	len = sum->len;
+ 
+ 	while (len < ordered->disk_num_bytes) {
+ 		sum = list_next_entry(sum, list);
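The early return above is what makes the ASSERT safe: list_first_entry() never checks for emptiness, it only does pointer arithmetic on head->next, so on an empty list it hands back the list head itself reinterpreted as an entry. A userspace illustration of why the guard is needed:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct item { int value; struct list_head list; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

    int main(void)
    {
        struct list_head head = { &head, &head };   /* empty list */

        /* head.next == &head, so this "entry" is the head itself,
         * misinterpreted; reading its fields would yield garbage. */
        struct item *it = list_first_entry(&head, struct item, list);

        printf("bogus entry %p, head %p\n", (void *)it, (void *)&head);
        return 0;
    }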
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 70a4752ed913a..fd603e06d07fe 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -456,7 +456,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 		}
+ 	} else {
+ 		list_for_each_entry(iter, &recv_list, list) {
+-			if (!iter->info.wait) {
++			if (!iter->info.wait &&
++			    iter->info.fsid == info.fsid) {
+ 				op = iter;
+ 				break;
+ 			}
+@@ -468,8 +469,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 		if (info.wait)
+ 			WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+ 		else
+-			WARN_ON(op->info.fsid != info.fsid ||
+-				op->info.number != info.number ||
++			WARN_ON(op->info.number != info.number ||
+ 				op->info.owner != info.owner ||
+ 				op->info.optype != info.optype);
+ 
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 9c9350eb17040..9bfdb4ad7c763 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1412,7 +1412,10 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ 		owned = READ_ONCE(be.pcl->next);
+ 
+ 		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
+-		erofs_workgroup_put(&be.pcl->obj);
++		if (z_erofs_is_inline_pcluster(be.pcl))
++			z_erofs_free_pcluster(be.pcl);
++		else
++			erofs_workgroup_put(&be.pcl->obj);
+ 	}
+ }
+ 
+diff --git a/fs/eventfd.c b/fs/eventfd.c
+index 8aa36cd373516..33a918f9566c3 100644
+--- a/fs/eventfd.c
++++ b/fs/eventfd.c
+@@ -189,7 +189,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+ {
+ 	lockdep_assert_held(&ctx->wqh.lock);
+ 
+-	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
++	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
+ 	ctx->count -= *cnt;
+ }
+ EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
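With the fix, an EFD_SEMAPHORE read only reports consuming a count of 1 when the counter really is nonzero; previously a zero-count call could report 1 and underflow ctx->count. The semaphore semantics as seen from userspace (Linux-specific):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = eventfd(3, EFD_SEMAPHORE);  /* counter starts at 3 */
        uint64_t v;

        if (fd < 0)
            return 1;
        if (read(fd, &v, sizeof(v)) != sizeof(v))  /* counter: 3 -> 2 */
            return 1;
        printf("read %llu\n", (unsigned long long)v);  /* always 1 */
        close(fd);
        return 0;
    }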
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 21b903fe546e8..a197ef71b7b02 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1080,8 +1080,9 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
+  * Return next linear group for allocation. If linear traversal should not be
+  * performed, this function just returns the same group
+  */
+-static int
+-next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
++static ext4_group_t
++next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
++		  ext4_group_t ngroups)
+ {
+ 	if (!should_optimize_scan(ac))
+ 		goto inc_and_return;
+@@ -2553,7 +2554,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+ 
+ 	BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
+ 
+-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
++	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ 		return false;
+ 
+ 	free = grp->bb_free;
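Switching next_linear_group() to ext4_group_t keeps the arithmetic in the unsigned 32-bit type mballoc uses everywhere else for group numbers, instead of a plain int. The traversal step itself is just advance-and-wrap; restated standalone:

    #include <stdio.h>

    typedef unsigned int ext4_group_t;    /* as in the ext4 headers */

    /* Next group for a linear scan, wrapping at ngroups. */
    static ext4_group_t next_group(ext4_group_t group, ext4_group_t ngroups)
    {
        return group + 1 >= ngroups ? 0 : group + 1;
    }

    int main(void)
    {
        printf("%u\n", next_group(7, 8));    /* wraps to 0 */
        return 0;
    }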
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 0caf6c730ce34..6bcc3770ee19f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2799,6 +2799,7 @@ static int ext4_add_nondir(handle_t *handle,
+ 		return err;
+ 	}
+ 	drop_nlink(inode);
++	ext4_mark_inode_dirty(handle, inode);
+ 	ext4_orphan_add(handle, inode);
+ 	unlock_new_inode(inode);
+ 	return err;
+@@ -3436,6 +3437,7 @@ retry:
+ 
+ err_drop_inode:
+ 	clear_nlink(inode);
++	ext4_mark_inode_dirty(handle, inode);
+ 	ext4_orphan_add(handle, inode);
+ 	unlock_new_inode(inode);
+ 	if (handle)
+@@ -4021,6 +4023,7 @@ end_rename:
+ 			ext4_resetent(handle, &old,
+ 				      old.inode->i_ino, old_file_type);
+ 			drop_nlink(whiteout);
++			ext4_mark_inode_dirty(handle, whiteout);
+ 			ext4_orphan_add(handle, whiteout);
+ 		}
+ 		unlock_new_inode(whiteout);
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 8fd3b7f9fb88e..b0597a539fc54 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1701,9 +1701,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	}
+ 
+ 	f2fs_restore_inmem_curseg(sbi);
++	stat_inc_cp_count(sbi);
+ stop:
+ 	unblock_operations(sbi);
+-	stat_inc_cp_count(sbi->stat_info);
+ 
+ 	if (cpc->reason & CP_RECOVERY)
+ 		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index 61c35b59126ec..fdbf994f12718 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -215,6 +215,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
+ 		si->valid_blks[type] += blks;
+ 	}
+ 
++	for (i = 0; i < MAX_CALL_TYPE; i++)
++		si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);
++
+ 	for (i = 0; i < 2; i++) {
+ 		si->segment_count[i] = sbi->segment_count[i];
+ 		si->block_count[i] = sbi->block_count[i];
+@@ -497,7 +500,9 @@ static int stat_show(struct seq_file *s, void *v)
+ 		seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
+ 			   si->prefree_count, si->free_segs, si->free_secs);
+ 		seq_printf(s, "CP calls: %d (BG: %d)\n",
+-				si->cp_count, si->bg_cp_count);
++			   si->cp_call_count[TOTAL_CALL],
++			   si->cp_call_count[BACKGROUND]);
++		seq_printf(s, "CP count: %d\n", si->cp_count);
+ 		seq_printf(s, "  - cp blocks : %u\n", si->meta_count[META_CP]);
+ 		seq_printf(s, "  - sit blocks : %u\n",
+ 				si->meta_count[META_SIT]);
+@@ -511,12 +516,24 @@ static int stat_show(struct seq_file *s, void *v)
+ 		seq_printf(s, "  - Total : %4d\n", si->nr_total_ckpt);
+ 		seq_printf(s, "  - Cur time : %4d(ms)\n", si->cur_ckpt_time);
+ 		seq_printf(s, "  - Peak time : %4d(ms)\n", si->peak_ckpt_time);
+-		seq_printf(s, "GC calls: %d (BG: %d)\n",
+-			   si->call_count, si->bg_gc);
+-		seq_printf(s, "  - data segments : %d (%d)\n",
+-				si->data_segs, si->bg_data_segs);
+-		seq_printf(s, "  - node segments : %d (%d)\n",
+-				si->node_segs, si->bg_node_segs);
++		seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
++			   si->gc_call_count[BACKGROUND] +
++			   si->gc_call_count[FOREGROUND],
++			   si->gc_call_count[BACKGROUND]);
++		if (__is_large_section(sbi)) {
++			seq_printf(s, "  - data sections : %d (BG: %d)\n",
++					si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
++					si->gc_secs[DATA][BG_GC]);
++			seq_printf(s, "  - node sections : %d (BG: %d)\n",
++					si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
++					si->gc_secs[NODE][BG_GC]);
++		}
++		seq_printf(s, "  - data segments : %d (BG: %d)\n",
++				si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
++				si->gc_segs[DATA][BG_GC]);
++		seq_printf(s, "  - node segments : %d (BG: %d)\n",
++				si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
++				si->gc_segs[NODE][BG_GC]);
+ 		seq_puts(s, "  - Reclaimed segs :\n");
+ 		seq_printf(s, "    - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
+ 		seq_printf(s, "    - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
+@@ -687,6 +704,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
+ 	atomic_set(&sbi->inplace_count, 0);
+ 	for (i = META_CP; i < META_MAX; i++)
+ 		atomic_set(&sbi->meta_count[i], 0);
++	for (i = 0; i < MAX_CALL_TYPE; i++)
++		atomic_set(&sbi->cp_call_count[i], 0);
+ 
+ 	atomic_set(&sbi->max_aw_cnt, 0);
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index c7cb2177b2527..c602ff2403b67 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1383,6 +1383,13 @@ enum errors_option {
+ 	MOUNT_ERRORS_PANIC,	/* panic on errors */
+ };
+ 
++enum {
++	BACKGROUND,
++	FOREGROUND,
++	MAX_CALL_TYPE,
++	TOTAL_CALL = FOREGROUND,
++};
++
+ static inline int f2fs_test_bit(unsigned int nr, char *addr);
+ static inline void f2fs_set_bit(unsigned int nr, char *addr);
+ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
+@@ -1695,6 +1702,7 @@ struct f2fs_sb_info {
+ 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
+ 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
+ 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
++	atomic_t cp_call_count[MAX_CALL_TYPE];	/* # of cp call */
+ #endif
+ 	spinlock_t stat_lock;			/* lock for stat operations */
+ 
+@@ -3885,7 +3893,7 @@ struct f2fs_stat_info {
+ 	int nats, dirty_nats, sits, dirty_sits;
+ 	int free_nids, avail_nids, alloc_nids;
+ 	int total_count, utilization;
+-	int bg_gc, nr_wb_cp_data, nr_wb_data;
++	int nr_wb_cp_data, nr_wb_data;
+ 	int nr_rd_data, nr_rd_node, nr_rd_meta;
+ 	int nr_dio_read, nr_dio_write;
+ 	unsigned int io_skip_bggc, other_skip_bggc;
+@@ -3905,9 +3913,11 @@ struct f2fs_stat_info {
+ 	int rsvd_segs, overp_segs;
+ 	int dirty_count, node_pages, meta_pages, compress_pages;
+ 	int compress_page_hit;
+-	int prefree_count, call_count, cp_count, bg_cp_count;
+-	int tot_segs, node_segs, data_segs, free_segs, free_secs;
+-	int bg_node_segs, bg_data_segs;
++	int prefree_count, free_segs, free_secs;
++	int cp_call_count[MAX_CALL_TYPE], cp_count;
++	int gc_call_count[MAX_CALL_TYPE];
++	int gc_segs[2][2];
++	int gc_secs[2][2];
+ 	int tot_blks, data_blks, node_blks;
+ 	int bg_data_blks, bg_node_blks;
+ 	int curseg[NR_CURSEG_TYPE];
+@@ -3929,10 +3939,9 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+ 	return (struct f2fs_stat_info *)sbi->stat_info;
+ }
+ 
+-#define stat_inc_cp_count(si)		((si)->cp_count++)
+-#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
+-#define stat_inc_call_count(si)		((si)->call_count++)
+-#define stat_inc_bggc_count(si)		((si)->bg_gc++)
++#define stat_inc_cp_call_count(sbi, foreground)				\
++		atomic_inc(&sbi->cp_call_count[(foreground)])
++#define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
+ #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
+ #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
+ #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
+@@ -4017,18 +4026,12 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+ 		if (cur > max)						\
+ 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
+ 	} while (0)
+-#define stat_inc_seg_count(sbi, type, gc_type)				\
+-	do {								\
+-		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
+-		si->tot_segs++;						\
+-		if ((type) == SUM_TYPE_DATA) {				\
+-			si->data_segs++;				\
+-			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
+-		} else {						\
+-			si->node_segs++;				\
+-			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
+-		}							\
+-	} while (0)
++#define stat_inc_gc_call_count(sbi, foreground)				\
++		(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
++#define stat_inc_gc_sec_count(sbi, type, gc_type)			\
++		(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
++#define stat_inc_gc_seg_count(sbi, type, gc_type)			\
++		(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
+ 
+ #define stat_inc_tot_blk_count(si, blks)				\
+ 	((si)->tot_blks += (blks))
+@@ -4055,10 +4058,8 @@ void __init f2fs_create_root_stats(void);
+ void f2fs_destroy_root_stats(void);
+ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
+ #else
+-#define stat_inc_cp_count(si)				do { } while (0)
+-#define stat_inc_bg_cp_count(si)			do { } while (0)
+-#define stat_inc_call_count(si)				do { } while (0)
+-#define stat_inc_bggc_count(si)				do { } while (0)
++#define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
++#define stat_inc_cp_count(sbi)				do { } while (0)
+ #define stat_io_skip_bggc_count(sbi)			do { } while (0)
+ #define stat_other_skip_bggc_count(sbi)			do { } while (0)
+ #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
+@@ -4086,7 +4087,9 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
+ #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
+ #define stat_inc_block_count(sbi, curseg)		do { } while (0)
+ #define stat_inc_inplace_blocks(sbi)			do { } while (0)
+-#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
++#define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
++#define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
++#define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
+ #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
+ #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
+ #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
+@@ -4423,6 +4426,22 @@ static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+ }
+ #endif
+ 
++static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
++				  struct block_device *bdev)
++{
++	int i;
++
++	if (!f2fs_is_multi_device(sbi))
++		return 0;
++
++	for (i = 0; i < sbi->s_ndevs; i++)
++		if (FDEV(i).bdev == bdev)
++			return i;
++
++	WARN_ON(1);
++	return -1;
++}
++
+ static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
+ {
+ 	return f2fs_sb_has_blkzoned(sbi);
+@@ -4483,7 +4502,8 @@ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+ static inline bool f2fs_may_compress(struct inode *inode)
+ {
+ 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
+-		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
++		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
++		f2fs_is_mmap_file(inode))
+ 		return false;
+ 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+ }
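The new enum deliberately aliases TOTAL_CALL to FOREGROUND: as wired up in this patch, every checkpoint invocation bumps the TOTAL_CALL slot (the background path in segment.c reaches it via f2fs_sync_fs()), background callers additionally bump BACKGROUND, and the foreground figure is derived by subtraction, exactly what the sysfs hunk later in this patch does. A toy restatement:

    #include <stdio.h>

    enum { BACKGROUND, FOREGROUND, MAX_CALL_TYPE, TOTAL_CALL = FOREGROUND };

    static int calls[MAX_CALL_TYPE];

    static void checkpoint(int background)
    {
        if (background)
            calls[BACKGROUND]++;
        calls[TOTAL_CALL]++;    /* same slot as FOREGROUND */
    }

    int main(void)
    {
        checkpoint(0);
        checkpoint(1);
        checkpoint(1);
        printf("total %d, bg %d, fg %d\n",
               calls[TOTAL_CALL], calls[BACKGROUND],
               calls[TOTAL_CALL] - calls[BACKGROUND]);  /* 3, 2, 1 */
        return 0;
    }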
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 093039dee9920..ea4a094c518f9 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -526,7 +526,11 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 	file_accessed(file);
+ 	vma->vm_ops = &f2fs_file_vm_ops;
++
++	f2fs_down_read(&F2FS_I(inode)->i_sem);
+ 	set_inode_flag(inode, FI_MMAP_FILE);
++	f2fs_up_read(&F2FS_I(inode)->i_sem);
++
+ 	return 0;
+ }
+ 
+@@ -1724,6 +1728,7 @@ next_alloc:
+ 		if (has_not_enough_free_secs(sbi, 0,
+ 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ 			f2fs_down_write(&sbi->gc_lock);
++			stat_inc_gc_call_count(sbi, FOREGROUND);
+ 			err = f2fs_gc(sbi, &gc_control);
+ 			if (err && err != -ENODATA)
+ 				goto out_err;
+@@ -1919,12 +1924,19 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ 			int err = f2fs_convert_inline_inode(inode);
+ 			if (err)
+ 				return err;
+-			if (!f2fs_may_compress(inode))
+-				return -EINVAL;
+-			if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
++
++			f2fs_down_write(&F2FS_I(inode)->i_sem);
++			if (!f2fs_may_compress(inode) ||
++					(S_ISREG(inode->i_mode) &&
++					F2FS_HAS_BLOCKS(inode))) {
++				f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 				return -EINVAL;
+-			if (set_compress_context(inode))
+-				return -EOPNOTSUPP;
++			}
++			err = set_compress_context(inode);
++			f2fs_up_write(&F2FS_I(inode)->i_sem);
++
++			if (err)
++				return err;
+ 		}
+ 	}
+ 
+@@ -2465,6 +2477,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+ 
+ 	gc_control.init_gc_type = sync ? FG_GC : BG_GC;
+ 	gc_control.err_gc_skipped = sync;
++	stat_inc_gc_call_count(sbi, FOREGROUND);
+ 	ret = f2fs_gc(sbi, &gc_control);
+ out:
+ 	mnt_drop_write_file(filp);
+@@ -2508,6 +2521,7 @@ do_more:
+ 	}
+ 
+ 	gc_control.victim_segno = GET_SEGNO(sbi, range->start);
++	stat_inc_gc_call_count(sbi, FOREGROUND);
+ 	ret = f2fs_gc(sbi, &gc_control);
+ 	if (ret) {
+ 		if (ret == -EBUSY)
+@@ -2990,6 +3004,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+ 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
+ 
+ 		gc_control.victim_segno = start_segno;
++		stat_inc_gc_call_count(sbi, FOREGROUND);
+ 		ret = f2fs_gc(sbi, &gc_control);
+ 		if (ret == -EAGAIN)
+ 			ret = 0;
+@@ -3976,6 +3991,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 	file_start_write(filp);
+ 	inode_lock(inode);
+ 
++	f2fs_down_write(&F2FS_I(inode)->i_sem);
+ 	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
+ 		ret = -EBUSY;
+ 		goto out;
+@@ -3995,6 +4011,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 		f2fs_warn(sbi, "compression algorithm is successfully set, "
+ 			"but current kernel doesn't support this algorithm.");
+ out:
++	f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 	inode_unlock(inode);
+ 	file_end_write(filp);
+ 
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 01effd3fcb6c7..6690323fff83b 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -121,8 +121,8 @@ static int gc_thread_func(void *data)
+ 		else
+ 			increase_sleep_time(gc_th, &wait_ms);
+ do_gc:
+-		if (!foreground)
+-			stat_inc_bggc_count(sbi->stat_info);
++		stat_inc_gc_call_count(sbi, foreground ?
++					FOREGROUND : BACKGROUND);
+ 
+ 		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ 
+@@ -1685,6 +1685,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 	int seg_freed = 0, migrated = 0;
+ 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ 						SUM_TYPE_DATA : SUM_TYPE_NODE;
++	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
+ 	int submitted = 0;
+ 
+ 	if (__is_large_section(sbi))
+@@ -1766,7 +1767,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 							segno, gc_type,
+ 							force_migrate);
+ 
+-		stat_inc_seg_count(sbi, type, gc_type);
++		stat_inc_gc_seg_count(sbi, data_type, gc_type);
+ 		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
+ 		migrated++;
+ 
+@@ -1783,12 +1784,12 @@ skip:
+ 	}
+ 
+ 	if (submitted)
+-		f2fs_submit_merged_write(sbi,
+-				(type == SUM_TYPE_NODE) ? NODE : DATA);
++		f2fs_submit_merged_write(sbi, data_type);
+ 
+ 	blk_finish_plug(&plug);
+ 
+-	stat_inc_call_count(sbi->stat_info);
++	if (migrated)
++		stat_inc_gc_sec_count(sbi, data_type, gc_type);
+ 
+ 	return seg_freed;
+ }
+@@ -1839,6 +1840,7 @@ gc_more:
+ 		 * secure free segments which doesn't need fggc any more.
+ 		 */
+ 		if (prefree_segments(sbi)) {
++			stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 			ret = f2fs_write_checkpoint(sbi, &cpc);
+ 			if (ret)
+ 				goto stop;
+@@ -1887,6 +1889,7 @@ retry:
+ 		round++;
+ 		if (skipped_round > MAX_SKIP_GC_COUNT &&
+ 				skipped_round * 2 >= round) {
++			stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 			ret = f2fs_write_checkpoint(sbi, &cpc);
+ 			goto stop;
+ 		}
+@@ -1902,6 +1905,7 @@ retry:
+ 	 */
+ 	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
+ 				prefree_segments(sbi)) {
++		stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 		ret = f2fs_write_checkpoint(sbi, &cpc);
+ 		if (ret)
+ 			goto stop;
+@@ -2029,6 +2033,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
+ 	if (gc_only)
+ 		goto out;
+ 
++	stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 	err = f2fs_write_checkpoint(sbi, &cpc);
+ 	if (err)
+ 		goto out;
+@@ -2221,6 +2226,7 @@ out_drop_write:
+ 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ 	set_sbi_flag(sbi, SBI_IS_DIRTY);
+ 
++	stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 	err = f2fs_write_checkpoint(sbi, &cpc);
+ 	if (err) {
+ 		update_fs_metadata(sbi, secs);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 09e986b050c61..e81725c922cd4 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -475,6 +475,12 @@ static int do_read_inode(struct inode *inode)
+ 		fi->i_inline_xattr_size = 0;
+ 	}
+ 
++	if (!sanity_check_inode(inode, node_page)) {
++		f2fs_put_page(node_page, 1);
++		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
++		return -EFSCORRUPTED;
++	}
++
+ 	/* check data exist */
+ 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+ 		__recover_inline_status(inode, node_page);
+@@ -544,12 +550,6 @@ static int do_read_inode(struct inode *inode)
+ 	f2fs_init_read_extent_tree(inode, node_page);
+ 	f2fs_init_age_extent_tree(inode);
+ 
+-	if (!sanity_check_inode(inode, node_page)) {
+-		f2fs_put_page(node_page, 1);
+-		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	if (!sanity_check_extent_cache(inode)) {
+ 		f2fs_put_page(node_page, 1);
+ 		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 4e7d4ceeb084c..e91f4619aa5bb 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -924,6 +924,7 @@ skip:
+ 			struct cp_control cpc = {
+ 				.reason = CP_RECOVERY,
+ 			};
++			stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 			err = f2fs_write_checkpoint(sbi, &cpc);
+ 		}
+ 	}
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 0457d620011f6..be08be6f4bfd6 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -433,6 +433,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
+ 			.err_gc_skipped = false,
+ 			.nr_free_secs = 1 };
+ 		f2fs_down_write(&sbi->gc_lock);
++		stat_inc_gc_call_count(sbi, FOREGROUND);
+ 		f2fs_gc(sbi, &gc_control);
+ 	}
+ }
+@@ -510,8 +511,8 @@ do_sync:
+ 
+ 		mutex_unlock(&sbi->flush_lock);
+ 	}
++	stat_inc_cp_call_count(sbi, BACKGROUND);
+ 	f2fs_sync_fs(sbi->sb, 1);
+-	stat_inc_bg_cp_count(sbi->stat_info);
+ }
+ 
+ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
+@@ -1258,8 +1259,16 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
+ 
+ #ifdef CONFIG_BLK_DEV_ZONED
+ 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
+-		__submit_zone_reset_cmd(sbi, dc, flag, wait_list, issued);
+-		return 0;
++		int devi = f2fs_bdev_index(sbi, bdev);
++
++		if (devi < 0)
++			return -EINVAL;
++
++		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
++			__submit_zone_reset_cmd(sbi, dc, flag,
++						wait_list, issued);
++			return 0;
++		}
+ 	}
+ #endif
+ 
+@@ -1785,15 +1794,24 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+ 	dc = __lookup_discard_cmd(sbi, blkaddr);
+ #ifdef CONFIG_BLK_DEV_ZONED
+ 	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
+-		/* force submit zone reset */
+-		if (dc->state == D_PREP)
+-			__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
+-						&dcc->wait_list, NULL);
+-		dc->ref++;
+-		mutex_unlock(&dcc->cmd_lock);
+-		/* wait zone reset */
+-		__wait_one_discard_bio(sbi, dc);
+-		return;
++		int devi = f2fs_bdev_index(sbi, dc->bdev);
++
++		if (devi < 0) {
++			mutex_unlock(&dcc->cmd_lock);
++			return;
++		}
++
++		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
++			/* force submit zone reset */
++			if (dc->state == D_PREP)
++				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
++							&dcc->wait_list, NULL);
++			dc->ref++;
++			mutex_unlock(&dcc->cmd_lock);
++			/* wait zone reset */
++			__wait_one_discard_bio(sbi, dc);
++			return;
++		}
+ 	}
+ #endif
+ 	if (dc) {
+@@ -2193,7 +2211,7 @@ find_next:
+ 			len = next_pos - cur_pos;
+ 
+ 			if (f2fs_sb_has_blkzoned(sbi) ||
+-					!force || len < cpc->trim_minlen)
++			    (force && len < cpc->trim_minlen))
+ 				goto skip;
+ 
+ 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
+@@ -3228,6 +3246,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
+ 		goto out;
+ 
+ 	f2fs_down_write(&sbi->gc_lock);
++	stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 	err = f2fs_write_checkpoint(sbi, &cpc);
+ 	f2fs_up_write(&sbi->gc_lock);
+ 	if (err)
+@@ -4846,17 +4865,17 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
+ {
+ 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
+ 	block_t zone_block, wp_block, last_valid_block;
++	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+ 	int i, s, b, ret;
+ 	struct seg_entry *se;
+ 
+ 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ 		return 0;
+ 
+-	wp_block = fdev->start_blk + (zone->wp >> sbi->log_sectors_per_block);
++	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
+ 	wp_segno = GET_SEGNO(sbi, wp_block);
+ 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+-	zone_block = fdev->start_blk + (zone->start >>
+-						sbi->log_sectors_per_block);
++	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
+ 	zone_segno = GET_SEGNO(sbi, zone_block);
+ 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
+ 
+@@ -4906,7 +4925,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
+ 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
+ 			    wp_segno, wp_blkoff);
+ 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
+-				zone->len >> sbi->log_sectors_per_block);
++					zone->len >> log_sectors_per_block);
+ 		if (ret)
+ 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+ 				 fdev->path, ret);
+@@ -4967,6 +4986,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+ 	struct blk_zone zone;
+ 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
+ 	block_t cs_zone_block, wp_block;
++	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+ 	sector_t zone_sector;
+ 	int err;
+ 
+@@ -4978,8 +4998,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+ 		return 0;
+ 
+ 	/* report zone for the sector the curseg points to */
+-	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
+-						sbi->log_sectors_per_block;
++	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
++		<< log_sectors_per_block;
+ 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+ 				  report_one_zone_cb, &zone);
+ 	if (err != 1) {
+@@ -4991,10 +5011,10 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+ 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ 		return 0;
+ 
+-	wp_block = zbd->start_blk + (zone.wp >> sbi->log_sectors_per_block);
++	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+ 	wp_segno = GET_SEGNO(sbi, wp_block);
+ 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+-	wp_sector_off = zone.wp & GENMASK(sbi->log_sectors_per_block - 1, 0);
++	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+ 
+ 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+ 		wp_sector_off == 0)
+@@ -5021,8 +5041,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+ 	if (!zbd)
+ 		return 0;
+ 
+-	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
+-						sbi->log_sectors_per_block;
++	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
++		<< log_sectors_per_block;
+ 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+ 				  report_one_zone_cb, &zone);
+ 	if (err != 1) {
+@@ -5040,7 +5060,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+ 			    "Reset the zone: curseg[0x%x,0x%x]",
+ 			    type, cs->segno, cs->next_blkoff);
+ 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,	cs_zone_block,
+-					zone.len >> sbi->log_sectors_per_block);
++					zone.len >> log_sectors_per_block);
+ 		if (err) {
+ 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+ 				 zbd->path, err);
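The zoned-device hunks above replace the sbi->log_sectors_per_block field with an on-the-spot computation, log_blocksize - SECTOR_SHIFT: with both sizes powers of two, that difference is the shift converting 512-byte sector counts (the unit the block layer reports zone write pointers in) into filesystem blocks. Worked through for 4 KiB blocks:

    #include <stdio.h>

    #define SECTOR_SHIFT 9    /* 512-byte sectors, as in the kernel */

    int main(void)
    {
        unsigned log_blocksize = 12;                 /* 4 KiB blocks */
        unsigned log_sectors_per_block = log_blocksize - SECTOR_SHIFT;
        unsigned long long wp_sectors = 524288;      /* 256 MiB zone */

        /* Zone write pointer, converted from sectors to fs blocks. */
        printf("%llu blocks\n", wp_sectors >> log_sectors_per_block);
        return 0;
    }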
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index ca31163da00a5..8d9d2ee7f3c7f 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -591,7 +591,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ 	unsigned int level;
+ 
+ 	if (strlen(str) == 3) {
+-		F2FS_OPTION(sbi).compress_level = LZ4HC_DEFAULT_CLEVEL;
++		F2FS_OPTION(sbi).compress_level = 0;
+ 		return 0;
+ 	}
+ 
+@@ -862,11 +862,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 			if (!name)
+ 				return -ENOMEM;
+ 			if (!strcmp(name, "adaptive")) {
+-				if (f2fs_sb_has_blkzoned(sbi)) {
+-					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
+-					kfree(name);
+-					return -EINVAL;
+-				}
+ 				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
+ 			} else if (!strcmp(name, "lfs")) {
+ 				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+@@ -1331,6 +1326,11 @@ default_check:
+ 			F2FS_OPTION(sbi).discard_unit =
+ 					DISCARD_UNIT_SECTION;
+ 		}
++
++		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
++			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
++			return -EINVAL;
++		}
+ #else
+ 		f2fs_err(sbi, "Zoned block device support is not enabled");
+ 		return -EINVAL;
+@@ -1561,7 +1561,8 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
+ 	int i;
+ 
+ 	for (i = 0; i < sbi->s_ndevs; i++) {
+-		blkdev_put(FDEV(i).bdev, sbi->sb->s_type);
++		if (i > 0)
++			blkdev_put(FDEV(i).bdev, sbi->sb->s_type);
+ #ifdef CONFIG_BLK_DEV_ZONED
+ 		kvfree(FDEV(i).blkz_seq);
+ #endif
+@@ -1600,6 +1601,7 @@ static void f2fs_put_super(struct super_block *sb)
+ 		struct cp_control cpc = {
+ 			.reason = CP_UMOUNT,
+ 		};
++		stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 		err = f2fs_write_checkpoint(sbi, &cpc);
+ 	}
+ 
+@@ -1609,6 +1611,7 @@ static void f2fs_put_super(struct super_block *sb)
+ 		struct cp_control cpc = {
+ 			.reason = CP_UMOUNT | CP_TRIMMED,
+ 		};
++		stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 		err = f2fs_write_checkpoint(sbi, &cpc);
+ 	}
+ 
+@@ -1705,8 +1708,10 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
+ 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ 		return -EAGAIN;
+ 
+-	if (sync)
++	if (sync) {
++		stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 		err = f2fs_issue_checkpoint(sbi);
++	}
+ 
+ 	return err;
+ }
+@@ -2205,6 +2210,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+ 			.nr_free_secs = 1 };
+ 
+ 		f2fs_down_write(&sbi->gc_lock);
++		stat_inc_gc_call_count(sbi, FOREGROUND);
+ 		err = f2fs_gc(sbi, &gc_control);
+ 		if (err == -ENODATA) {
+ 			err = 0;
+@@ -2230,6 +2236,7 @@ skip_gc:
+ 	f2fs_down_write(&sbi->gc_lock);
+ 	cpc.reason = CP_PAUSE;
+ 	set_sbi_flag(sbi, SBI_CP_DISABLED);
++	stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 	err = f2fs_write_checkpoint(sbi, &cpc);
+ 	if (err)
+ 		goto out_unlock;
+@@ -4190,16 +4197,12 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ 	sbi->aligned_blksize = true;
+ 
+ 	for (i = 0; i < max_devices; i++) {
+-
+-		if (i > 0 && !RDEV(i).path[0])
++		if (i == 0)
++			FDEV(0).bdev = sbi->sb->s_bdev;
++		else if (!RDEV(i).path[0])
+ 			break;
+ 
+-		if (max_devices == 1) {
+-			/* Single zoned block device mount */
+-			FDEV(0).bdev =
+-				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, mode,
+-						  sbi->sb->s_type, NULL);
+-		} else {
++		if (max_devices > 1) {
+ 			/* Multi-device mount */
+ 			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ 			FDEV(i).total_segments =
+@@ -4215,10 +4218,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ 				FDEV(i).end_blk = FDEV(i).start_blk +
+ 					(FDEV(i).total_segments <<
+ 					sbi->log_blocks_per_seg) - 1;
++				FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
++					mode, sbi->sb->s_type, NULL);
+ 			}
+-			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, mode,
+-							  sbi->sb->s_type,
+-							  NULL);
+ 		}
+ 		if (IS_ERR(FDEV(i).bdev))
+ 			return PTR_ERR(FDEV(i).bdev);
+@@ -4871,6 +4873,7 @@ static void kill_f2fs_super(struct super_block *sb)
+ 			struct cp_control cpc = {
+ 				.reason = CP_UMOUNT,
+ 			};
++			stat_inc_cp_call_count(sbi, TOTAL_CALL);
+ 			f2fs_write_checkpoint(sbi, &cpc);
+ 		}
+ 
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 48b7e0073884a..417fae96890f6 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -356,6 +356,16 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+ 	if (!strcmp(a->attr.name, "revoked_atomic_block"))
+ 		return sysfs_emit(buf, "%llu\n", sbi->revoked_atomic_block);
+ 
++#ifdef CONFIG_F2FS_STAT_FS
++	if (!strcmp(a->attr.name, "cp_foreground_calls"))
++		return sysfs_emit(buf, "%d\n",
++				atomic_read(&sbi->cp_call_count[TOTAL_CALL]) -
++				atomic_read(&sbi->cp_call_count[BACKGROUND]));
++	if (!strcmp(a->attr.name, "cp_background_calls"))
++		return sysfs_emit(buf, "%d\n",
++				atomic_read(&sbi->cp_call_count[BACKGROUND]));
++#endif
++
+ 	ui = (unsigned int *)(ptr + a->offset);
+ 
+ 	return sysfs_emit(buf, "%u\n", *ui);
+@@ -972,10 +982,10 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+ 
+ /* STAT_INFO ATTR */
+ #ifdef CONFIG_F2FS_STAT_FS
+-STAT_INFO_RO_ATTR(cp_foreground_calls, cp_count);
+-STAT_INFO_RO_ATTR(cp_background_calls, bg_cp_count);
+-STAT_INFO_RO_ATTR(gc_foreground_calls, call_count);
+-STAT_INFO_RO_ATTR(gc_background_calls, bg_gc);
++STAT_INFO_RO_ATTR(cp_foreground_calls, cp_call_count[FOREGROUND]);
++STAT_INFO_RO_ATTR(cp_background_calls, cp_call_count[BACKGROUND]);
++STAT_INFO_RO_ATTR(gc_foreground_calls, gc_call_count[FOREGROUND]);
++STAT_INFO_RO_ATTR(gc_background_calls, gc_call_count[BACKGROUND]);
+ #endif
+ 
+ /* FAULT_INFO ATTR */
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 851214d1d013d..375023e40161d 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -315,10 +315,31 @@ struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ }
+ EXPORT_SYMBOL(fs_context_for_reconfigure);
+ 
++/**
++ * fs_context_for_submount: allocate a new fs_context for a submount
++ * @type: file_system_type of the new context
++ * @reference: reference dentry from which to copy relevant info
++ *
++ * Allocate a new fs_context suitable for a submount. This also ensures that
++ * the fc->security object is inherited from @reference (if needed).
++ */
+ struct fs_context *fs_context_for_submount(struct file_system_type *type,
+ 					   struct dentry *reference)
+ {
+-	return alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
++	struct fs_context *fc;
++	int ret;
++
++	fc = alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
++	if (IS_ERR(fc))
++		return fc;
++
++	ret = security_fs_context_submount(fc, reference->d_sb);
++	if (ret) {
++		put_fs_context(fc);
++		return ERR_PTR(ret);
++	}
++
++	return fc;
+ }
+ EXPORT_SYMBOL(fs_context_for_submount);
+ 
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index bc4115288eec7..1c7599ed90625 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -19,7 +19,6 @@
+ #include <linux/uio.h>
+ #include <linux/fs.h>
+ #include <linux/filelock.h>
+-#include <linux/file.h>
+ 
+ static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
+ 			  unsigned int open_flags, int opcode,
+@@ -479,36 +478,48 @@ static void fuse_sync_writes(struct inode *inode)
+ 	fuse_release_nowrite(inode);
+ }
+ 
+-struct fuse_flush_args {
+-	struct fuse_args args;
+-	struct fuse_flush_in inarg;
+-	struct work_struct work;
+-	struct file *file;
+-};
+-
+-static int fuse_do_flush(struct fuse_flush_args *fa)
++static int fuse_flush(struct file *file, fl_owner_t id)
+ {
+-	int err;
+-	struct inode *inode = file_inode(fa->file);
++	struct inode *inode = file_inode(file);
+ 	struct fuse_mount *fm = get_fuse_mount(inode);
++	struct fuse_file *ff = file->private_data;
++	struct fuse_flush_in inarg;
++	FUSE_ARGS(args);
++	int err;
++
++	if (fuse_is_bad(inode))
++		return -EIO;
++
++	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
++		return 0;
+ 
+ 	err = write_inode_now(inode, 1);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	inode_lock(inode);
+ 	fuse_sync_writes(inode);
+ 	inode_unlock(inode);
+ 
+-	err = filemap_check_errors(fa->file->f_mapping);
++	err = filemap_check_errors(file->f_mapping);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	err = 0;
+ 	if (fm->fc->no_flush)
+ 		goto inval_attr_out;
+ 
+-	err = fuse_simple_request(fm, &fa->args);
++	memset(&inarg, 0, sizeof(inarg));
++	inarg.fh = ff->fh;
++	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
++	args.opcode = FUSE_FLUSH;
++	args.nodeid = get_node_id(inode);
++	args.in_numargs = 1;
++	args.in_args[0].size = sizeof(inarg);
++	args.in_args[0].value = &inarg;
++	args.force = true;
++
++	err = fuse_simple_request(fm, &args);
+ 	if (err == -ENOSYS) {
+ 		fm->fc->no_flush = 1;
+ 		err = 0;
+@@ -521,57 +532,9 @@ inval_attr_out:
+ 	 */
+ 	if (!err && fm->fc->writeback_cache)
+ 		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
+-
+-out:
+-	fput(fa->file);
+-	kfree(fa);
+ 	return err;
+ }
+ 
+-static void fuse_flush_async(struct work_struct *work)
+-{
+-	struct fuse_flush_args *fa = container_of(work, typeof(*fa), work);
+-
+-	fuse_do_flush(fa);
+-}
+-
+-static int fuse_flush(struct file *file, fl_owner_t id)
+-{
+-	struct fuse_flush_args *fa;
+-	struct inode *inode = file_inode(file);
+-	struct fuse_mount *fm = get_fuse_mount(inode);
+-	struct fuse_file *ff = file->private_data;
+-
+-	if (fuse_is_bad(inode))
+-		return -EIO;
+-
+-	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
+-		return 0;
+-
+-	fa = kzalloc(sizeof(*fa), GFP_KERNEL);
+-	if (!fa)
+-		return -ENOMEM;
+-
+-	fa->inarg.fh = ff->fh;
+-	fa->inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
+-	fa->args.opcode = FUSE_FLUSH;
+-	fa->args.nodeid = get_node_id(inode);
+-	fa->args.in_numargs = 1;
+-	fa->args.in_args[0].size = sizeof(fa->inarg);
+-	fa->args.in_args[0].value = &fa->inarg;
+-	fa->args.force = true;
+-	fa->file = get_file(file);
+-
+-	/* Don't wait if the task is exiting */
+-	if (current->flags & PF_EXITING) {
+-		INIT_WORK(&fa->work, fuse_flush_async);
+-		schedule_work(&fa->work);
+-		return 0;
+-	}
+-
+-	return fuse_do_flush(fa);
+-}
+-
+ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
+ 		      int datasync, int opcode)
+ {
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index aa8967cca1a31..7d2f70708f37d 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -508,11 +508,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
+ 		WARN_ON_ONCE(folio_test_writeback(folio));
+ 		folio_cancel_dirty(folio);
+ 		iomap_page_release(folio);
+-	} else if (folio_test_large(folio)) {
+-		/* Must release the iop so the page can be split */
+-		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
+-			     folio_test_dirty(folio));
+-		iomap_page_release(folio);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
+index ae99a7e232eeb..a82751e6c47f9 100644
+--- a/fs/jfs/jfs_extent.c
++++ b/fs/jfs/jfs_extent.c
+@@ -311,6 +311,11 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
+ 	 * blocks in the map. in that case, we'll start off with the
+ 	 * maximum free.
+ 	 */
++
++	/* give up if no space left */
++	if (bmp->db_maxfreebud == -1)
++		return -ENOSPC;
++
+ 	max = (s64) 1 << bmp->db_maxfreebud;
+ 	if (*nblocks >= max && *nblocks > nbperpage)
+ 		nb = nblks = (max > nbperpage) ? max : nbperpage;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 1d9488cf05348..87a0f207df0b9 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+ {
+ 	struct nsm_handle *new;
+ 
++	if (!hostname)
++		return NULL;
++
+ 	new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
+ 	if (unlikely(new == NULL))
+ 		return NULL;
+diff --git a/fs/namei.c b/fs/namei.c
+index e56ff39a79bc8..2bae29ea52ffa 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2890,7 +2890,7 @@ int path_pts(struct path *path)
+ 	dput(path->dentry);
+ 	path->dentry = parent;
+ 	child = d_hash_and_lookup(parent, &this);
+-	if (!child)
++	if (IS_ERR_OR_NULL(child))
+ 		return -ENOENT;
+ 
+ 	path->dentry = child;
+diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
+index 70f5563a8e81c..65cbb5607a5fc 100644
+--- a/fs/nfs/blocklayout/dev.c
++++ b/fs/nfs/blocklayout/dev.c
+@@ -404,7 +404,7 @@ bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
+ 	int ret, i;
+ 
+ 	d->children = kcalloc(v->concat.volumes_count,
+-			sizeof(struct pnfs_block_dev), GFP_KERNEL);
++			sizeof(struct pnfs_block_dev), gfp_mask);
+ 	if (!d->children)
+ 		return -ENOMEM;
+ 
+@@ -433,7 +433,7 @@ bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
+ 	int ret, i;
+ 
+ 	d->children = kcalloc(v->stripe.volumes_count,
+-			sizeof(struct pnfs_block_dev), GFP_KERNEL);
++			sizeof(struct pnfs_block_dev), gfp_mask);
+ 	if (!d->children)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 913c09806c7f5..41abea340ad84 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -493,6 +493,7 @@ extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
+ extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ 			struct inode *inode, bool force_mds,
+ 			const struct nfs_pgio_completion_ops *compl_ops);
++extern bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size);
+ extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ 			       struct nfs_open_context *ctx,
+ 			       struct folio *folio);
+diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
+index 05c3b4b2b3dd8..c190938142960 100644
+--- a/fs/nfs/nfs2xdr.c
++++ b/fs/nfs/nfs2xdr.c
+@@ -949,7 +949,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ 
+ 	error = decode_filename_inline(xdr, &entry->name, &entry->len);
+ 	if (unlikely(error))
+-		return -EAGAIN;
++		return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
+ 
+ 	/*
+ 	 * The type (size and byte order) of nfscookie isn't defined in
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 3b0b650c9c5ab..60f032be805ae 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -1991,7 +1991,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ 
+ 	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+ 	if (unlikely(error))
+-		return -EAGAIN;
++		return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
+ 
+ 	error = decode_cookie3(xdr, &new_cookie);
+ 	if (unlikely(error))
+diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
+index 0fe5aacbcfdf1..b59876b01a1e3 100644
+--- a/fs/nfs/nfs42.h
++++ b/fs/nfs/nfs42.h
+@@ -13,6 +13,7 @@
+  * more? Need to consider not to pre-alloc too much for a compound.
+  */
+ #define PNFS_LAYOUTSTATS_MAXDEV (4)
++#define READ_PLUS_SCRATCH_SIZE (16)
+ 
+ /* nfs4.2proc.c */
+ #ifdef CONFIG_NFS_V4_2
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 49f78e23b34c0..063e00aff87ed 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -471,8 +471,9 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
+ 				continue;
+ 			}
+ 			break;
+-		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
+-			args.sync = true;
++		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
++				args.sync != res.synchronous) {
++			args.sync = res.synchronous;
+ 			dst_exception.retry = 1;
+ 			continue;
+ 		} else if ((err == -ESTALE ||
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 95234208dc9ee..9e3ae53e22058 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -54,10 +54,16 @@
+ 					(1 /* data_content4 */ + \
+ 					 2 /* data_info4.di_offset */ + \
+ 					 1 /* data_info4.di_length */)
++#define NFS42_READ_PLUS_HOLE_SEGMENT_SIZE \
++					(1 /* data_content4 */ + \
++					 2 /* data_info4.di_offset */ + \
++					 2 /* data_info4.di_length */)
++#define READ_PLUS_SEGMENT_SIZE_DIFF	(NFS42_READ_PLUS_HOLE_SEGMENT_SIZE - \
++					 NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
+ #define decode_read_plus_maxsz		(op_decode_hdr_maxsz + \
+ 					 1 /* rpr_eof */ + \
+ 					 1 /* rpr_contents count */ + \
+-					 NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
++					 NFS42_READ_PLUS_HOLE_SEGMENT_SIZE)
+ #define encode_seek_maxsz		(op_encode_hdr_maxsz + \
+ 					 encode_stateid_maxsz + \
+ 					 2 /* offset */ + \
+@@ -617,8 +623,8 @@ static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
+ 	encode_putfh(xdr, args->fh, &hdr);
+ 	encode_read_plus(xdr, args, &hdr);
+ 
+-	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+-				args->count, hdr.replen);
++	rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count,
++				hdr.replen - READ_PLUS_SEGMENT_SIZE_DIFF);
+ 	encode_nops(&hdr);
+ }
+ 
+@@ -1056,13 +1062,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
+ 	res->eof = be32_to_cpup(p++);
+ 	segments = be32_to_cpup(p++);
+ 	if (segments == 0)
+-		return status;
++		return 0;
+ 
+ 	segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);
+ 	if (!segs)
+ 		return -ENOMEM;
+ 
+-	status = -EIO;
+ 	for (i = 0; i < segments; i++) {
+ 		status = decode_read_plus_segment(xdr, &segs[i]);
+ 		if (status < 0)
+@@ -1428,7 +1433,7 @@ static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
+ 	struct compound_hdr hdr;
+ 	int status;
+ 
+-	xdr_set_scratch_buffer(xdr, res->scratch, sizeof(res->scratch));
++	xdr_set_scratch_buffer(xdr, res->scratch, READ_PLUS_SCRATCH_SIZE);
+ 
+ 	status = decode_compound_hdr(xdr, &hdr);
+ 	if (status)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 832fa226b8f26..3c24c3c99e8ac 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5438,18 +5438,8 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
+ 	return false;
+ }
+ 
+-static inline void nfs4_read_plus_scratch_free(struct nfs_pgio_header *hdr)
+-{
+-	if (hdr->res.scratch) {
+-		kfree(hdr->res.scratch);
+-		hdr->res.scratch = NULL;
+-	}
+-}
+-
+ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+ {
+-	nfs4_read_plus_scratch_free(hdr);
+-
+ 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
+ 		return -EAGAIN;
+ 	if (nfs4_read_stateid_changed(task, &hdr->args))
+@@ -5469,8 +5459,7 @@ static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+ 	/* Note: We don't use READ_PLUS with pNFS yet */
+ 	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
+ 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
+-		hdr->res.scratch = kmalloc(32, GFP_KERNEL);
+-		return hdr->res.scratch != NULL;
++		return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
+ 	}
+ 	return false;
+ }
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index a0112ad4937aa..2e14ce2f82191 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -943,7 +943,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ 			* Test this address for session trunking and
+ 			* add as an alias
+ 			*/
+-			xprtdata.cred = nfs4_get_clid_cred(clp),
++			xprtdata.cred = nfs4_get_clid_cred(clp);
+ 			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
+ 					  rpc_clnt_setup_test_and_add_xprt,
+ 					  &rpcdata);
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index f71eeee67e201..7dc21a48e3e7b 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -47,6 +47,8 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
+ 
+ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
+ {
++	if (rhdr->res.scratch != NULL)
++		kfree(rhdr->res.scratch);
+ 	kmem_cache_free(nfs_rdata_cachep, rhdr);
+ }
+ 
+@@ -108,6 +110,14 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+ 
++bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
++{
++	WARN_ON(hdr->res.scratch != NULL);
++	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
++	return hdr->res.scratch != NULL;
++}
++EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
++
+ static void nfs_readpage_release(struct nfs_page *req, int error)
+ {
+ 	struct folio *folio = nfs_page_to_folio(req);
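[For context, the fs/nfs/read.c hunks above move ownership of the READ_PLUS scratch buffer: it is allocated on demand via nfs_read_alloc_scratch() and freed exactly once when the header itself is freed, so no completion path can leak or double-free it. The NULL check before kfree() is redundant but harmless, since kfree(NULL) is a no-op. A tiny userspace sketch of the allocate-on-demand/free-in-destructor pattern, with hypothetical types — illustrative only:]

#include <stdlib.h>
#include <string.h>

struct pgio_header { void *scratch; };

/* mirrors nfs_read_alloc_scratch(): allocate on demand, report success */
static int alloc_scratch(struct pgio_header *hdr, size_t size)
{
	hdr->scratch = malloc(size);
	return hdr->scratch != NULL;
}

/* mirrors nfs_readhdr_free(): the destructor owns the final free */
static void free_header(struct pgio_header *hdr)
{
	free(hdr->scratch);	/* free(NULL) is a no-op, like kfree(NULL) */
	hdr->scratch = NULL;
}

int main(void)
{
	struct pgio_header hdr = { 0 };

	if (alloc_scratch(&hdr, 16))
		memset(hdr.scratch, 0, 16);
	free_header(&hdr);	/* safe whether or not allocation happened */
	return 0;
}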
+diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
+index 8e9c1a0f8d380..1ed2f691ebb90 100644
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -83,6 +83,15 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 	int len = sizeof(__be32), ret, i;
+ 	__be32 *p;
+ 
++	/*
++	 * See paragraph 5 of RFC 8881 S18.40.3.
++	 */
++	if (!gdp->gd_maxcount) {
++		if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
++			return nfserr_resource;
++		return nfs_ok;
++	}
++
+ 	p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ 	if (!p)
+ 		return nfserr_resource;
+diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
+index e81d2a5cf381e..bb205328e043d 100644
+--- a/fs/nfsd/flexfilelayoutxdr.c
++++ b/fs/nfsd/flexfilelayoutxdr.c
+@@ -85,6 +85,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 	int addr_len;
+ 	__be32 *p;
+ 
++	/*
++	 * See paragraph 5 of RFC 8881 S18.40.3.
++	 */
++	if (!gdp->gd_maxcount) {
++		if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
++			return nfserr_resource;
++		return nfs_ok;
++	}
++
+ 	/* len + padding for two strings */
+ 	addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
+ 	ver_len = 20;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index b30dca7de8cc0..be72628b13376 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -4678,20 +4678,17 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ 	*p++ = cpu_to_be32(gdev->gd_layout_type);
+ 
+-	/* If maxcount is 0 then just update notifications */
+-	if (gdev->gd_maxcount != 0) {
+-		ops = nfsd4_layout_ops[gdev->gd_layout_type];
+-		nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+-		if (nfserr) {
+-			/*
+-			 * We don't bother to burden the layout drivers with
+-			 * enforcing gd_maxcount, just tell the client to
+-			 * come back with a bigger buffer if it's not enough.
+-			 */
+-			if (xdr->buf->len + 4 > gdev->gd_maxcount)
+-				goto toosmall;
+-			return nfserr;
+-		}
++	ops = nfsd4_layout_ops[gdev->gd_layout_type];
++	nfserr = ops->encode_getdeviceinfo(xdr, gdev);
++	if (nfserr) {
++		/*
++		 * We don't bother to burden the layout drivers with
++		 * enforcing gd_maxcount, just tell the client to
++		 * come back with a bigger buffer if it's not enough.
++		 */
++		if (xdr->buf->len + 4 > gdev->gd_maxcount)
++			goto toosmall;
++		return nfserr;
+ 	}
+ 
+ 	if (gdev->gd_notify_types) {
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 17c52225b87d4..03bccfd183f3c 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -1535,6 +1535,10 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
+ 		status = ocfs2_add_entry(handle, new_dentry, old_inode,
+ 					 OCFS2_I(old_inode)->ip_blkno,
+ 					 new_dir_bh, &target_insert);
++		if (status < 0) {
++			mlog_errno(status);
++			goto bail;
++		}
+ 	}
+ 
+ 	old_inode->i_ctime = current_time(old_inode);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9df3f48396628..ee4b824658a0a 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -3583,7 +3583,8 @@ static int proc_tid_comm_permission(struct mnt_idmap *idmap,
+ }
+ 
+ static const struct inode_operations proc_tid_comm_inode_operations = {
+-		.permission = proc_tid_comm_permission,
++		.setattr	= proc_setattr,
++		.permission	= proc_tid_comm_permission,
+ };
+ 
+ /*
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 85aaf0fc6d7d1..eb6df190d7523 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -519,7 +519,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+ 	sig ^= PERSISTENT_RAM_SIG;
+ 
+ 	if (prz->buffer->sig == sig) {
+-		if (buffer_size(prz) == 0) {
++		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
+ 			pr_debug("found existing empty buffer\n");
+ 			return 0;
+ 		}
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index e3e4f40476579..c7afe433d991a 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -225,13 +225,22 @@ static void put_quota_format(struct quota_format_type *fmt)
+ 
+ /*
+  * Dquot List Management:
+- * The quota code uses four lists for dquot management: the inuse_list,
+- * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
+- * structure may be on some of those lists, depending on its current state.
++ * The quota code uses five lists for dquot management: the inuse_list,
++ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
++ * A single dquot structure may be on some of those lists, depending on
++ * its current state.
+  *
+  * All dquots are placed to the end of inuse_list when first created, and this
+  * list is used for invalidate operation, which must look at every dquot.
+  *
++ * When the last reference of a dquot is dropped, the dquot is added to
++ * releasing_dquots. We then queue a work item which calls
++ * synchronize_srcu() and after that performs the final cleanup of all the
++ * dquots on the list. Both releasing_dquots and free_dquots use the
++ * dq_free list_head in the dquot struct. When a dquot is removed from
++ * releasing_dquots, its reference count is always decremented, and if
++ * dq_count == 0 at that point, the dquot is added to free_dquots.
++ *
+  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+  * and this list is searched whenever we need an available dquot.  Dquots are
+  * removed from the list as soon as they are used again, and
+@@ -250,6 +259,7 @@ static void put_quota_format(struct quota_format_type *fmt)
+ 
+ static LIST_HEAD(inuse_list);
+ static LIST_HEAD(free_dquots);
++static LIST_HEAD(releasing_dquots);
+ static unsigned int dq_hash_bits, dq_hash_mask;
+ static struct hlist_head *dquot_hash;
+ 
+@@ -260,6 +270,9 @@ static qsize_t inode_get_rsv_space(struct inode *inode);
+ static qsize_t __inode_get_rsv_space(struct inode *inode);
+ static int __dquot_initialize(struct inode *inode, int type);
+ 
++static void quota_release_workfn(struct work_struct *work);
++static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
++
+ static inline unsigned int
+ hashfn(const struct super_block *sb, struct kqid qid)
+ {
+@@ -305,12 +318,18 @@ static inline void put_dquot_last(struct dquot *dquot)
+ 	dqstats_inc(DQST_FREE_DQUOTS);
+ }
+ 
++static inline void put_releasing_dquots(struct dquot *dquot)
++{
++	list_add_tail(&dquot->dq_free, &releasing_dquots);
++}
++
+ static inline void remove_free_dquot(struct dquot *dquot)
+ {
+ 	if (list_empty(&dquot->dq_free))
+ 		return;
+ 	list_del_init(&dquot->dq_free);
+-	dqstats_dec(DQST_FREE_DQUOTS);
++	if (!atomic_read(&dquot->dq_count))
++		dqstats_dec(DQST_FREE_DQUOTS);
+ }
+ 
+ static inline void put_inuse(struct dquot *dquot)
+@@ -336,6 +355,11 @@ static void wait_on_dquot(struct dquot *dquot)
+ 	mutex_unlock(&dquot->dq_lock);
+ }
+ 
++static inline int dquot_active(struct dquot *dquot)
++{
++	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
++}
++
+ static inline int dquot_dirty(struct dquot *dquot)
+ {
+ 	return test_bit(DQ_MOD_B, &dquot->dq_flags);
+@@ -351,14 +375,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
+ {
+ 	int ret = 1;
+ 
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++	if (!dquot_active(dquot))
+ 		return 0;
+ 
+ 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+ 		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
+ 
+ 	/* If quota is dirty already, we don't have to acquire dq_list_lock */
+-	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
++	if (dquot_dirty(dquot))
+ 		return 1;
+ 
+ 	spin_lock(&dq_list_lock);
+@@ -440,7 +464,7 @@ int dquot_acquire(struct dquot *dquot)
+ 	smp_mb__before_atomic();
+ 	set_bit(DQ_READ_B, &dquot->dq_flags);
+ 	/* Instantiate dquot if needed */
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
++	if (!dquot_active(dquot) && !dquot->dq_off) {
+ 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ 		/* Write the info if needed */
+ 		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+@@ -482,7 +506,7 @@ int dquot_commit(struct dquot *dquot)
+ 		goto out_lock;
+ 	/* Inactive dquot can be only if there was error during read/init
+ 	 * => we have better not writing it */
+-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++	if (dquot_active(dquot))
+ 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ 	else
+ 		ret = -EIO;
+@@ -547,6 +571,8 @@ static void invalidate_dquots(struct super_block *sb, int type)
+ 	struct dquot *dquot, *tmp;
+ 
+ restart:
++	flush_delayed_work(&quota_release_work);
++
+ 	spin_lock(&dq_list_lock);
+ 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+ 		if (dquot->dq_sb != sb)
+@@ -555,6 +581,12 @@ restart:
+ 			continue;
+ 		/* Wait for dquot users */
+ 		if (atomic_read(&dquot->dq_count)) {
++			/* dquot in releasing_dquots, flush and retry */
++			if (!list_empty(&dquot->dq_free)) {
++				spin_unlock(&dq_list_lock);
++				goto restart;
++			}
++
+ 			atomic_inc(&dquot->dq_count);
+ 			spin_unlock(&dq_list_lock);
+ 			/*
+@@ -597,7 +629,7 @@ int dquot_scan_active(struct super_block *sb,
+ 
+ 	spin_lock(&dq_list_lock);
+ 	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+-		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++		if (!dquot_active(dquot))
+ 			continue;
+ 		if (dquot->dq_sb != sb)
+ 			continue;
+@@ -612,7 +644,7 @@ int dquot_scan_active(struct super_block *sb,
+ 		 * outstanding call and recheck the DQ_ACTIVE_B after that.
+ 		 */
+ 		wait_on_dquot(dquot);
+-		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++		if (dquot_active(dquot)) {
+ 			ret = fn(dquot, priv);
+ 			if (ret < 0)
+ 				goto out;
+@@ -628,6 +660,18 @@ out:
+ }
+ EXPORT_SYMBOL(dquot_scan_active);
+ 
++static inline int dquot_write_dquot(struct dquot *dquot)
++{
++	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
++	if (ret < 0) {
++		quota_error(dquot->dq_sb, "Can't write quota structure "
++			    "(error %d). Quota may get out of sync!", ret);
++		/* Clear dirty bit anyway to avoid infinite loop. */
++		clear_dquot_dirty(dquot);
++	}
++	return ret;
++}
++
+ /* Write all dquot structures to quota files */
+ int dquot_writeback_dquots(struct super_block *sb, int type)
+ {
+@@ -651,23 +695,16 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 			dquot = list_first_entry(&dirty, struct dquot,
+ 						 dq_dirty);
+ 
+-			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
++			WARN_ON(!dquot_active(dquot));
+ 
+ 			/* Now we have active dquot from which someone is
+  			 * holding reference so we can safely just increase
+ 			 * use count */
+ 			dqgrab(dquot);
+ 			spin_unlock(&dq_list_lock);
+-			err = sb->dq_op->write_dquot(dquot);
+-			if (err) {
+-				/*
+-				 * Clear dirty bit anyway to avoid infinite
+-				 * loop here.
+-				 */
+-				clear_dquot_dirty(dquot);
+-				if (!ret)
+-					ret = err;
+-			}
++			err = dquot_write_dquot(dquot);
++			if (err && !ret)
++				ret = err;
+ 			dqput(dquot);
+ 			spin_lock(&dq_list_lock);
+ 		}
+@@ -760,13 +797,54 @@ static struct shrinker dqcache_shrinker = {
+ 	.seeks = DEFAULT_SEEKS,
+ };
+ 
++/*
++ * Safely release dquot and put reference to dquot.
++ */
++static void quota_release_workfn(struct work_struct *work)
++{
++	struct dquot *dquot;
++	struct list_head rls_head;
++
++	spin_lock(&dq_list_lock);
++	/* Exchange the list head to avoid livelock. */
++	list_replace_init(&releasing_dquots, &rls_head);
++	spin_unlock(&dq_list_lock);
++
++restart:
++	synchronize_srcu(&dquot_srcu);
++	spin_lock(&dq_list_lock);
++	while (!list_empty(&rls_head)) {
++		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
++		/* Dquot got used again? */
++		if (atomic_read(&dquot->dq_count) > 1) {
++			remove_free_dquot(dquot);
++			atomic_dec(&dquot->dq_count);
++			continue;
++		}
++		if (dquot_dirty(dquot)) {
++			spin_unlock(&dq_list_lock);
++			/* Commit dquot before releasing */
++			dquot_write_dquot(dquot);
++			goto restart;
++		}
++		if (dquot_active(dquot)) {
++			spin_unlock(&dq_list_lock);
++			dquot->dq_sb->dq_op->release_dquot(dquot);
++			goto restart;
++		}
++		/* Dquot is inactive and clean, now move it to free list */
++		remove_free_dquot(dquot);
++		atomic_dec(&dquot->dq_count);
++		put_dquot_last(dquot);
++	}
++	spin_unlock(&dq_list_lock);
++}
++
+ /*
+  * Put reference to dquot
+  */
+ void dqput(struct dquot *dquot)
+ {
+-	int ret;
+-
+ 	if (!dquot)
+ 		return;
+ #ifdef CONFIG_QUOTA_DEBUG
+@@ -778,7 +856,7 @@ void dqput(struct dquot *dquot)
+ 	}
+ #endif
+ 	dqstats_inc(DQST_DROPS);
+-we_slept:
++
+ 	spin_lock(&dq_list_lock);
+ 	if (atomic_read(&dquot->dq_count) > 1) {
+ 		/* We have more than one user... nothing to do */
+@@ -790,35 +868,15 @@ we_slept:
+ 		spin_unlock(&dq_list_lock);
+ 		return;
+ 	}
++
+ 	/* Need to release dquot? */
+-	if (dquot_dirty(dquot)) {
+-		spin_unlock(&dq_list_lock);
+-		/* Commit dquot before releasing */
+-		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+-		if (ret < 0) {
+-			quota_error(dquot->dq_sb, "Can't write quota structure"
+-				    " (error %d). Quota may get out of sync!",
+-				    ret);
+-			/*
+-			 * We clear dirty bit anyway, so that we avoid
+-			 * infinite loop here
+-			 */
+-			clear_dquot_dirty(dquot);
+-		}
+-		goto we_slept;
+-	}
+-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+-		spin_unlock(&dq_list_lock);
+-		dquot->dq_sb->dq_op->release_dquot(dquot);
+-		goto we_slept;
+-	}
+-	atomic_dec(&dquot->dq_count);
+ #ifdef CONFIG_QUOTA_DEBUG
+ 	/* sanity check */
+ 	BUG_ON(!list_empty(&dquot->dq_free));
+ #endif
+-	put_dquot_last(dquot);
++	put_releasing_dquots(dquot);
+ 	spin_unlock(&dq_list_lock);
++	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+ 
+@@ -908,7 +966,7 @@ we_slept:
+ 	 * already finished or it will be canceled due to dq_count > 1 test */
+ 	wait_on_dquot(dquot);
+ 	/* Read the dquot / allocate space in quota file */
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++	if (!dquot_active(dquot)) {
+ 		int err;
+ 
+ 		err = sb->dq_op->acquire_dquot(dquot);
+@@ -1425,7 +1483,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
+ 	return QUOTA_NL_NOWARN;
+ }
+ 
+-static int dquot_active(const struct inode *inode)
++static int inode_quota_active(const struct inode *inode)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 
+@@ -1448,7 +1506,7 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 	qsize_t rsv;
+ 	int ret = 0;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 
+ 	dquots = i_dquot(inode);
+@@ -1556,7 +1614,7 @@ bool dquot_initialize_needed(struct inode *inode)
+ 	struct dquot **dquots;
+ 	int i;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return false;
+ 
+ 	dquots = i_dquot(inode);
+@@ -1667,7 +1725,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ 	int reserve = flags & DQUOT_SPACE_RESERVE;
+ 	struct dquot **dquots;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		if (reserve) {
+ 			spin_lock(&inode->i_lock);
+ 			*inode_reserved_space(inode) += number;
+@@ -1737,7 +1795,7 @@ int dquot_alloc_inode(struct inode *inode)
+ 	struct dquot_warn warn[MAXQUOTAS];
+ 	struct dquot * const *dquots;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ 		warn[cnt].w_type = QUOTA_NL_NOWARN;
+@@ -1780,7 +1838,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ 	struct dquot **dquots;
+ 	int cnt, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		spin_lock(&inode->i_lock);
+ 		*inode_reserved_space(inode) -= number;
+ 		__inode_add_bytes(inode, number);
+@@ -1822,7 +1880,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ 	struct dquot **dquots;
+ 	int cnt, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		spin_lock(&inode->i_lock);
+ 		*inode_reserved_space(inode) += number;
+ 		__inode_sub_bytes(inode, number);
+@@ -1866,7 +1924,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ 	struct dquot **dquots;
+ 	int reserve = flags & DQUOT_SPACE_RESERVE, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		if (reserve) {
+ 			spin_lock(&inode->i_lock);
+ 			*inode_reserved_space(inode) -= number;
+@@ -1921,7 +1979,7 @@ void dquot_free_inode(struct inode *inode)
+ 	struct dquot * const *dquots;
+ 	int index;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return;
+ 
+ 	dquots = i_dquot(inode);
+@@ -2093,7 +2151,7 @@ int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
+ 	struct super_block *sb = inode->i_sb;
+ 	int ret;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 
+ 	if (i_uid_needs_update(idmap, iattr, inode)) {
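[The quota_release_workfn() added above relies on a classic detach-and-drain idiom: dqput() parks dquots on releasing_dquots under dq_list_lock, and the worker swaps the whole list out with list_replace_init() before processing it, so concurrent dqput() callers cannot livelock the drain. A minimal userspace sketch of that idiom with a plain mutex and a singly linked list — illustrative only, not kernel code:]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *releasing;		/* shared "releasing" list */

static void add_releasing(int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->id = id;
	pthread_mutex_lock(&list_lock);
	n->next = releasing;		/* push under the lock */
	releasing = n;
	pthread_mutex_unlock(&list_lock);
}

static void drain_releasing(void)
{
	pthread_mutex_lock(&list_lock);
	struct node *batch = releasing;	/* detach the whole list ... */
	releasing = NULL;		/* ... and reinitialise the head */
	pthread_mutex_unlock(&list_lock);

	while (batch) {			/* drain without the lock held */
		struct node *n = batch;

		batch = n->next;
		printf("releasing node %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		add_releasing(i);
	drain_releasing();
	return 0;
}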
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index 479aa4a57602f..015bfe4e45241 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -2326,7 +2326,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
+ 	int i, j;
+ 
+ 	bh = __getblk(dev, block, bufsize);
+-	if (buffer_uptodate(bh))
++	if (!bh || buffer_uptodate(bh))
+ 		return (bh);
+ 
+ 	if (block + BUFNR > max_block) {
+@@ -2336,6 +2336,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
+ 	j = 1;
+ 	for (i = 1; i < blocks; i++) {
+ 		bh = __getblk(dev, block + i, bufsize);
++		if (!bh)
++			break;
+ 		if (buffer_uptodate(bh)) {
+ 			brelse(bh);
+ 			break;
+diff --git a/fs/splice.c b/fs/splice.c
+index 3e2a31e1ce6a8..2e4cab57fb2ff 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1269,10 +1269,8 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
+ 		if ((in->f_flags | out->f_flags) & O_NONBLOCK)
+ 			flags |= SPLICE_F_NONBLOCK;
+ 
+-		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
+-	}
+-
+-	if (ipipe) {
++		ret = splice_pipe_to_pipe(ipipe, opipe, len, flags);
++	} else if (ipipe) {
+ 		if (off_in)
+ 			return -ESPIPE;
+ 		if (off_out) {
+@@ -1297,18 +1295,11 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
+ 		ret = do_splice_from(ipipe, out, &offset, len, flags);
+ 		file_end_write(out);
+ 
+-		if (ret > 0)
+-			fsnotify_modify(out);
+-
+ 		if (!off_out)
+ 			out->f_pos = offset;
+ 		else
+ 			*off_out = offset;
+-
+-		return ret;
+-	}
+-
+-	if (opipe) {
++	} else if (opipe) {
+ 		if (off_out)
+ 			return -ESPIPE;
+ 		if (off_in) {
+@@ -1324,18 +1315,25 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
+ 
+ 		ret = splice_file_to_pipe(in, opipe, &offset, len, flags);
+ 
+-		if (ret > 0)
+-			fsnotify_access(in);
+-
+ 		if (!off_in)
+ 			in->f_pos = offset;
+ 		else
+ 			*off_in = offset;
++	} else {
++		ret = -EINVAL;
++	}
+ 
+-		return ret;
++	if (ret > 0) {
++		/*
++		 * Generate modify out before access in:
++		 * do_splice_from() may have already sent modify out,
++		 * and this ensures the events get merged.
++		 */
++		fsnotify_modify(out);
++		fsnotify_access(in);
+ 	}
+ 
+-	return -EINVAL;
++	return ret;
+ }
+ 
+ static long __do_splice(struct file *in, loff_t __user *off_in,
+@@ -1464,6 +1462,9 @@ static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
+ 		pipe_unlock(pipe);
+ 	}
+ 
++	if (ret > 0)
++		fsnotify_access(file);
++
+ 	return ret;
+ }
+ 
+@@ -1493,8 +1494,10 @@ static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
+ 	if (!ret)
+ 		ret = iter_to_pipe(iter, pipe, buf_flag);
+ 	pipe_unlock(pipe);
+-	if (ret > 0)
++	if (ret > 0) {
+ 		wakeup_pipe_readers(pipe);
++		fsnotify_modify(file);
++	}
+ 	return ret;
+ }
+ 
+@@ -1928,6 +1931,11 @@ long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags)
+ 		}
+ 	}
+ 
++	if (ret > 0) {
++		fsnotify_access(in);
++		fsnotify_modify(out);
++	}
++
+ 	return ret;
+ }
+ 
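[With the fs/splice.c changes above, splice(), vmsplice() and tee() now generate fsnotify events on both ends (modify on the output before access on the input, so the events can merge). A userspace watcher can observe this; a minimal inotify sketch with a hypothetical path /tmp/out and error handling trimmed — illustrative only:]

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init();

	/* watch a file that will be the destination of a splice() */
	inotify_add_watch(fd, "/tmp/out", IN_MODIFY | IN_ACCESS);

	ssize_t len = read(fd, buf, sizeof(buf));	/* blocks for an event */
	if (len >= (ssize_t)sizeof(struct inotify_event)) {
		struct inotify_event *ev = (struct inotify_event *)buf;

		printf("event mask 0x%x\n", ev->mask);	/* expect IN_MODIFY */
	}
	close(fd);
	return 0;
}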
+diff --git a/fs/verity/signature.c b/fs/verity/signature.c
+index 72034bc71c9d9..e22fb25d7babe 100644
+--- a/fs/verity/signature.c
++++ b/fs/verity/signature.c
+@@ -62,6 +62,22 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
+ 		return 0;
+ 	}
+ 
++	if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
++		/*
++		 * The ".fs-verity" keyring is empty, due to builtin signatures
++		 * being supported by the kernel but not actually being used.
++		 * In this case, verify_pkcs7_signature() would always return an
++		 * error, usually ENOKEY.  It could also be EBADMSG if the
++		 * PKCS#7 is malformed, but that isn't very important to
++		 * distinguish.  So, just skip to ENOKEY to avoid the attack
++		 * surface of the PKCS#7 parser, which would otherwise be
++		 * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
++		 */
++		fsverity_err(inode,
++			     "fs-verity keyring is empty, rejecting signed file!");
++		return -ENOKEY;
++	}
++
+ 	d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
+ 	if (!d)
+ 		return -ENOMEM;
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 6156161b181f1..ca86f4c6ba439 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -12,6 +12,7 @@
+ #include <linux/cache.h>
+ #include <linux/crypto.h>
+ #include <linux/types.h>
++#include <linux/workqueue.h>
+ 
+ /*
+  * Maximum values for blocksize and alignmask, used to allocate
+@@ -82,6 +83,8 @@ struct crypto_instance {
+ 		struct crypto_spawn *spawns;
+ 	};
+ 
++	struct work_struct free_work;
++
+ 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ };
+ 
+diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+index 721105ea4fad8..8454915917849 100644
+--- a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
++++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+@@ -494,5 +494,15 @@
+ #define USB30_SEC_GDSC					11
+ #define EMAC_0_GDSC					12
+ #define EMAC_1_GDSC					13
++#define USB4_1_GDSC					14
++#define USB4_GDSC					15
++#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC		16
++#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC		17
++#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC		18
++#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC		19
++#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC			20
++#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC			21
++#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC			22
++#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC			23
+ 
+ #endif
+diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
+index 14dc461b0e829..255701e1251b4 100644
+--- a/include/linux/arm_sdei.h
++++ b/include/linux/arm_sdei.h
+@@ -47,10 +47,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
+ int sdei_mask_local_cpu(void);
+ int sdei_unmask_local_cpu(void);
+ void __init sdei_init(void);
++void sdei_handler_abort(void);
+ #else
+ static inline int sdei_mask_local_cpu(void) { return 0; }
+ static inline int sdei_unmask_local_cpu(void) { return 0; }
+ static inline void sdei_init(void) { }
++static inline void sdei_handler_abort(void) { }
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
+ 
+ 
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 87d94be7825af..56f7f79137921 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -538,6 +538,7 @@ struct request_queue {
+ #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
+ #define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
+ #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
++#define QUEUE_FLAG_HW_WC	18	/* Write back caching supported */
+ #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
+ #define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
+ #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 39e21e3815ad4..9e8f87800e21a 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -360,6 +360,7 @@ struct hid_item {
+ #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP	BIT(18)
+ #define HID_QUIRK_HAVE_SPECIAL_DRIVER		BIT(19)
+ #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE	BIT(20)
++#define HID_QUIRK_NOINVERT			BIT(21)
+ #define HID_QUIRK_FULLSPEED_INTERVAL		BIT(28)
+ #define HID_QUIRK_NO_INIT_REPORTS		BIT(29)
+ #define HID_QUIRK_NO_IGNORE			BIT(30)
+diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
+index 1ed52441972f9..10a1e81434cb9 100644
+--- a/include/linux/if_arp.h
++++ b/include/linux/if_arp.h
+@@ -53,6 +53,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
+ 	case ARPHRD_NONE:
+ 	case ARPHRD_RAWIP:
+ 	case ARPHRD_PIMREG:
++	/* PPP adds its l2 header automatically in ppp_start_xmit().
++	 * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
++	 */
++	case ARPHRD_PPP:
+ 		return false;
+ 	default:
+ 		return true;
+diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
+index 73f5c120def88..2a36f3218b510 100644
+--- a/include/linux/kernfs.h
++++ b/include/linux/kernfs.h
+@@ -550,6 +550,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
+ 				 const struct iattr *iattr)
+ { return -ENOSYS; }
+ 
++static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
++					   struct poll_table_struct *pt)
++{ return -ENOSYS; }
++
+ static inline void kernfs_notify(struct kernfs_node *kn) { }
+ 
+ static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 7308a1a7599b4..af796986baee6 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -54,6 +54,7 @@ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *f
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
+ LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
++LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
+ LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ 	 struct fs_context *src_sc)
+ LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 5818af8eca5a5..dbf26bc89dd46 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -284,6 +284,11 @@ struct mem_cgroup {
+ 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
+ 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ 
++	/*
++	 * Hint of reclaim pressure for socket memory management. Note
++	 * that this indicator should NOT be used in legacy cgroup mode
++	 * where socket memory is accounted/charged separately.
++	 */
+ 	unsigned long		socket_pressure;
+ 
+ 	/* Legacy tcp memory accounting */
+@@ -1727,8 +1732,8 @@ void mem_cgroup_sk_alloc(struct sock *sk);
+ void mem_cgroup_sk_free(struct sock *sk);
+ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+ {
+-	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
+-		return true;
++	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
++		return !!memcg->tcpmem_pressure;
+ 	do {
+ 		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
+ 			return true;
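[The comment and hunk above clarify that socket_pressure is a jiffies deadline: pressure is reported only while the current time is still before it, so the hint decays on its own, and in legacy (cgroup v1) mode only tcpmem_pressure is consulted. A userspace sketch of such a self-decaying pressure window, using a millisecond clock in place of jiffies — illustrative only:]

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static long pressure_until_ms;		/* 0 = no pressure recorded */

static void note_pressure(void)
{
	pressure_until_ms = now_ms() + 1000;	/* hint decays after 1s */
}

static bool under_pressure(void)
{
	return now_ms() < pressure_until_ms;	/* like time_before() */
}

int main(void)
{
	note_pressure();
	printf("under pressure: %d\n", under_pressure());	/* 1 */
	return 0;
}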
+diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
+index c5173bc062701..8421d49500bf4 100644
+--- a/include/linux/mfd/rz-mtu3.h
++++ b/include/linux/mfd/rz-mtu3.h
+@@ -151,7 +151,6 @@ struct rz_mtu3 {
+ 	void *priv_data;
+ };
+ 
+-#if IS_ENABLED(CONFIG_RZ_MTU3)
+ static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+ {
+ 	mutex_lock(&ch->lock);
+@@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
+ void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+ void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
+ 				   u16 pos, u8 val);
+-#else
+-static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+-{
+-	return false;
+-}
+-
+-static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+-{
+-}
+-
+-static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
+-{
+-	return false;
+-}
+-
+-static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
+-{
+-}
+-
+-static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
+-{
+-	return 0;
+-}
+-
+-static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+-{
+-	return 0;
+-}
+-
+-static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+-{
+-	return 0;
+-}
+-
+-static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+-{
+-	return 0;
+-}
+-
+-static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
+-{
+-	return 0;
+-}
+-
+-static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
+-{
+-}
+-
+-static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
+-{
+-}
+-
+-static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
+-{
+-}
+-
+-static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
+-{
+-}
+-
+-static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
+-						 u16 off, u16 pos, u8 val)
+-{
+-}
+-#endif
+ 
+ #endif /* __MFD_RZ_MTU3_H__ */
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index e3e6a64b98e09..e92e378df000f 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -157,31 +157,31 @@ static inline void touch_nmi_watchdog(void)
+ #ifdef arch_trigger_cpumask_backtrace
+ static inline bool trigger_all_cpu_backtrace(void)
+ {
+-	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
++	arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
+ 	return true;
+ }
+ 
+-static inline bool trigger_allbutself_cpu_backtrace(void)
++static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
+ {
+-	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
++	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
+ 	return true;
+ }
+ 
+ static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+ {
+-	arch_trigger_cpumask_backtrace(mask, false);
++	arch_trigger_cpumask_backtrace(mask, -1);
+ 	return true;
+ }
+ 
+ static inline bool trigger_single_cpu_backtrace(int cpu)
+ {
+-	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
++	arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
+ 	return true;
+ }
+ 
+ /* generic implementation */
+ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+-				   bool exclude_self,
++				   int exclude_cpu,
+ 				   void (*raise)(cpumask_t *mask));
+ bool nmi_cpu_backtrace(struct pt_regs *regs);
+ 
+@@ -190,7 +190,7 @@ static inline bool trigger_all_cpu_backtrace(void)
+ {
+ 	return false;
+ }
+-static inline bool trigger_allbutself_cpu_backtrace(void)
++static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
+ {
+ 	return false;
+ }
+diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
+index fa030d93b768e..27373024856dc 100644
+--- a/include/linux/nvmem-consumer.h
++++ b/include/linux/nvmem-consumer.h
+@@ -256,7 +256,7 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ static inline struct device_node *
+ of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return NULL;
+ }
+ #endif /* CONFIG_NVMEM && CONFIG_OF */
+ 
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c69a2cc1f4123..7ee498cd1f374 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -467,6 +467,7 @@ struct pci_dev {
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
++	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
+ 	u32		saved_config_space[16]; /* Config space saved at suspend time */
+ 	struct hlist_head saved_cap_space;
+ 	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
+@@ -1217,11 +1218,40 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+ int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+ int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
+-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+-				       u16 clear, u16 set);
++int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
++						u16 clear, u16 set);
++int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
++					      u16 clear, u16 set);
+ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ 					u32 clear, u32 set);
+ 
++/**
++ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
++ * @dev:	PCI device structure of the PCI Express device
++ * @pos:	PCI Express Capability Register
++ * @clear:	Clear bitmask
++ * @set:	Set bitmask
++ *
++ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
++ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
++ * Capability Registers are accessed concurrently in RMW fashion, hence
++ * require locking which is handled transparently to the caller.
++ */
++static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
++						     int pos,
++						     u16 clear, u16 set)
++{
++	switch (pos) {
++	case PCI_EXP_LNKCTL:
++	case PCI_EXP_RTCTL:
++		return pcie_capability_clear_and_set_word_locked(dev, pos,
++								 clear, set);
++	default:
++		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
++								   clear, set);
++	}
++}
++
+ static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
+ 					   u16 set)
+ {
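[The new inline wrapper above routes RMW on contended registers (PCI_EXP_LNKCTL, PCI_EXP_RTCTL) through a locked helper guarded by the new pcie_cap_lock, and everything else through the lock-free path, without changing any call sites. A hypothetical driver fragment showing a caller being serialized transparently — kernel context assumed, the example_ name is made up:]

#include <linux/pci.h>

/* Hypothetical driver helper: the PCI_EXP_LNKCTL RMW below is now
 * serialized on pdev->pcie_cap_lock inside the accessor, so it can run
 * concurrently with other LNKCTL updates (e.g. ASPM) without losing bits. */
static void example_disable_aspm_l1(struct pci_dev *pdev)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPM_L1, 0);
}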
+diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
+index c758809d5bcf3..f9f9931e02d6a 100644
+--- a/include/linux/pid_namespace.h
++++ b/include/linux/pid_namespace.h
+@@ -17,18 +17,10 @@
+ struct fs_pin;
+ 
+ #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+-/*
+- * sysctl for vm.memfd_noexec
+- * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+- *	acts like MFD_EXEC was set.
+- * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+- *	acts like MFD_NOEXEC_SEAL was set.
+- * 2: memfd_create() without MFD_NOEXEC_SEAL will be
+- *	rejected.
+- */
+-#define MEMFD_NOEXEC_SCOPE_EXEC			0
+-#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL		1
+-#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED	2
++/* modes for vm.memfd_noexec sysctl */
++#define MEMFD_NOEXEC_SCOPE_EXEC			0 /* MFD_EXEC implied if unset */
++#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL		1 /* MFD_NOEXEC_SEAL implied if unset */
++#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED	2 /* same as 1, except MFD_EXEC rejected */
+ #endif
+ 
+ struct pid_namespace {
+@@ -47,7 +39,6 @@ struct pid_namespace {
+ 	int reboot;	/* group exit code if this pidns was rebooted */
+ 	struct ns_common ns;
+ #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+-	/* sysctl for vm.memfd_noexec */
+ 	int memfd_noexec_scope;
+ #endif
+ } __randomize_layout;
+@@ -64,6 +55,23 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
+ 	return ns;
+ }
+ 
++#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
++static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
++{
++	int scope = MEMFD_NOEXEC_SCOPE_EXEC;
++
++	for (; ns; ns = ns->parent)
++		scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));
++
++	return scope;
++}
++#else
++static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
++{
++	return 0;
++}
++#endif
++
+ extern struct pid_namespace *copy_pid_ns(unsigned long flags,
+ 	struct user_namespace *user_ns, struct pid_namespace *ns);
+ extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
+@@ -78,6 +86,11 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
+ 	return ns;
+ }
+ 
++static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
++{
++	return 0;
++}
++
+ static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
+ 	struct user_namespace *user_ns, struct pid_namespace *ns)
+ {
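[pidns_memfd_noexec_scope() above computes the effective vm.memfd_noexec mode as the maximum over the namespace and all of its ancestors, so a child namespace can tighten but never relax its parent's policy. A small userspace sketch of that ancestor walk, with a hypothetical struct standing in for struct pid_namespace — illustrative only:]

#include <stdio.h>

/* hypothetical stand-in for struct pid_namespace */
struct ns { int memfd_noexec_scope; struct ns *parent; };

static int effective_scope(const struct ns *ns)
{
	int scope = 0;	/* MEMFD_NOEXEC_SCOPE_EXEC, the laxest mode */

	for (; ns; ns = ns->parent)
		if (ns->memfd_noexec_scope > scope)
			scope = ns->memfd_noexec_scope;
	return scope;
}

int main(void)
{
	struct ns root  = { .memfd_noexec_scope = 1, .parent = NULL };
	struct ns child = { .memfd_noexec_scope = 0, .parent = &root };

	/* the child asks for 0 but inherits the stricter 1 from root */
	printf("effective scope: %d\n", effective_scope(&child));
	return 0;
}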
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 32828502f09ea..bac98ea18f78b 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -293,6 +293,7 @@ int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+ int security_bprm_check(struct linux_binprm *bprm);
+ void security_bprm_committing_creds(struct linux_binprm *bprm);
+ void security_bprm_committed_creds(struct linux_binprm *bprm);
++int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
+ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
+ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
+ int security_sb_alloc(struct super_block *sb);
+@@ -629,6 +630,11 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+ {
+ }
+ 
++static inline int security_fs_context_submount(struct fs_context *fc,
++					   struct super_block *reference)
++{
++	return 0;
++}
+ static inline int security_fs_context_dup(struct fs_context *fc,
+ 					  struct fs_context *src_fc)
+ {
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 1e8bbdb8da905..f99d798093ab3 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -878,7 +878,8 @@ extern int  perf_uprobe_init(struct perf_event *event,
+ extern void perf_uprobe_destroy(struct perf_event *event);
+ extern int bpf_get_uprobe_info(const struct perf_event *event,
+ 			       u32 *fd_type, const char **filename,
+-			       u64 *probe_offset, bool perf_type_tracepoint);
++			       u64 *probe_offset, u64 *probe_addr,
++			       bool perf_type_tracepoint);
+ #endif
+ extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ 				     char *filter_str);
+diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
+index 350d49012659b..28aeef8f9e7b5 100644
+--- a/include/linux/usb/typec_altmode.h
++++ b/include/linux/usb/typec_altmode.h
+@@ -67,7 +67,7 @@ struct typec_altmode_ops {
+ 
+ int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
+ int typec_altmode_exit(struct typec_altmode *altmode);
+-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
++int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+ int typec_altmode_vdm(struct typec_altmode *altmode,
+ 		      const u32 header, const u32 *vdo, int count);
+ int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
+diff --git a/include/media/cec.h b/include/media/cec.h
+index abee41ae02d0e..9c007f83569aa 100644
+--- a/include/media/cec.h
++++ b/include/media/cec.h
+@@ -113,22 +113,25 @@ struct cec_fh {
+ #define CEC_FREE_TIME_TO_USEC(ft)		((ft) * 2400)
+ 
+ struct cec_adap_ops {
+-	/* Low-level callbacks */
++	/* Low-level callbacks, called with adap->lock held */
+ 	int (*adap_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+-	void (*adap_configured)(struct cec_adapter *adap, bool configured);
++	void (*adap_unconfigured)(struct cec_adapter *adap);
+ 	int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
+ 			     u32 signal_free_time, struct cec_msg *msg);
++	void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
++					  const struct cec_msg *msg);
+ 	void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
+ 	void (*adap_free)(struct cec_adapter *adap);
+ 
+-	/* Error injection callbacks */
++	/* Error injection callbacks, called without adap->lock held */
+ 	int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
+ 	bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
+ 
+-	/* High-level CEC message callback */
++	/* High-level CEC message callback, called without adap->lock held */
++	void (*configured)(struct cec_adapter *adap);
+ 	int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
+ };
+ 
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 872dcb91a540e..3ff822ebb3a47 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -309,6 +309,26 @@ enum {
+ 	 * to support it.
+ 	 */
+ 	HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
++
++	/* When this quirk is set, MSFT extension monitor tracking by
++	 * address filter is supported. Since the tracking quantity of
++	 * each pattern is limited, this feature supports tracking multiple
++	 * devices concurrently if the controller supports multiple
++	 * address filters.
++	 *
++	 * This quirk must be set before hci_register_dev is called.
++	 */
++	HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER,
++
++	/*
++	 * When this quirk is set, LE Coded PHY shall not be used. This is
++	 * required for some Intel controllers which erroneously claim to
++	 * support it but it causes problems with extended scanning.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_BROKEN_LE_CODED,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e01d52cb668c0..c0a87558aea71 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -321,8 +321,8 @@ struct adv_monitor {
+ 
+ #define HCI_MAX_SHORT_NAME_LENGTH	10
+ 
+-#define HCI_CONN_HANDLE_UNSET		0xffff
+ #define HCI_CONN_HANDLE_MAX		0x0eff
++#define HCI_CONN_HANDLE_UNSET(_handle)	(_handle > HCI_CONN_HANDLE_MAX)
+ 
+ /* Min encryption key size to match with SMP */
+ #define HCI_MIN_ENC_KEY_SIZE		7
+@@ -739,6 +739,7 @@ struct hci_conn {
+ 	unsigned long	flags;
+ 
+ 	enum conn_reasons conn_reason;
++	__u8		abort_reason;
+ 
+ 	__u32		clock;
+ 	__u16		clock_accuracy;
+@@ -758,7 +759,6 @@ struct hci_conn {
+ 	struct delayed_work auto_accept_work;
+ 	struct delayed_work idle_work;
+ 	struct delayed_work le_conn_timeout;
+-	struct work_struct  le_scan_cleanup;
+ 
+ 	struct device	dev;
+ 	struct dentry	*debugfs;
+@@ -974,6 +974,10 @@ enum {
+ 	HCI_CONN_SCANNING,
+ 	HCI_CONN_AUTH_FAILURE,
+ 	HCI_CONN_PER_ADV,
++	HCI_CONN_BIG_CREATED,
++	HCI_CONN_CREATE_CIS,
++	HCI_CONN_BIG_SYNC,
++	HCI_CONN_BIG_SYNC_FAILED,
+ };
+ 
+ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
+@@ -1115,6 +1119,32 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
++static inline struct hci_conn *
++hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
++				 bdaddr_t *ba,
++				 __u8 big, __u8 bis)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
++			!test_bit(HCI_CONN_PER_ADV, &c->flags))
++			continue;
++
++		if (c->iso_qos.bcast.big == big &&
++		    c->iso_qos.bcast.bis == bis) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
++	rcu_read_unlock();
++
++	return NULL;
++}
++
+ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
+ 								__u16 handle)
+ {
+@@ -1259,6 +1289,29 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
++static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev,
++							__u8 handle)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type != ISO_LINK)
++			continue;
++
++		if (handle == c->iso_qos.bcast.big) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
++
++	rcu_read_unlock();
++
++	return NULL;
++}
++
+ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+ 							__u8 type, __u16 state)
+ {
+@@ -1324,7 +1377,8 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason);
+ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
+ void hci_sco_setup(struct hci_conn *conn, __u8 status);
+ bool hci_iso_setup_path(struct hci_conn *conn);
+-int hci_le_create_cis(struct hci_conn *conn);
++int hci_le_create_cis_pending(struct hci_dev *hdev);
++int hci_conn_check_create_cis(struct hci_conn *conn);
+ 
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 			      u8 role);
+@@ -1351,6 +1405,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 				 __u16 setting, struct bt_codec *codec);
+ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 			      __u8 dst_type, struct bt_iso_qos *qos);
++struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
++			      struct bt_iso_qos *qos,
++			      __u8 base_len, __u8 *base);
+ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 				 __u8 dst_type, struct bt_iso_qos *qos);
+ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+@@ -1713,7 +1770,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
+ 		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+ 
+-#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED))
++#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
++			       !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \
++					 &(dev)->quirks))
+ 
+ #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
+ 			 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 2495be4d8b828..b516a0f4a55b8 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -124,7 +124,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+ 
+ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
+ 
+-int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn);
++int hci_le_create_cis_sync(struct hci_dev *hdev);
+ 
+ int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
+ 
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index 6f15e6fa154e6..53bd2d02a4f0d 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -16,9 +16,12 @@
+ #define LWTUNNEL_STATE_INPUT_REDIRECT	BIT(1)
+ #define LWTUNNEL_STATE_XMIT_REDIRECT	BIT(2)
+ 
++/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
++ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
++ */
+ enum {
+ 	LWTUNNEL_XMIT_DONE,
+-	LWTUNNEL_XMIT_CONTINUE,
++	LWTUNNEL_XMIT_CONTINUE = 0x100,
+ };
+ 
+ 
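[The renumbering above works because lwtunnel xmit handlers can propagate dst_output()-style return codes: the old enum value of LWTUNNEL_XMIT_CONTINUE (1) collided with NET_XMIT_DROP, while 0x100 lies outside both the NET_XMIT_* and NETDEV_TX_* value spaces. A trivial compile-and-run check with the relevant constants copied from the kernel headers — illustrative only:]

#include <assert.h>

/* values copied from linux/netdevice.h and the hunk above */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01
#define NET_XMIT_CN		0x02
#define NETDEV_TX_BUSY		0x10
#define LWTUNNEL_XMIT_CONTINUE	0x100

int main(void)
{
	/* the old enum value (1) was indistinguishable from NET_XMIT_DROP */
	assert(LWTUNNEL_XMIT_CONTINUE != NET_XMIT_SUCCESS);
	assert(LWTUNNEL_XMIT_CONTINUE != NET_XMIT_DROP);
	assert(LWTUNNEL_XMIT_CONTINUE != NET_XMIT_CN);
	assert(LWTUNNEL_XMIT_CONTINUE != NETDEV_TX_BUSY);
	return 0;
}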
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 2a55ae932c568..ad41581384d9f 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1192,9 +1192,11 @@ struct ieee80211_tx_info {
+ 			u8 ampdu_ack_len;
+ 			u8 ampdu_len;
+ 			u8 antenna;
++			u8 pad;
+ 			u16 tx_time;
+ 			u8 flags;
+-			void *status_driver_data[18 / sizeof(void *)];
++			u8 pad2;
++			void *status_driver_data[16 / sizeof(void *)];
+ 		} status;
+ 		struct {
+ 			struct ieee80211_tx_rate driver_rates[
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0ca972ebd3dd0..10fc5c5928f71 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -350,7 +350,6 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
+ 				     bool force_schedule);
+ 
+-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
+ static inline void tcp_dec_quickack_mode(struct sock *sk,
+ 					 const unsigned int pkts)
+ {
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 70b7475dcf56b..a2b8d30c4c803 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -769,7 +769,7 @@ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+ extern int scsi_host_busy(struct Scsi_Host *shost);
+ extern void scsi_host_put(struct Scsi_Host *t);
+-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
++extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
+ extern const char *scsi_host_state_name(enum scsi_host_state);
+ extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ 					    enum scsi_host_status status);
+diff --git a/include/sound/ump.h b/include/sound/ump.h
+index 44d2c2fd021d2..91238dabe3075 100644
+--- a/include/sound/ump.h
++++ b/include/sound/ump.h
+@@ -45,6 +45,7 @@ struct snd_ump_endpoint {
+ 	spinlock_t legacy_locks[2];
+ 	struct snd_rawmidi *legacy_rmidi;
+ 	struct snd_rawmidi_substream *legacy_substreams[2][SNDRV_UMP_MAX_GROUPS];
++	unsigned char legacy_mapping[SNDRV_UMP_MAX_GROUPS];
+ 
+ 	/* for legacy output; need to open the actual substream unlike input */
+ 	int legacy_out_opens;
+diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
+index e0e1591383312..20e285fdbc463 100644
+--- a/include/uapi/linux/elf.h
++++ b/include/uapi/linux/elf.h
+@@ -443,6 +443,8 @@ typedef struct elf64_shdr {
+ #define NT_MIPS_DSP	0x800		/* MIPS DSP ASE registers */
+ #define NT_MIPS_FP_MODE	0x801		/* MIPS floating-point mode */
+ #define NT_MIPS_MSA	0x802		/* MIPS SIMD registers */
++#define NT_RISCV_CSR	0x900		/* RISC-V Control and Status Registers */
++#define NT_RISCV_VECTOR	0x901		/* RISC-V vector registers */
+ #define NT_LOONGARCH_CPUCFG	0xa00	/* LoongArch CPU config registers */
+ #define NT_LOONGARCH_CSR	0xa01	/* LoongArch control and status registers */
+ #define NT_LOONGARCH_LSX	0xa02	/* LoongArch Loongson SIMD Extension registers */
+diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
+index 99440b2e8c352..bee2bdb0eedbc 100644
+--- a/include/uapi/linux/ioprio.h
++++ b/include/uapi/linux/ioprio.h
+@@ -107,20 +107,21 @@ enum {
+ /*
+  * Return an I/O priority value based on a class, a level and a hint.
+  */
+-static __always_inline __u16 ioprio_value(int class, int level, int hint)
++static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
++					  int priohint)
+ {
+-	if (IOPRIO_BAD_VALUE(class, IOPRIO_NR_CLASSES) ||
+-	    IOPRIO_BAD_VALUE(level, IOPRIO_NR_LEVELS) ||
+-	    IOPRIO_BAD_VALUE(hint, IOPRIO_NR_HINTS))
++	if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
++	    IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
++	    IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
+ 		return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
+ 
+-	return (class << IOPRIO_CLASS_SHIFT) |
+-		(hint << IOPRIO_HINT_SHIFT) | level;
++	return (prioclass << IOPRIO_CLASS_SHIFT) |
++		(priohint << IOPRIO_HINT_SHIFT) | priolevel;
+ }
+ 
+-#define IOPRIO_PRIO_VALUE(class, level)			\
+-	ioprio_value(class, level, IOPRIO_HINT_NONE)
+-#define IOPRIO_PRIO_VALUE_HINT(class, level, hint)	\
+-	ioprio_value(class, level, hint)
++#define IOPRIO_PRIO_VALUE(prioclass, priolevel)			\
++	ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
++#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint)	\
++	ioprio_value(prioclass, priolevel, priohint)
+ 
+ #endif /* _UAPI_LINUX_IOPRIO_H */
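The rename of class/level/hint to prioclass/priolevel/priohint changes no values; presumably it keeps the UAPI header consumable from C++ (where class is a keyword) and avoids shadow warnings. A minimal userspace sketch using the patched header and the raw ioprio_set syscall (assumes <linux/ioprio.h> from this series is installed):

/* Illustration only. Sets this process to best-effort, level 4. */
#include <linux/ioprio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* Same macro, new parameter names, identical expansion. */
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
		perror("ioprio_set");
	return 0;
}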
+diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
+index 7e42a5b7558bf..ff0a931833e25 100644
+--- a/include/uapi/linux/sync_file.h
++++ b/include/uapi/linux/sync_file.h
+@@ -56,7 +56,7 @@ struct sync_fence_info {
+  * @name:	name of fence
+  * @status:	status of fence. 1: signaled 0:active <0:error
+  * @flags:	sync_file_info flags
+- * @num_fences	number of fences in the sync_file
++ * @num_fences:	number of fences in the sync_file
+  * @pad:	padding for 64-bit alignment, should always be zero
+  * @sync_fence_info: pointer to array of struct &sync_fence_info with all
+  *		 fences in the sync_file
+diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
+index 198cb391f9db2..29760d5cb273c 100644
+--- a/include/ufs/ufs.h
++++ b/include/ufs/ufs.h
+@@ -102,6 +102,12 @@ enum {
+ 	UPIU_CMD_FLAGS_READ	= 0x40,
+ };
+ 
++/* UPIU response flags */
++enum {
++	UPIU_RSP_FLAG_UNDERFLOW	= 0x20,
++	UPIU_RSP_FLAG_OVERFLOW	= 0x40,
++};
++
+ /* UPIU Task Attributes */
+ enum {
+ 	UPIU_TASK_ATTR_SIMPLE	= 0x00,
+diff --git a/init/Kconfig b/init/Kconfig
+index f7f65af4ee129..5e7d4885d1bf8 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	select KERNFS
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+ 	  and IO capacity are in the system.
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 399e9a15c38d6..2c03bc881edfd 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_wq *wq)
+ 		complete(&wq->worker_done);
+ }
+ 
++bool io_wq_worker_stopped(void)
++{
++	struct io_worker *worker = current->worker_private;
++
++	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++		return true;
++
++	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+ 	struct io_wq_acct *acct = io_wq_get_acct(worker);
+@@ -1285,13 +1295,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
+ 	return __io_wq_cpu_online(wq, cpu, false);
+ }
+ 
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
+ {
++	if (!tctx || !tctx->io_wq)
++		return -EINVAL;
++
+ 	rcu_read_lock();
+ 	if (mask)
+-		cpumask_copy(wq->cpu_mask, mask);
++		cpumask_copy(tctx->io_wq->cpu_mask, mask);
+ 	else
+-		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
++		cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
+ 	rcu_read_unlock();
+ 
+ 	return 0;
+diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
+index 31228426d1924..2b2a6406dd8ee 100644
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -50,8 +50,9 @@ void io_wq_put_and_exit(struct io_wq *wq);
+ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+ void io_wq_hash_work(struct io_wq_work *work, void *val);
+ 
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+ 
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 93db3e4e7b688..4e9217c1eb2e0 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -229,7 +229,6 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
+ static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
+ {
+ 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+-	kasan_poison_object_data(req_cachep, req);
+ }
+ 
+ static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
+@@ -1674,6 +1673,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+ 			break;
+ 		nr_events += ret;
+ 		ret = 0;
++
++		if (task_sigpending(current))
++			return -EINTR;
+ 	} while (nr_events < min && !need_resched());
+ 
+ 	return ret;
+@@ -1964,6 +1966,8 @@ fail:
+ 		if (!needs_poll) {
+ 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
+ 				break;
++			if (io_wq_worker_stopped())
++				break;
+ 			cond_resched();
+ 			continue;
+ 		}
+@@ -2382,7 +2386,9 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
+ 	}
+ 
+ 	/* drop invalid entries */
++	spin_lock(&ctx->completion_lock);
+ 	ctx->cq_extra--;
++	spin_unlock(&ctx->completion_lock);
+ 	WRITE_ONCE(ctx->rings->sq_dropped,
+ 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
+ 	return false;
+@@ -4197,16 +4203,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
+ 	return 0;
+ }
+ 
++static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
++					 cpumask_var_t new_mask)
++{
++	int ret;
++
++	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
++		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
++	} else {
++		mutex_unlock(&ctx->uring_lock);
++		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
++		mutex_lock(&ctx->uring_lock);
++	}
++
++	return ret;
++}
++
+ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ 				       void __user *arg, unsigned len)
+ {
+-	struct io_uring_task *tctx = current->io_uring;
+ 	cpumask_var_t new_mask;
+ 	int ret;
+ 
+-	if (!tctx || !tctx->io_wq)
+-		return -EINVAL;
+-
+ 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+@@ -4227,19 +4245,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ 		return -EFAULT;
+ 	}
+ 
+-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
++	ret = __io_register_iowq_aff(ctx, new_mask);
+ 	free_cpumask_var(new_mask);
+ 	return ret;
+ }
+ 
+ static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
+ {
+-	struct io_uring_task *tctx = current->io_uring;
+-
+-	if (!tctx || !tctx->io_wq)
+-		return -EINVAL;
+-
+-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
++	return __io_register_iowq_aff(ctx, NULL);
+ }
+ 
+ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index d3606d30cf6fd..12769bad5cee0 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -354,7 +354,6 @@ static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
+ 	struct io_kiocb *req;
+ 
+ 	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+-	kasan_unpoison_object_data(req_cachep, req);
+ 	wq_stack_extract(&ctx->submit_state.free_list);
+ 	return req;
+ }
+diff --git a/io_uring/net.c b/io_uring/net.c
+index eb1f51ddcb232..8c419c01a5dba 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -642,7 +642,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+ 
+ 	if (!mshot_finished) {
+ 		if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+-			       *ret, cflags | IORING_CQE_F_MORE, true)) {
++			       *ret, cflags | IORING_CQE_F_MORE, false)) {
+ 			io_recv_prep_retry(req);
+ 			/* Known not-empty or unknown state, retry */
+ 			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+@@ -1367,7 +1367,7 @@ retry:
+ 	if (ret < 0)
+ 		return ret;
+ 	if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+-		       IORING_CQE_F_MORE, true))
++		       IORING_CQE_F_MORE, false))
+ 		goto retry;
+ 
+ 	return -ECANCELED;
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 5e329e3cd4706..bd6c2c7959a5b 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -421,3 +421,20 @@ err:
+ 	io_sq_thread_finish(ctx);
+ 	return ret;
+ }
++
++__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
++				     cpumask_var_t mask)
++{
++	struct io_sq_data *sqd = ctx->sq_data;
++	int ret = -EINVAL;
++
++	if (sqd) {
++		io_sq_thread_park(sqd);
++		/* Don't set affinity for a dying thread */
++		if (sqd->thread)
++			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
++		io_sq_thread_unpark(sqd);
++	}
++
++	return ret;
++}
+diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
+index e1b8d508d22d1..8df37e8c91493 100644
+--- a/io_uring/sqpoll.h
++++ b/io_uring/sqpoll.h
+@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
+ void io_sq_thread_unpark(struct io_sq_data *sqd);
+ void io_put_sq_data(struct io_sq_data *sqd);
+ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
++int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
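Taken together, these io_uring hunks reroute IORING_REGISTER_IOWQ_AFF through the ring context, so SQPOLL rings (whose submitting task has no io-wq of its own) park the sq thread and retarget its io-wq instead of failing. A userspace sketch of the call that previously returned -EINVAL on SQPOLL rings (assumes liburing >= 2.1, which provides io_uring_register_iowq_aff):

/* Illustration only -- not part of the patch. */
#include <liburing.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	cpu_set_t mask;
	int ret;

	/* SQPOLL setup may require privileges on older kernels. */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);

	/* With SQPOLL, the fix parks the sq thread and applies the mask
	 * to that thread's io-wq rather than the caller's (which does
	 * not exist). */
	ret = io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
	if (ret < 0)
		fprintf(stderr, "register_iowq_aff: %s\n", strerror(-ret));

	io_uring_queue_exit(&ring);
	return ret < 0;
}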
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index addeed3df15d3..8dfd581cd5543 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2456,6 +2456,8 @@ void __audit_inode_child(struct inode *parent,
+ 		}
+ 	}
+ 
++	cond_resched();
++
+ 	/* is there a matching child entry? */
+ 	list_for_each_entry(n, &context->names_list, list) {
+ 		/* can only match entries that have a name */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 817204d533723..4b38c97990872 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6133,7 +6133,6 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
+ 	const char *tname, *mname, *tag_value;
+ 	u32 vlen, elem_id, mid;
+ 
+-	*flag = 0;
+ again:
+ 	tname = __btf_name_by_offset(btf, t->name_off);
+ 	if (!btf_type_is_struct(t)) {
+@@ -6142,6 +6141,14 @@ again:
+ 	}
+ 
+ 	vlen = btf_type_vlen(t);
++	if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
++		/*
++		 * walking unions yields untrusted pointers
++		 * with exception of __bpf_md_ptr and other
++		 * unions with a single member
++		 */
++		*flag |= PTR_UNTRUSTED;
++
+ 	if (off + size > t->size) {
+ 		/* If the last element is a variable size array, we may
+ 		 * need to relax the rule.
+@@ -6302,15 +6309,6 @@ error:
+ 		 * of this field or inside of this struct
+ 		 */
+ 		if (btf_type_is_struct(mtype)) {
+-			if (BTF_INFO_KIND(mtype->info) == BTF_KIND_UNION &&
+-			    btf_type_vlen(mtype) != 1)
+-				/*
+-				 * walking unions yields untrusted pointers
+-				 * with exception of __bpf_md_ptr and other
+-				 * unions with a single member
+-				 */
+-				*flag |= PTR_UNTRUSTED;
+-
+ 			/* our field must be inside that union or struct */
+ 			t = mtype;
+ 
+@@ -6368,7 +6366,7 @@ error:
+ 		 * that also allows using an array of int as a scratch
+ 		 * space. e.g. skb->cb[].
+ 		 */
+-		if (off + size > mtrue_end) {
++		if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
+ 			bpf_log(log,
+ 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
+ 				mname, mtrue_end, tname, off, size);
+@@ -6476,7 +6474,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ 			  bool strict)
+ {
+ 	const struct btf_type *type;
+-	enum bpf_type_flag flag;
++	enum bpf_type_flag flag = 0;
+ 	int err;
+ 
+ 	/* Are we already done? */
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 9e80efa59a5d6..8812397a5cd96 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -2243,7 +2243,7 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
+ 	case BPF_DYNPTR_TYPE_XDP:
+ 	{
+ 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
+-		if (xdp_ptr)
++		if (!IS_ERR_OR_NULL(xdp_ptr))
+ 			return xdp_ptr;
+ 
+ 		if (!buffer__opt)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 02a021c524ab8..76845dd22cd26 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4982,20 +4982,22 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
+ 			       struct bpf_reg_state *reg, u32 regno)
+ {
+ 	const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
+-	int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
++	int perm_flags;
+ 	const char *reg_name = "";
+ 
+-	/* Only unreferenced case accepts untrusted pointers */
+-	if (kptr_field->type == BPF_KPTR_UNREF)
+-		perm_flags |= PTR_UNTRUSTED;
++	if (btf_is_kernel(reg->btf)) {
++		perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
++
++		/* Only unreferenced case accepts untrusted pointers */
++		if (kptr_field->type == BPF_KPTR_UNREF)
++			perm_flags |= PTR_UNTRUSTED;
++	} else {
++		perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
++	}
+ 
+ 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
+ 		goto bad_type;
+ 
+-	if (!btf_is_kernel(reg->btf)) {
+-		verbose(env, "R%d must point to kernel BTF\n", regno);
+-		return -EINVAL;
+-	}
+ 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
+ 	reg_name = btf_type_name(reg->btf, reg->btf_id);
+ 
+@@ -5008,7 +5010,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
+ 	if (__check_ptr_off_reg(env, reg, regno, true))
+ 		return -EACCES;
+ 
+-	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
++	/* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and
+ 	 * we also need to take into account the reg->off.
+ 	 *
+ 	 * We want to support cases like:
+@@ -6085,6 +6087,11 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 				   type_is_rcu_or_null(env, reg, field_name, btf_id)) {
+ 				/* __rcu tagged pointers can be NULL */
+ 				flag |= MEM_RCU | PTR_MAYBE_NULL;
++
++				/* We always trust them */
++				if (type_is_rcu_or_null(env, reg, field_name, btf_id) &&
++				    flag & PTR_UNTRUSTED)
++					flag &= ~PTR_UNTRUSTED;
+ 			} else if (flag & (MEM_PERCPU | MEM_USER)) {
+ 				/* keep as-is */
+ 			} else {
+@@ -7745,7 +7752,10 @@ found:
+ 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
+ 			return -EFAULT;
+ 		}
+-		/* Handled by helper specific checks */
++		if (meta->func_id == BPF_FUNC_kptr_xchg) {
++			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
++				return -EACCES;
++		}
+ 		break;
+ 	case PTR_TO_BTF_ID | MEM_PERCPU:
+ 	case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
+@@ -7797,17 +7807,6 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
+ 			return 0;
+ 
+-		if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
+-			if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
+-				return __check_ptr_off_reg(env, reg, regno, true);
+-
+-			verbose(env, "R%d must have zero offset when passed to release func\n",
+-				regno);
+-			verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
+-				btf_type_name(reg->btf, reg->btf_id), reg->off);
+-			return -EINVAL;
+-		}
+-
+ 		/* Doing check_ptr_off_reg check for the offset will catch this
+ 		 * because fixed_off_ok is false, but checking here allows us
+ 		 * to give the user a better error message.
+@@ -13817,6 +13816,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		return -EINVAL;
+ 	}
+ 
++	/* check src2 operand */
++	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
++	if (err)
++		return err;
++
++	dst_reg = &regs[insn->dst_reg];
+ 	if (BPF_SRC(insn->code) == BPF_X) {
+ 		if (insn->imm != 0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -13828,12 +13833,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		if (err)
+ 			return err;
+ 
+-		if (is_pointer_value(env, insn->src_reg)) {
++		src_reg = &regs[insn->src_reg];
++		if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
++		    is_pointer_value(env, insn->src_reg)) {
+ 			verbose(env, "R%d pointer comparison prohibited\n",
+ 				insn->src_reg);
+ 			return -EACCES;
+ 		}
+-		src_reg = &regs[insn->src_reg];
+ 	} else {
+ 		if (insn->src_reg != BPF_REG_0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -13841,12 +13847,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		}
+ 	}
+ 
+-	/* check src2 operand */
+-	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
+-	if (err)
+-		return err;
+-
+-	dst_reg = &regs[insn->dst_reg];
+ 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ 
+ 	if (BPF_SRC(insn->code) == BPF_K) {
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 58e6f18f01c1b..170e342b07e3d 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1588,11 +1588,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 		}
+ 
+ 		/*
+-		 * Skip the whole subtree if the cpumask remains the same
+-		 * and has no partition root state and force flag not set.
++		 * Skip the whole subtree if
++		 * 1) the cpumask remains the same,
++		 * 2) has no partition root state,
++		 * 3) force flag not set, and
++		 * 4) for v2 load balance state same as its parent.
+ 		 */
+ 		if (!cp->partition_root_state && !force &&
+-		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
++		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
++		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
++		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
+ 			pos_css = css_rightmost_descendant(pos_css);
+ 			continue;
+ 		}
+@@ -1675,6 +1680,20 @@ update_parent_subparts:
+ 
+ 		update_tasks_cpumask(cp, tmp->new_cpus);
+ 
++		/*
++		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
++		 * from parent if current cpuset isn't a valid partition root
++		 * and their load balance states differ.
++		 */
++		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
++		    !is_partition_valid(cp) &&
++		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
++			if (is_sched_load_balance(parent))
++				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
++			else
++				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
++		}
++
+ 		/*
+ 		 * On legacy hierarchy, if the effective cpumask of any non-
+ 		 * empty cpuset is changed, we need to rebuild sched domains.
+@@ -3222,6 +3241,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 		cs->use_parent_ecpus = true;
+ 		parent->child_ecpus_count++;
+ 	}
++
++	/*
++	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
++	 */
++	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
++	    !is_sched_load_balance(parent))
++		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
++
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c
+index 0d5c29879a50b..144a464e45c66 100644
+--- a/kernel/cgroup/namespace.c
++++ b/kernel/cgroup/namespace.c
+@@ -149,9 +149,3 @@ const struct proc_ns_operations cgroupns_operations = {
+ 	.install	= cgroupns_install,
+ 	.owner		= cgroupns_owner,
+ };
+-
+-static __init int cgroup_namespaces_init(void)
+-{
+-	return 0;
+-}
+-subsys_initcall(cgroup_namespaces_init);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 88a7ede322bd5..9628ae3c2825b 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1467,8 +1467,22 @@ out:
+ 	return ret;
+ }
+ 
++struct cpu_down_work {
++	unsigned int		cpu;
++	enum cpuhp_state	target;
++};
++
++static long __cpu_down_maps_locked(void *arg)
++{
++	struct cpu_down_work *work = arg;
++
++	return _cpu_down(work->cpu, 0, work->target);
++}
++
+ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ {
++	struct cpu_down_work work = { .cpu = cpu, .target = target, };
++
+ 	/*
+ 	 * If the platform does not support hotplug, report it explicitly to
+ 	 * differentiate it from a transient offlining failure.
+@@ -1477,7 +1491,15 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ 		return -EOPNOTSUPP;
+ 	if (cpu_hotplug_disabled)
+ 		return -EBUSY;
+-	return _cpu_down(cpu, 0, target);
++
++	/*
++	 * Ensure that the control task does not run on the to be offlined
++	 * CPU to prevent a deadlock against cfs_b->period_timer.
++	 */
++	cpu = cpumask_any_but(cpu_online_mask, cpu);
++	if (cpu >= nr_cpu_ids)
++		return -EBUSY;
++	return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+ }
+ 
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 6a1d23a11026c..fee14a4486a31 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -83,6 +83,9 @@ struct pid_namespace init_pid_ns = {
+ #ifdef CONFIG_PID_NS
+ 	.ns.ops = &pidns_operations,
+ #endif
++#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
++	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
++#endif
+ };
+ EXPORT_SYMBOL_GPL(init_pid_ns);
+ 
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 0bf44afe04dd1..619972c78774f 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -110,9 +110,9 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
+ 	ns->user_ns = get_user_ns(user_ns);
+ 	ns->ucounts = ucounts;
+ 	ns->pid_allocated = PIDNS_ADDING;
+-
+-	initialize_memfd_noexec_scope(ns);
+-
++#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
++	ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns);
++#endif
+ 	return ns;
+ 
+ out_free_idr:
+diff --git a/kernel/pid_sysctl.h b/kernel/pid_sysctl.h
+index b26e027fc9cd4..2ee41a3a1dfde 100644
+--- a/kernel/pid_sysctl.h
++++ b/kernel/pid_sysctl.h
+@@ -5,33 +5,30 @@
+ #include <linux/pid_namespace.h>
+ 
+ #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+-static inline void initialize_memfd_noexec_scope(struct pid_namespace *ns)
+-{
+-	ns->memfd_noexec_scope =
+-		task_active_pid_ns(current)->memfd_noexec_scope;
+-}
+-
+ static int pid_mfd_noexec_dointvec_minmax(struct ctl_table *table,
+ 	int write, void *buf, size_t *lenp, loff_t *ppos)
+ {
+ 	struct pid_namespace *ns = task_active_pid_ns(current);
+ 	struct ctl_table table_copy;
++	int err, scope, parent_scope;
+ 
+ 	if (write && !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	table_copy = *table;
+-	if (ns != &init_pid_ns)
+-		table_copy.data = &ns->memfd_noexec_scope;
+ 
+-	/*
+-	 * set minimum to current value, the effect is only bigger
+-	 * value is accepted.
+-	 */
+-	if (*(int *)table_copy.data > *(int *)table_copy.extra1)
+-		table_copy.extra1 = table_copy.data;
++	/* You cannot set a lower enforcement value than your parent. */
++	parent_scope = pidns_memfd_noexec_scope(ns->parent);
++	/* Equivalent to pidns_memfd_noexec_scope(ns). */
++	scope = max(READ_ONCE(ns->memfd_noexec_scope), parent_scope);
++
++	table_copy.data = &scope;
++	table_copy.extra1 = &parent_scope;
+ 
+-	return proc_dointvec_minmax(&table_copy, write, buf, lenp, ppos);
++	err = proc_dointvec_minmax(&table_copy, write, buf, lenp, ppos);
++	if (!err && write)
++		WRITE_ONCE(ns->memfd_noexec_scope, scope);
++	return err;
+ }
+ 
+ static struct ctl_table pid_ns_ctl_table_vm[] = {
+@@ -51,7 +48,6 @@ static inline void register_pid_ns_sysctl_table_vm(void)
+ 	register_sysctl("vm", pid_ns_ctl_table_vm);
+ }
+ #else
+-static inline void initialize_memfd_noexec_scope(struct pid_namespace *ns) {}
+ static inline void register_pid_ns_sysctl_table_vm(void) {}
+ #endif
+ 
+diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
+index 2dc4d5a1f1ff8..fde338606ce83 100644
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1735,7 +1735,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
+ 	if (!buf || !buf_size)
+ 		return true;
+ 
+-	data_size = min_t(u16, buf_size, len);
++	data_size = min_t(unsigned int, buf_size, len);
+ 
+ 	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
+ 	return true;
+diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
+index 1970ce5f22d40..71d138573856f 100644
+--- a/kernel/rcu/refscale.c
++++ b/kernel/rcu/refscale.c
+@@ -1107,12 +1107,11 @@ ref_scale_init(void)
+ 	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);
+ 
+ 	for (i = 0; i < nreaders; i++) {
++		init_waitqueue_head(&reader_tasks[i].wq);
+ 		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
+ 						  reader_tasks[i].task);
+ 		if (torture_init_error(firsterr))
+ 			goto unwind;
+-
+-		init_waitqueue_head(&(reader_tasks[i].wq));
+ 	}
+ 
+ 	// Main Task
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b3e25be58e2b7..1d9c2482c5a35 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7289,9 +7289,6 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
+ 
+ 		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
+ 
+-		if (boost)
+-			util_est = max(util_est, runnable);
+-
+ 		/*
+ 		 * During wake-up @p isn't enqueued yet and doesn't contribute
+ 		 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 00e0e50741153..185d3d749f6b6 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -25,7 +25,7 @@ unsigned int sysctl_sched_rt_period = 1000000;
+ int sysctl_sched_rt_runtime = 950000;
+ 
+ #ifdef CONFIG_SYSCTL
+-static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
++static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
+ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+ 		size_t *lenp, loff_t *ppos);
+ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
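The reordered expression matters because integer division truncates: with MSEC_PER_SEC / HZ evaluated first, any HZ that does not divide 1000 loses precision. A quick check at HZ=300, using the real RR_TIMESLICE definition (100 ms expressed in jiffies):

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define MSEC_PER_SEC 1000
#define HZ           300
#define RR_TIMESLICE (100 * HZ / 1000)	/* 100 ms in jiffies = 30 */

int main(void)
{
	int old = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;	/* 3 * 30    =  90 ms */
	int fixed = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;	/* 30000/300 = 100 ms */

	printf("old=%d ms, fixed=%d ms\n", old, fixed);
	return 0;
}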
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 88cbc1181b239..c108ed8a9804a 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -473,8 +473,8 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 		/* Check the deviation from the watchdog clocksource. */
+ 		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ 		if (abs(cs_nsec - wd_nsec) > md) {
+-			u64 cs_wd_msec;
+-			u64 wd_msec;
++			s64 cs_wd_msec;
++			s64 wd_msec;
+ 			u32 wd_rem;
+ 
+ 			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
+@@ -483,8 +483,8 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
+ 			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
+ 				cs->name, cs_nsec, csnow, cslast, cs->mask);
+-			cs_wd_msec = div_u64_rem(cs_nsec - wd_nsec, 1000U * 1000U, &wd_rem);
+-			wd_msec = div_u64_rem(wd_nsec, 1000U * 1000U, &wd_rem);
++			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
++			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
+ 			pr_warn("                      Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
+ 				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
+ 			if (curr_clocksource == cs)
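cs_nsec - wd_nsec is legitimately negative when the clocksource runs slow, and pushing that through unsigned division produced nonsense in the warning message. A userspace rendition of the difference (plain C division standing in for div_u64_rem/div_s64_rem):

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t skew_ns = -2000000;			/* -2 ms of skew */
	uint64_t wrong = (uint64_t)skew_ns / 1000000;	/* wraps to a huge value */
	int64_t right = skew_ns / 1000000;		/* -2 */

	printf("unsigned: %llu ms, signed: %lld ms\n",
	       (unsigned long long)wrong, (long long)right);
	return 0;
}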
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 4df14db4da490..87015e9deacc9 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1045,7 +1045,7 @@ static bool report_idle_softirq(void)
+ 		return false;
+ 
+ 	/* On RT, softirqs handling may be waiting on some lock */
+-	if (!local_bh_blocked())
++	if (local_bh_blocked())
+ 		return false;
+ 
+ 	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index bd1a42b23f3ff..30d8db47c1e2f 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2391,7 +2391,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ #ifdef CONFIG_UPROBE_EVENTS
+ 		if (flags & TRACE_EVENT_FL_UPROBE)
+ 			err = bpf_get_uprobe_info(event, fd_type, buf,
+-						  probe_offset,
++						  probe_offset, probe_addr,
+ 						  event->attr.type == PERF_TYPE_TRACEPOINT);
+ #endif
+ 	}
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 2656ca3b9b39c..745332d10b3e1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7618,6 +7618,11 @@ out:
+ 	return ret;
+ }
+ 
++static void tracing_swap_cpu_buffer(void *tr)
++{
++	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
++}
++
+ static ssize_t
+ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		       loff_t *ppos)
+@@ -7676,13 +7681,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 			ret = tracing_alloc_snapshot_instance(tr);
+ 		if (ret < 0)
+ 			break;
+-		local_irq_disable();
+ 		/* Now, we're going to swap */
+-		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
++		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
++			local_irq_disable();
+ 			update_max_tr(tr, current, smp_processor_id(), NULL);
+-		else
+-			update_max_tr_single(tr, current, iter->cpu_file);
+-		local_irq_enable();
++			local_irq_enable();
++		} else {
++			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
++						 (void *)tr, 1);
++		}
+ 		break;
+ 	default:
+ 		if (tr->allocated_snapshot) {
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 2f37a6e68aa9f..b791524a6536a 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -635,7 +635,7 @@ static int s_mode_show(struct seq_file *s, void *v)
+ 	else
+ 		seq_printf(s, "%s", thread_mode_str[mode]);
+ 
+-	if (mode != MODE_MAX)
++	if (mode < MODE_MAX - 1) /* if mode is any but last */
+ 		seq_puts(s, " ");
+ 
+ 	return 0;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 688bf579f2f1e..555c223c32321 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1418,7 +1418,7 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
+ 
+ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+ 			const char **filename, u64 *probe_offset,
+-			bool perf_type_tracepoint)
++			u64 *probe_addr, bool perf_type_tracepoint)
+ {
+ 	const char *pevent = trace_event_name(event->tp_event);
+ 	const char *group = event->tp_event->class->system;
+@@ -1435,6 +1435,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+ 				    : BPF_FD_TYPE_UPROBE;
+ 	*filename = tu->filename;
+ 	*probe_offset = tu->offset;
++	*probe_addr = 0;
+ 	return 0;
+ }
+ #endif	/* CONFIG_PERF_EVENTS */
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index be38276a365f3..d145305d95fe8 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -151,9 +151,6 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
+ 	 */
+ 	if (is_hardlockup(cpu)) {
+ 		unsigned int this_cpu = smp_processor_id();
+-		struct cpumask backtrace_mask;
+-
+-		cpumask_copy(&backtrace_mask, cpu_online_mask);
+ 
+ 		/* Only print hardlockups once. */
+ 		if (per_cpu(watchdog_hardlockup_warned, cpu))
+@@ -167,10 +164,8 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
+ 				show_regs(regs);
+ 			else
+ 				dump_stack();
+-			cpumask_clear_cpu(cpu, &backtrace_mask);
+ 		} else {
+-			if (trigger_single_cpu_backtrace(cpu))
+-				cpumask_clear_cpu(cpu, &backtrace_mask);
++			trigger_single_cpu_backtrace(cpu);
+ 		}
+ 
+ 		/*
+@@ -179,7 +174,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
+ 		 */
+ 		if (sysctl_hardlockup_all_cpu_backtrace &&
+ 		    !test_and_set_bit(0, &watchdog_hardlockup_all_cpu_dumped))
+-			trigger_cpumask_backtrace(&backtrace_mask);
++			trigger_allbutcpu_cpu_backtrace(cpu);
+ 
+ 		if (hardlockup_panic)
+ 			nmi_panic(regs, "Hard LOCKUP");
+@@ -523,7 +518,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 			dump_stack();
+ 
+ 		if (softlockup_all_cpu_backtrace) {
+-			trigger_allbutself_cpu_backtrace();
++			trigger_allbutcpu_cpu_backtrace(smp_processor_id());
+ 			clear_bit_unlock(0, &soft_lockup_nmi_warn);
+ 		}
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 800b4208dba9a..e51ab3d4765eb 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2569,6 +2569,7 @@ __acquires(&pool->lock)
+ 	 */
+ 	set_work_pool_and_clear_pending(work, pool->id);
+ 
++	pwq->stats[PWQ_STAT_STARTED]++;
+ 	raw_spin_unlock_irq(&pool->lock);
+ 
+ 	lock_map_acquire(&pwq->wq->lockdep_map);
+@@ -2595,7 +2596,6 @@ __acquires(&pool->lock)
+ 	 * workqueues), so hiding them isn't a problem.
+ 	 */
+ 	lockdep_invariant_state(true);
+-	pwq->stats[PWQ_STAT_STARTED]++;
+ 	trace_workqueue_execute_start(work);
+ 	worker->current_func(work);
+ 	/*
+diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
+index ace3c4799fe15..0eed92b77ba37 100644
+--- a/lib/checksum_kunit.c
++++ b/lib/checksum_kunit.c
+@@ -10,7 +10,8 @@
+ #define MAX_ALIGN 64
+ #define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+ 
+-static const __wsum random_init_sum = 0x2847aab;
++/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
++static const u32 random_init_sum = 0x2847aab;
+ static const u8 random_buf[] = {
+ 	0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86,
+ 	0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5,
+@@ -56,7 +57,9 @@ static const u8 random_buf[] = {
+ 	0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92,
+ 	0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09
+ };
+-static const __sum16 expected_results[] = {
++
++/* Values for a little endian CPU. Byte swap on big endian CPU. */
++static const u16 expected_results[] = {
+ 	0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab,
+ 	0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654,
+ 	0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527,
+@@ -115,7 +118,9 @@ static const __sum16 expected_results[] = {
+ 	0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6,
+ 	0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6,
+ };
+-static const __wsum init_sums_no_overflow[] = {
++
++/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
++static const u32 init_sums_no_overflow[] = {
+ 	0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3,
+ 	0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7,
+ 	0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb,
+@@ -208,7 +213,21 @@ static u8 tmp_buf[TEST_BUFLEN];
+ 
+ #define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
+ 
+-#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs)
++#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs)
++
++static __sum16 to_sum16(u16 x)
++{
++	return (__force __sum16)le16_to_cpu((__force __le16)x);
++}
++
++/* This function swaps the bytes inside each half of a __wsum */
++static __wsum to_wsum(u32 x)
++{
++	u16 hi = le16_to_cpu((__force __le16)(x >> 16));
++	u16 lo = le16_to_cpu((__force __le16)x);
++
++	return (__force __wsum)((hi << 16) | lo);
++}
+ 
+ static void assert_setup_correct(struct kunit *test)
+ {
+@@ -226,7 +245,8 @@ static void assert_setup_correct(struct kunit *test)
+ static void test_csum_fixed_random_inputs(struct kunit *test)
+ {
+ 	int len, align;
+-	__wsum result, expec, sum;
++	__wsum sum;
++	__sum16 result, expec;
+ 
+ 	assert_setup_correct(test);
+ 	for (align = 0; align < TEST_BUFLEN; ++align) {
+@@ -237,9 +257,9 @@ static void test_csum_fixed_random_inputs(struct kunit *test)
+ 			/*
+ 			 * Test the precomputed random input.
+ 			 */
+-			sum = random_init_sum;
++			sum = to_wsum(random_init_sum);
+ 			result = full_csum(&tmp_buf[align], len, sum);
+-			expec = expected_results[len];
++			expec = to_sum16(expected_results[len]);
+ 			CHECK_EQ(result, expec);
+ 		}
+ 	}
+@@ -251,7 +271,8 @@ static void test_csum_fixed_random_inputs(struct kunit *test)
+ static void test_csum_all_carry_inputs(struct kunit *test)
+ {
+ 	int len, align;
+-	__wsum result, expec, sum;
++	__wsum sum;
++	__sum16 result, expec;
+ 
+ 	assert_setup_correct(test);
+ 	memset(tmp_buf, 0xff, TEST_BUFLEN);
+@@ -261,9 +282,9 @@ static void test_csum_all_carry_inputs(struct kunit *test)
+ 			/*
+ 			 * All carries from input and initial sum.
+ 			 */
+-			sum = 0xffffffff;
++			sum = to_wsum(0xffffffff);
+ 			result = full_csum(&tmp_buf[align], len, sum);
+-			expec = (len & 1) ? 0xff00 : 0;
++			expec = to_sum16((len & 1) ? 0xff00 : 0);
+ 			CHECK_EQ(result, expec);
+ 
+ 			/*
+@@ -272,11 +293,11 @@ static void test_csum_all_carry_inputs(struct kunit *test)
+ 			sum = 0;
+ 			result = full_csum(&tmp_buf[align], len, sum);
+ 			if (len & 1)
+-				expec = 0xff00;
++				expec = to_sum16(0xff00);
+ 			else if (len)
+ 				expec = 0;
+ 			else
+-				expec = 0xffff;
++				expec = to_sum16(0xffff);
+ 			CHECK_EQ(result, expec);
+ 		}
+ 	}
+@@ -290,7 +311,8 @@ static void test_csum_all_carry_inputs(struct kunit *test)
+ static void test_csum_no_carry_inputs(struct kunit *test)
+ {
+ 	int len, align;
+-	__wsum result, expec, sum;
++	__wsum sum;
++	__sum16 result, expec;
+ 
+ 	assert_setup_correct(test);
+ 	memset(tmp_buf, 0x4, TEST_BUFLEN);
+@@ -300,7 +322,7 @@ static void test_csum_no_carry_inputs(struct kunit *test)
+ 			/*
+ 			 * Expect no carries.
+ 			 */
+-			sum = init_sums_no_overflow[len];
++			sum = to_wsum(init_sums_no_overflow[len]);
+ 			result = full_csum(&tmp_buf[align], len, sum);
+ 			expec = 0;
+ 			CHECK_EQ(result, expec);
+@@ -308,9 +330,9 @@ static void test_csum_no_carry_inputs(struct kunit *test)
+ 			/*
+ 			 * Expect one carry.
+ 			 */
+-			sum = init_sums_no_overflow[len] + 1;
++			sum = to_wsum(init_sums_no_overflow[len] + 1);
+ 			result = full_csum(&tmp_buf[align], len, sum);
+-			expec = len ? 0xfffe : 0xffff;
++			expec = to_sum16(len ? 0xfffe : 0xffff);
+ 			CHECK_EQ(result, expec);
+ 		}
+ 	}
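The new to_sum16()/to_wsum() helpers let the precomputed little-endian tables stand by swapping the bytes inside each 16-bit half on big-endian CPUs (a no-op on little-endian, where le16_to_cpu is the identity). A userspace rendition of that transform, with hypothetical names:

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

/* What to_wsum() does on a big-endian CPU; identity on little-endian. */
static uint32_t wsum_from_le_table(uint32_t x, int big_endian)
{
	if (!big_endian)
		return x;
	return ((uint32_t)swab16((uint16_t)(x >> 16)) << 16) |
	       swab16((uint16_t)x);
}

int main(void)
{
	printf("0x%08x -> 0x%08x on big-endian\n",
	       0x02847aabu, wsum_from_le_table(0x02847aabu, 1));
	return 0;
}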
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index e4dc809d10754..37f78d7b3d323 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -1640,14 +1640,14 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
+ 					   size_t *offset0)
+ {
+ 	struct page **p, *page;
+-	size_t skip = i->iov_offset, offset;
++	size_t skip = i->iov_offset, offset, size;
+ 	int k;
+ 
+ 	for (;;) {
+ 		if (i->nr_segs == 0)
+ 			return 0;
+-		maxsize = min(maxsize, i->bvec->bv_len - skip);
+-		if (maxsize)
++		size = min(maxsize, i->bvec->bv_len - skip);
++		if (size)
+ 			break;
+ 		i->iov_offset = 0;
+ 		i->nr_segs--;
+@@ -1660,16 +1660,16 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
+ 	offset = skip % PAGE_SIZE;
+ 	*offset0 = offset;
+ 
+-	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
++	maxpages = want_pages_array(pages, size, offset, maxpages);
+ 	if (!maxpages)
+ 		return -ENOMEM;
+ 	p = *pages;
+ 	for (k = 0; k < maxpages; k++)
+ 		p[k] = page + k;
+ 
+-	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
+-	iov_iter_advance(i, maxsize);
+-	return maxsize;
++	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
++	iov_iter_advance(i, size);
++	return size;
+ }
+ 
+ /*
+@@ -1684,14 +1684,14 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+ {
+ 	struct page **p, *page;
+ 	const void *kaddr;
+-	size_t skip = i->iov_offset, offset, len;
++	size_t skip = i->iov_offset, offset, len, size;
+ 	int k;
+ 
+ 	for (;;) {
+ 		if (i->nr_segs == 0)
+ 			return 0;
+-		maxsize = min(maxsize, i->kvec->iov_len - skip);
+-		if (maxsize)
++		size = min(maxsize, i->kvec->iov_len - skip);
++		if (size)
+ 			break;
+ 		i->iov_offset = 0;
+ 		i->nr_segs--;
+@@ -1703,13 +1703,13 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+ 	offset = (unsigned long)kaddr & ~PAGE_MASK;
+ 	*offset0 = offset;
+ 
+-	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
++	maxpages = want_pages_array(pages, size, offset, maxpages);
+ 	if (!maxpages)
+ 		return -ENOMEM;
+ 	p = *pages;
+ 
+ 	kaddr -= offset;
+-	len = offset + maxsize;
++	len = offset + size;
+ 	for (k = 0; k < maxpages; k++) {
+ 		size_t seg = min_t(size_t, len, PAGE_SIZE);
+ 
+@@ -1723,9 +1723,9 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+ 		kaddr += PAGE_SIZE;
+ 	}
+ 
+-	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
+-	iov_iter_advance(i, maxsize);
+-	return maxsize;
++	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
++	iov_iter_advance(i, size);
++	return size;
+ }
+ 
+ /*
+diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
+index 5274bbb026d79..33c154264bfe2 100644
+--- a/lib/nmi_backtrace.c
++++ b/lib/nmi_backtrace.c
+@@ -34,7 +34,7 @@ static unsigned long backtrace_flag;
+  * they are passed being updated as a side effect of this call.
+  */
+ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+-				   bool exclude_self,
++				   int exclude_cpu,
+ 				   void (*raise)(cpumask_t *mask))
+ {
+ 	int i, this_cpu = get_cpu();
+@@ -49,8 +49,8 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ 	}
+ 
+ 	cpumask_copy(to_cpumask(backtrace_mask), mask);
+-	if (exclude_self)
+-		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
++	if (exclude_cpu != -1)
++		cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
+ 
+ 	/*
+ 	 * Don't try to send an NMI to this cpu; it may work on some
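The API change swaps a bool ("exclude myself") for an explicit CPU number with -1 meaning "exclude nobody", which is what lets the hardlockup watchdog exclude the locked-up CPU rather than the reporting CPU. The sentinel convention in miniature (a bitmask standing in for a cpumask):

/* Illustration only -- not part of the patch. */
#include <stdio.h>

static void clear_excluded(unsigned long *mask, int exclude_cpu)
{
	if (exclude_cpu != -1)				/* -1: keep everyone */
		*mask &= ~(1UL << exclude_cpu);		/* cpumask_clear_cpu() */
}

int main(void)
{
	unsigned long online = 0xfUL;	/* CPUs 0-3 */

	clear_excluded(&online, 2);
	printf("mask after excluding CPU 2: 0x%lx\n", online);	/* 0xb */
	return 0;
}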
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 2071a3718f4ed..142e36f9dfda1 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+ 	void *entry = xa_entry(xas->xa, node, offset);
+ 
+ 	xas->xa_node = node;
+-	if (xa_is_sibling(entry)) {
++	while (xa_is_sibling(entry)) {
+ 		offset = xa_to_sibling(entry);
+ 		entry = xa_entry(xas->xa, node, offset);
+ 		if (node->shift && xa_is_node(entry))
+diff --git a/mm/memfd.c b/mm/memfd.c
+index e763e76f11064..2dba2cb6f0d0f 100644
+--- a/mm/memfd.c
++++ b/mm/memfd.c
+@@ -268,11 +268,33 @@ long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
+ 
+ #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC)
+ 
++static int check_sysctl_memfd_noexec(unsigned int *flags)
++{
++#ifdef CONFIG_SYSCTL
++	struct pid_namespace *ns = task_active_pid_ns(current);
++	int sysctl = pidns_memfd_noexec_scope(ns);
++
++	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
++		if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
++			*flags |= MFD_NOEXEC_SEAL;
++		else
++			*flags |= MFD_EXEC;
++	}
++
++	if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) {
++		pr_err_ratelimited(
++			"%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n",
++			current->comm, task_pid_nr(current), sysctl);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
+ SYSCALL_DEFINE2(memfd_create,
+ 		const char __user *, uname,
+ 		unsigned int, flags)
+ {
+-	char comm[TASK_COMM_LEN];
+ 	unsigned int *file_seals;
+ 	struct file *file;
+ 	int fd, error;
+@@ -294,35 +316,15 @@ SYSCALL_DEFINE2(memfd_create,
+ 		return -EINVAL;
+ 
+ 	if (!(flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
+-#ifdef CONFIG_SYSCTL
+-		int sysctl = MEMFD_NOEXEC_SCOPE_EXEC;
+-		struct pid_namespace *ns;
+-
+-		ns = task_active_pid_ns(current);
+-		if (ns)
+-			sysctl = ns->memfd_noexec_scope;
+-
+-		switch (sysctl) {
+-		case MEMFD_NOEXEC_SCOPE_EXEC:
+-			flags |= MFD_EXEC;
+-			break;
+-		case MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL:
+-			flags |= MFD_NOEXEC_SEAL;
+-			break;
+-		default:
+-			pr_warn_once(
+-				"memfd_create(): MFD_NOEXEC_SEAL is enforced, pid=%d '%s'\n",
+-				task_pid_nr(current), get_task_comm(comm, current));
+-			return -EINVAL;
+-		}
+-#else
+-		flags |= MFD_EXEC;
+-#endif
+ 		pr_warn_once(
+-			"memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL, pid=%d '%s'\n",
+-			task_pid_nr(current), get_task_comm(comm, current));
++			"%s[%d]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set\n",
++			current->comm, task_pid_nr(current));
+ 	}
+ 
++	error = check_sysctl_memfd_noexec(&flags);
++	if (error < 0)
++		return error;
++
+ 	/* length includes terminating zero */
+ 	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
+ 	if (len <= 0)
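With the stricter pid-namespace scoping above, callers on systems where vm.memfd_noexec >= 2 must pass MFD_NOEXEC_SEAL explicitly or get -EACCES. A minimal userspace sketch (MFD_NOEXEC_SEAL appeared in 6.3; the fallback define mirrors uapi linux/memfd.h):

/* Illustration only -- not part of the patch. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U		/* from uapi linux/memfd.h */
#endif

int main(void)
{
	/* Non-executable, sealed-noexec memfd: accepted at every scope. */
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_NOEXEC_SEAL);

	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	close(fd);
	return 0;
}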
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 9b2d23fbf4d35..b7d7e4fcfad7a 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -58,7 +58,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 			pte = pte_offset_map(pmd, addr);
+ 		if (pte) {
+ 			err = walk_pte_range_inner(pte, addr, end, walk);
+-			if (walk->mm != &init_mm)
++			if (walk->mm != &init_mm && addr < TASK_SIZE)
+ 				pte_unmap(pte);
+ 		}
+ 	} else {
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d963c747dabca..79a998b38ac85 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3641,6 +3641,8 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
+ 	unsigned long long size;
+ 	char *rest;
+ 	int opt;
++	kuid_t kuid;
++	kgid_t kgid;
+ 
+ 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
+ 	if (opt < 0)
+@@ -3676,14 +3678,32 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
+ 		ctx->mode = result.uint_32 & 07777;
+ 		break;
+ 	case Opt_uid:
+-		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
+-		if (!uid_valid(ctx->uid))
++		kuid = make_kuid(current_user_ns(), result.uint_32);
++		if (!uid_valid(kuid))
+ 			goto bad_value;
++
++		/*
++		 * The requested uid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kuid_has_mapping(fc->user_ns, kuid))
++			goto bad_value;
++
++		ctx->uid = kuid;
+ 		break;
+ 	case Opt_gid:
+-		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
+-		if (!gid_valid(ctx->gid))
++		kgid = make_kgid(current_user_ns(), result.uint_32);
++		if (!gid_valid(kgid))
+ 			goto bad_value;
++
++		/*
++		 * The requested gid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kgid_has_mapping(fc->user_ns, kgid))
++			goto bad_value;
++
++		ctx->gid = kgid;
+ 		break;
+ 	case Opt_huge:
+ 		ctx->huge = result.uint_32;
+diff --git a/mm/util.c b/mm/util.c
+index dd12b9531ac4c..406634f26918c 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1071,7 +1071,9 @@ void mem_dump_obj(void *object)
+ 	if (vmalloc_dump_obj(object))
+ 		return;
+ 
+-	if (virt_addr_valid(object))
++	if (is_vmalloc_addr(object))
++		type = "vmalloc memory";
++	else if (virt_addr_valid(object))
+ 		type = "non-slab/vmalloc memory";
+ 	else if (object == NULL)
+ 		type = "NULL pointer";
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 228a4a5312f22..ef8599d394fd0 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+ #ifdef CONFIG_PRINTK
+ bool vmalloc_dump_obj(void *object)
+ {
+-	struct vm_struct *vm;
+ 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
++	const void *caller;
++	struct vm_struct *vm;
++	struct vmap_area *va;
++	unsigned long addr;
++	unsigned int nr_pages;
+ 
+-	vm = find_vm_area(objp);
+-	if (!vm)
++	if (!spin_trylock(&vmap_area_lock))
++		return false;
++	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
++	if (!va) {
++		spin_unlock(&vmap_area_lock);
+ 		return false;
++	}
++
++	vm = va->vm;
++	if (!vm) {
++		spin_unlock(&vmap_area_lock);
++		return false;
++	}
++	addr = (unsigned long)vm->addr;
++	caller = vm->caller;
++	nr_pages = vm->nr_pages;
++	spin_unlock(&vmap_area_lock);
+ 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
+-		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
++		nr_pages, addr, caller);
+ 	return true;
+ }
+ #endif
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index b52644771cc43..22c6689d93027 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -244,6 +244,14 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ 	if (mem_cgroup_disabled())
+ 		return;
+ 
++	/*
++	 * The in-kernel users only care about the reclaim efficiency
++	 * for this @memcg rather than the whole subtree, and there
++	 * isn't and won't be any in-kernel user in a legacy cgroup.
++	 */
++	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
++		return;
++
+ 	vmpr = memcg_to_vmpressure(memcg);
+ 
+ 	/*
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 2fe4a11d63f44..5be64834a8527 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4891,7 +4891,8 @@ static int lru_gen_memcg_seg(struct lruvec *lruvec)
+  *                          the eviction
+  ******************************************************************************/
+ 
+-static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
++static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
++		       int tier_idx)
+ {
+ 	bool success;
+ 	int gen = folio_lru_gen(folio);
+@@ -4941,6 +4942,13 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+ 		return true;
+ 	}
+ 
++	/* ineligible */
++	if (zone > sc->reclaim_idx) {
++		gen = folio_inc_gen(lruvec, folio, false);
++		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
++		return true;
++	}
++
+ 	/* waiting for writeback */
+ 	if (folio_test_locked(folio) || folio_test_writeback(folio) ||
+ 	    (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
+@@ -4989,7 +4997,8 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
+ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 		       int type, int tier, struct list_head *list)
+ {
+-	int gen, zone;
++	int i;
++	int gen;
+ 	enum vm_event_item item;
+ 	int sorted = 0;
+ 	int scanned = 0;
+@@ -5005,9 +5014,10 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 
+ 	gen = lru_gen_from_seq(lrugen->min_seq[type]);
+ 
+-	for (zone = sc->reclaim_idx; zone >= 0; zone--) {
++	for (i = MAX_NR_ZONES; i > 0; i--) {
+ 		LIST_HEAD(moved);
+ 		int skipped = 0;
++		int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
+ 		struct list_head *head = &lrugen->folios[gen][type][zone];
+ 
+ 		while (!list_empty(head)) {
+@@ -5021,7 +5031,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 
+ 			scanned += delta;
+ 
+-			if (sort_folio(lruvec, folio, tier))
++			if (sort_folio(lruvec, folio, sc, tier))
+ 				sorted += delta;
+ 			else if (isolate_folio(lruvec, folio, sc)) {
+ 				list_add(&folio->lru, list);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 76222565e2df0..ce76931d11d86 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -178,57 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ 	hci_conn_put(conn);
+ }
+ 
+-static void le_scan_cleanup(struct work_struct *work)
+-{
+-	struct hci_conn *conn = container_of(work, struct hci_conn,
+-					     le_scan_cleanup);
+-	struct hci_dev *hdev = conn->hdev;
+-	struct hci_conn *c = NULL;
+-
+-	BT_DBG("%s hcon %p", hdev->name, conn);
+-
+-	hci_dev_lock(hdev);
+-
+-	/* Check that the hci_conn is still around */
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+-		if (c == conn)
+-			break;
+-	}
+-	rcu_read_unlock();
+-
+-	if (c == conn) {
+-		hci_connect_le_scan_cleanup(conn, 0x00);
+-		hci_conn_cleanup(conn);
+-	}
+-
+-	hci_dev_unlock(hdev);
+-	hci_dev_put(hdev);
+-	hci_conn_put(conn);
+-}
+-
+-static void hci_connect_le_scan_remove(struct hci_conn *conn)
+-{
+-	BT_DBG("%s hcon %p", conn->hdev->name, conn);
+-
+-	/* We can't call hci_conn_del/hci_conn_cleanup here since that
+-	 * could deadlock with another hci_conn_del() call that's holding
+-	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+-	 * Instead, grab temporary extra references to the hci_dev and
+-	 * hci_conn and perform the necessary cleanup in a separate work
+-	 * callback.
+-	 */
+-
+-	hci_dev_hold(conn->hdev);
+-	hci_conn_get(conn);
+-
+-	/* Even though we hold a reference to the hdev, many other
+-	 * things might get cleaned up meanwhile, including the hdev's
+-	 * own workqueue, so we can't use that for scheduling.
+-	 */
+-	schedule_work(&conn->le_scan_cleanup);
+-}
+-
+ static void hci_acl_create_connection(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -679,13 +628,6 @@ static void hci_conn_timeout(struct work_struct *work)
+ 	if (refcnt > 0)
+ 		return;
+ 
+-	/* LE connections in scanning state need special handling */
+-	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
+-	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+-		hci_connect_le_scan_remove(conn);
+-		return;
+-	}
+-
+ 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
+ }
+ 
+@@ -791,7 +733,8 @@ struct iso_list_data {
+ 		u16 sync_handle;
+ 	};
+ 	int count;
+-	struct iso_cig_params pdu;
++	bool big_term;
++	bool big_sync_term;
+ };
+ 
+ static void bis_list(struct hci_conn *conn, void *data)
+@@ -809,17 +752,6 @@ static void bis_list(struct hci_conn *conn, void *data)
+ 	d->count++;
+ }
+ 
+-static void find_bis(struct hci_conn *conn, void *data)
+-{
+-	struct iso_list_data *d = data;
+-
+-	/* Ignore unicast */
+-	if (bacmp(&conn->dst, BDADDR_ANY))
+-		return;
+-
+-	d->count++;
+-}
+-
+ static int terminate_big_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct iso_list_data *d = data;
+@@ -828,11 +760,8 @@ static int terminate_big_sync(struct hci_dev *hdev, void *data)
+ 
+ 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
+ 
+-	/* Check if ISO connection is a BIS and terminate BIG if there are
+-	 * no other connections using it.
+-	 */
+-	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
+-	if (d->count)
++	/* Only terminate BIG if it has been created */
++	if (!d->big_term)
+ 		return 0;
+ 
+ 	return hci_le_terminate_big_sync(hdev, d->big,
+@@ -844,19 +773,21 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
+ 	kfree(data);
+ }
+ 
+-static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
++static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
+ {
+ 	struct iso_list_data *d;
+ 	int ret;
+ 
+-	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
++	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
++		   conn->iso_qos.bcast.bis);
+ 
+ 	d = kzalloc(sizeof(*d), GFP_KERNEL);
+ 	if (!d)
+ 		return -ENOMEM;
+ 
+-	d->big = big;
+-	d->bis = bis;
++	d->big = conn->iso_qos.bcast.big;
++	d->bis = conn->iso_qos.bcast.bis;
++	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
+ 
+ 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
+ 				 terminate_big_destroy);
+@@ -873,31 +804,26 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+ 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
+ 		   d->sync_handle);
+ 
+-	/* Check if ISO connection is a BIS and terminate BIG if there are
+-	 * no other connections using it.
+-	 */
+-	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
+-	if (d->count)
+-		return 0;
+-
+-	hci_le_big_terminate_sync(hdev, d->big);
++	if (d->big_sync_term)
++		hci_le_big_terminate_sync(hdev, d->big);
+ 
+ 	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
+ }
+ 
+-static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
++static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
+ {
+ 	struct iso_list_data *d;
+ 	int ret;
+ 
+-	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
++	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
+ 
+ 	d = kzalloc(sizeof(*d), GFP_KERNEL);
+ 	if (!d)
+ 		return -ENOMEM;
+ 
+ 	d->big = big;
+-	d->sync_handle = sync_handle;
++	d->sync_handle = conn->sync_handle;
++	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
+ 
+ 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+ 				 terminate_big_destroy);
+@@ -916,6 +842,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+ static void bis_cleanup(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
++	struct hci_conn *bis;
+ 
+ 	bt_dev_dbg(hdev, "conn %p", conn);
+ 
+@@ -923,11 +850,25 @@ static void bis_cleanup(struct hci_conn *conn)
+ 		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
+ 			return;
+ 
+-		hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
+-				     conn->iso_qos.bcast.bis);
++		/* Check if ISO connection is a BIS and terminate advertising
++		 * set and BIG if there are no other connections using it.
++		 */
++		bis = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY,
++					       conn->iso_qos.bcast.big,
++					       conn->iso_qos.bcast.bis);
++		if (bis)
++			return;
++
++		hci_le_terminate_big(hdev, conn);
+ 	} else {
++		bis = hci_conn_hash_lookup_big_any_dst(hdev,
++						       conn->iso_qos.bcast.big);
++
++		if (bis)
++			return;
++
+ 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
+-				     conn->sync_handle);
++				     conn);
+ 	}
+ }
+ 
+@@ -983,6 +924,25 @@ static void cis_cleanup(struct hci_conn *conn)
+ 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
+ }
+ 
++static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++	u16 handle = HCI_CONN_HANDLE_MAX + 1;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		/* Find the first unused handle */
++		if (handle == 0xffff || c->handle != handle)
++			break;
++		handle++;
++	}
++	rcu_read_unlock();
++
++	return handle;
++}
++
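/* Editor's note (sketch, not part of the patch): the scan above hands
 * out the first free placeholder handle and assumes connections with
 * locally allocated handles appear in the hash list in ascending
 * order. The same first-fit logic over a plain sorted array:
 *
 *	static u16 first_unused_handle(const u16 *sorted, size_t n, u16 base)
 *	{
 *		u16 handle = base;	// HCI_CONN_HANDLE_MAX + 1
 *		size_t i;
 *
 *		for (i = 0; i < n; i++) {
 *			if (handle == 0xffff || sorted[i] != handle)
 *				break;	// gap found, or range exhausted
 *			handle++;
 *		}
 *		return handle;
 *	}
 */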
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 			      u8 role)
+ {
+@@ -996,7 +956,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 
+ 	bacpy(&conn->dst, dst);
+ 	bacpy(&conn->src, &hdev->bdaddr);
+-	conn->handle = HCI_CONN_HANDLE_UNSET;
++	conn->handle = hci_conn_hash_alloc_unset(hdev);
+ 	conn->hdev  = hdev;
+ 	conn->type  = type;
+ 	conn->role  = role;
+@@ -1059,7 +1019,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+ 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
+ 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+-	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
+ 
+ 	atomic_set(&conn->refcnt, 0);
+ 
+@@ -1081,6 +1040,29 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	return conn;
+ }
+ 
++static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
++{
++	if (!reason)
++		reason = HCI_ERROR_REMOTE_USER_TERM;
++
++	/* Due to a race, SCO/ISO conn might not be established yet at this point,
++	 * and nothing else will clean it up. In other cases it is done via HCI
++	 * events.
++	 */
++	switch (conn->type) {
++	case SCO_LINK:
++	case ESCO_LINK:
++		if (HCI_CONN_HANDLE_UNSET(conn->handle))
++			hci_conn_failed(conn, reason);
++		break;
++	case ISO_LINK:
++		if (conn->state != BT_CONNECTED &&
++		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
++			hci_conn_failed(conn, reason);
++		break;
++	}
++}
++
+ static void hci_conn_unlink(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -1103,14 +1085,7 @@ static void hci_conn_unlink(struct hci_conn *conn)
+ 			if (!test_bit(HCI_UP, &hdev->flags))
+ 				continue;
+ 
+-			/* Due to race, SCO connection might be not established
+-			 * yet at this point. Delete it now, otherwise it is
+-			 * possible for it to be stuck and can't be deleted.
+-			 */
+-			if ((child->type == SCO_LINK ||
+-			     child->type == ESCO_LINK) &&
+-			    child->handle == HCI_CONN_HANDLE_UNSET)
+-				hci_conn_del(child);
++			hci_conn_cleanup_child(child, conn->abort_reason);
+ 		}
+ 
+ 		return;
+@@ -1495,10 +1470,10 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
+ 
+ /* This function requires the caller holds hdev->lock */
+ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+-				    struct bt_iso_qos *qos)
++				    struct bt_iso_qos *qos, __u8 base_len,
++				    __u8 *base)
+ {
+ 	struct hci_conn *conn;
+-	struct iso_list_data data;
+ 	int err;
+ 
+ 	/* Let's make sure that le is enabled.*/
+@@ -1516,24 +1491,27 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 	if (err)
+ 		return ERR_PTR(err);
+ 
+-	data.big = qos->bcast.big;
+-	data.bis = qos->bcast.bis;
+-	data.count = 0;
+-
+-	/* Check if there is already a matching BIG/BIS */
+-	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
+-	if (data.count)
++	/* Check if the LE Create BIG command has already been sent */
++	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
++						qos->bcast.big);
++	if (conn)
+ 		return ERR_PTR(-EADDRINUSE);
+ 
+-	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
+-	if (conn)
++	/* Check BIS settings against other bound BISes, since all
++	 * BISes in a BIG must have the same value for all parameters
++	 */
++	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big,
++					qos->bcast.bis);
++
++	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
++		     base_len != conn->le_per_adv_data_len ||
++		     memcmp(conn->le_per_adv_data, base, base_len)))
+ 		return ERR_PTR(-EADDRINUSE);
+ 
+ 	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ 	if (!conn)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	set_bit(HCI_CONN_PER_ADV, &conn->flags);
+ 	conn->state = BT_CONNECT;
+ 
+ 	hci_conn_hold(conn);
+@@ -1707,52 +1685,25 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	return sco;
+ }
+ 
+-static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
+-{
+-	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
+-
+-	cis->cis_id = qos->ucast.cis;
+-	cis->c_sdu  = cpu_to_le16(qos->ucast.out.sdu);
+-	cis->p_sdu  = cpu_to_le16(qos->ucast.in.sdu);
+-	cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
+-	cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
+-	cis->c_rtn  = qos->ucast.out.rtn;
+-	cis->p_rtn  = qos->ucast.in.rtn;
+-
+-	d->pdu.cp.num_cis++;
+-}
+-
+-static void cis_list(struct hci_conn *conn, void *data)
+-{
+-	struct iso_list_data *d = data;
+-
+-	/* Skip if broadcast/ANY address */
+-	if (!bacmp(&conn->dst, BDADDR_ANY))
+-		return;
+-
+-	if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
+-	    d->cis != conn->iso_qos.ucast.cis)
+-		return;
+-
+-	d->count++;
+-
+-	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
+-	    d->count >= ARRAY_SIZE(d->pdu.cis))
+-		return;
+-
+-	cis_add(d, &conn->iso_qos);
+-}
+-
+ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 	struct hci_cp_le_create_big cp;
++	struct iso_list_data data;
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 
++	data.big = qos->bcast.big;
++	data.bis = qos->bcast.bis;
++	data.count = 0;
++
++	/* Create a BIS for each bound connection */
++	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
++				 BT_BOUND, &data);
++
+ 	cp.handle = qos->bcast.big;
+ 	cp.adv_handle = qos->bcast.bis;
+-	cp.num_bis  = 0x01;
++	cp.num_bis  = data.count;
+ 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
+ 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
+ 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
+@@ -1766,25 +1717,62 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
+ }
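/* Editor's note (illustrative, names are made up): num_bis is now
 * derived by counting bound connections that belong to the BIG via the
 * hci_conn_hash_list_state() callback walk, instead of hard-coding
 * 0x01. The counting pattern in miniature:
 *
 *	struct big_count { u8 big; int count; };
 *
 *	static void count_bound_bis(struct hci_conn *conn, void *data)
 *	{
 *		struct big_count *d = data;
 *
 *		if (conn->iso_qos.bcast.big == d->big)
 *			d->count++;	// one BIS per matching bound conn
 *	}
 */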
+ 
+-static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err)
++static int set_cig_params_sync(struct hci_dev *hdev, void *data)
+ {
+-	struct iso_cig_params *pdu = data;
++	u8 cig_id = PTR_ERR(data);
++	struct hci_conn *conn;
++	struct bt_iso_qos *qos;
++	struct iso_cig_params pdu;
++	u8 cis_id;
+ 
+-	bt_dev_dbg(hdev, "");
++	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
++	if (!conn)
++		return 0;
+ 
+-	if (err)
+-		bt_dev_err(hdev, "Unable to set CIG parameters: %d", err);
++	memset(&pdu, 0, sizeof(pdu));
+ 
+-	kfree(pdu);
+-}
++	qos = &conn->iso_qos;
++	pdu.cp.cig_id = cig_id;
++	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
++	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
++	pdu.cp.sca = qos->ucast.sca;
++	pdu.cp.packing = qos->ucast.packing;
++	pdu.cp.framing = qos->ucast.framing;
++	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
++	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
++
++	/* Reprogram all CIS(s) with the same CIG, valid range are:
++	 * num_cis: 0x00 to 0x1F
++	 * cis_id: 0x00 to 0xEF
++	 */
++	for (cis_id = 0x00; cis_id < 0xf0 &&
++	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
++		struct hci_cis_params *cis;
+ 
+-static int set_cig_params_sync(struct hci_dev *hdev, void *data)
+-{
+-	struct iso_cig_params *pdu = data;
+-	u32 plen;
++		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
++		if (!conn)
++			continue;
++
++		qos = &conn->iso_qos;
++
++		cis = &pdu.cis[pdu.cp.num_cis++];
++		cis->cis_id = cis_id;
++		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
++		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
++		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
++			      qos->ucast.in.phy;
++		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
++			      qos->ucast.out.phy;
++		cis->c_rtn  = qos->ucast.out.rtn;
++		cis->p_rtn  = qos->ucast.in.rtn;
++	}
++
++	if (!pdu.cp.num_cis)
++		return 0;
+ 
+-	plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]);
+-	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu,
++	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
++				     sizeof(pdu.cp) +
++				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
+ 				     HCI_CMD_TIMEOUT);
+ }
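/* Editor's note: the CIG id travels into set_cig_params_sync() packed
 * inside the void *data cookie via ERR_PTR()/PTR_ERR(), which avoids a
 * heap allocation whose lifetime would otherwise need managing (the
 * old kmemdup()/kfree() pair above). Minimal round trip (illustrative):
 *
 *	void *cookie = ERR_PTR(0x05);	// pack a small integer
 *	u8 cig_id = PTR_ERR(cookie);	// unpack: cig_id == 0x05
 */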
+ 
+@@ -1792,7 +1780,6 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 	struct iso_list_data data;
+-	struct iso_cig_params *pdu;
+ 
+ 	memset(&data, 0, sizeof(data));
+ 
+@@ -1819,58 +1806,31 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 		qos->ucast.cig = data.cig;
+ 	}
+ 
+-	data.pdu.cp.cig_id = qos->ucast.cig;
+-	hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
+-	hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
+-	data.pdu.cp.sca = qos->ucast.sca;
+-	data.pdu.cp.packing = qos->ucast.packing;
+-	data.pdu.cp.framing = qos->ucast.framing;
+-	data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
+-	data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
+-
+ 	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
+-		data.count = 0;
+-		data.cig = qos->ucast.cig;
+-		data.cis = qos->ucast.cis;
+-
+-		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
+-					 &data);
+-		if (data.count)
++		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
++					     qos->ucast.cis))
+ 			return false;
+-
+-		cis_add(&data, qos);
++		goto done;
+ 	}
+ 
+-	/* Reprogram all CIS(s) with the same CIG */
+-	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
++	/* Allocate first available CIS if not set */
++	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
+ 	     data.cis++) {
+-		data.count = 0;
+-
+-		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
+-					 &data);
+-		if (data.count)
+-			continue;
+-
+-		/* Allocate a CIS if not set */
+-		if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
++		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
++					      data.cis)) {
+ 			/* Update CIS */
+ 			qos->ucast.cis = data.cis;
+-			cis_add(&data, qos);
++			break;
+ 		}
+ 	}
+ 
+-	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
++	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
+ 		return false;
+ 
+-	pdu = kmemdup(&data.pdu, sizeof(*pdu), GFP_KERNEL);
+-	if (!pdu)
+-		return false;
+-
+-	if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu,
+-			       set_cig_params_complete) < 0) {
+-		kfree(pdu);
++done:
++	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
++			       ERR_PTR(qos->ucast.cig), NULL) < 0)
+ 		return false;
+-	}
+ 
+ 	return true;
+ }
+@@ -1969,59 +1929,47 @@ bool hci_iso_setup_path(struct hci_conn *conn)
+ 	return true;
+ }
+ 
+-static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
++int hci_conn_check_create_cis(struct hci_conn *conn)
+ {
+-	return hci_le_create_cis_sync(hdev, data);
+-}
++	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
++		return -EINVAL;
+ 
+-int hci_le_create_cis(struct hci_conn *conn)
+-{
+-	struct hci_conn *cis;
+-	struct hci_link *link, *t;
+-	struct hci_dev *hdev = conn->hdev;
+-	int err;
++	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
++	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
++		return 1;
+ 
+-	bt_dev_dbg(hdev, "hcon %p", conn);
++	return 0;
++}
+ 
+-	switch (conn->type) {
+-	case LE_LINK:
+-		if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
+-			return -EINVAL;
++static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
++{
++	return hci_le_create_cis_sync(hdev);
++}
+ 
+-		cis = NULL;
++int hci_le_create_cis_pending(struct hci_dev *hdev)
++{
++	struct hci_conn *conn;
++	bool pending = false;
+ 
+-		/* hci_conn_link uses list_add_tail_rcu so the list is in
+-		 * the same order as the connections are requested.
+-		 */
+-		list_for_each_entry_safe(link, t, &conn->link_list, list) {
+-			if (link->conn->state == BT_BOUND) {
+-				err = hci_le_create_cis(link->conn);
+-				if (err)
+-					return err;
++	rcu_read_lock();
+ 
+-				cis = link->conn;
+-			}
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
++			rcu_read_unlock();
++			return -EBUSY;
+ 		}
+ 
+-		return cis ? 0 : -EINVAL;
+-	case ISO_LINK:
+-		cis = conn;
+-		break;
+-	default:
+-		return -EINVAL;
++		if (!hci_conn_check_create_cis(conn))
++			pending = true;
+ 	}
+ 
+-	if (cis->state == BT_CONNECT)
++	rcu_read_unlock();
++
++	if (!pending)
+ 		return 0;
+ 
+ 	/* Queue Create CIS */
+-	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
+-	if (err)
+-		return err;
+-
+-	cis->state = BT_CONNECT;
+-
+-	return 0;
++	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
+ }
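/* Editor's sketch of the gate implemented above (condensed pseudocode,
 * not from the patch):
 *
 *	for each conn in hdev->conn_hash:
 *		if HCI_CONN_CREATE_CIS is set	-> return -EBUSY
 *		if conn is a CIS ready to go	-> pending = true
 *	if pending:
 *		queue hci_create_cis_sync() on the cmd_sync workqueue
 *
 * Only one LE Create CIS may be outstanding, so the -EBUSY leg defers
 * new rounds until the flagged CISes report back.
 */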
+ 
+ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+@@ -2051,16 +1999,6 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+ 		qos->latency = conn->le_conn_latency;
+ }
+ 
+-static void hci_bind_bis(struct hci_conn *conn,
+-			 struct bt_iso_qos *qos)
+-{
+-	/* Update LINK PHYs according to QoS preference */
+-	conn->le_tx_phy = qos->bcast.out.phy;
+-	conn->le_tx_phy = qos->bcast.out.phy;
+-	conn->iso_qos = *qos;
+-	conn->state = BT_BOUND;
+-}
+-
+ static int create_big_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct hci_conn *conn = data;
+@@ -2183,27 +2121,80 @@ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+ 	}
+ }
+ 
+-struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+-				 __u8 dst_type, struct bt_iso_qos *qos,
+-				 __u8 base_len, __u8 *base)
++struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
++			      struct bt_iso_qos *qos,
++			      __u8 base_len, __u8 *base)
+ {
+ 	struct hci_conn *conn;
+-	int err;
++	__u8 eir[HCI_MAX_PER_AD_LENGTH];
++
++	if (base_len && base)
++		base_len = eir_append_service_data(eir, 0,  0x1851,
++						   base, base_len);
+ 
+ 	/* We need hci_conn object using the BDADDR_ANY as dst */
+-	conn = hci_add_bis(hdev, dst, qos);
++	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
+ 	if (IS_ERR(conn))
+ 		return conn;
+ 
+-	hci_bind_bis(conn, qos);
++	/* Update LINK PHYs according to QoS preference */
++	conn->le_tx_phy = qos->bcast.out.phy;
++	conn->le_tx_phy = qos->bcast.out.phy;
+ 
+ 	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
+ 	if (base_len && base) {
+-		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
+-						   0x1851, base, base_len);
++		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
+ 		conn->le_per_adv_data_len = base_len;
+ 	}
+ 
++	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
++			  conn->le_tx_phy ? conn->le_tx_phy :
++			  hdev->le_tx_def_phys);
++
++	conn->iso_qos = *qos;
++	conn->state = BT_BOUND;
++
++	return conn;
++}
++
++static void bis_mark_per_adv(struct hci_conn *conn, void *data)
++{
++	struct iso_list_data *d = data;
++
++	/* Skip if not broadcast/ANY address */
++	if (bacmp(&conn->dst, BDADDR_ANY))
++		return;
++
++	if (d->big != conn->iso_qos.bcast.big ||
++	    d->bis == BT_ISO_QOS_BIS_UNSET ||
++	    d->bis != conn->iso_qos.bcast.bis)
++		return;
++
++	set_bit(HCI_CONN_PER_ADV, &conn->flags);
++}
++
++struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
++				 __u8 dst_type, struct bt_iso_qos *qos,
++				 __u8 base_len, __u8 *base)
++{
++	struct hci_conn *conn;
++	int err;
++	struct iso_list_data data;
++
++	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
++	if (IS_ERR(conn))
++		return conn;
++
++	data.big = qos->bcast.big;
++	data.bis = qos->bcast.bis;
++
++	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
++	 * the start periodic advertising and create BIG commands have
++	 * been queued
++	 */
++	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
++				 BT_BOUND, &data);
++
+ 	/* Queue start periodic advertising and create BIG */
+ 	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
+ 				 create_big_complete);
+@@ -2212,10 +2203,6 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(err);
+ 	}
+ 
+-	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
+-			  conn->le_tx_phy ? conn->le_tx_phy :
+-			  hdev->le_tx_def_phys);
+-
+ 	return conn;
+ }
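/* Editor's note: splitting bind from connect lets a deferred-setup
 * caller park the BIS in BT_BOUND and issue the periodic advertising +
 * LE Create BIG step later. Call shape as used by iso_connect_bis()
 * further down (illustrative):
 *
 *	if (deferred_setup)
 *		hcon = hci_bind_bis(hdev, dst, &qos, base_len, base);
 *	else	// binds internally, then queues create_big_sync
 *		hcon = hci_connect_bis(hdev, dst, dst_type, &qos,
 *				       base_len, base);
 */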
+ 
+@@ -2257,11 +2244,9 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(-ENOLINK);
+ 	}
+ 
+-	/* If LE is already connected and CIS handle is already set proceed to
+-	 * Create CIS immediately.
+-	 */
+-	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
+-		hci_le_create_cis(cis);
++	cis->state = BT_CONNECT;
++
++	hci_le_create_cis_pending(hdev);
+ 
+ 	return cis;
+ }
+@@ -2848,81 +2833,46 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ 	return phys;
+ }
+ 
+-int hci_abort_conn(struct hci_conn *conn, u8 reason)
++static int abort_conn_sync(struct hci_dev *hdev, void *data)
+ {
+-	int r = 0;
++	struct hci_conn *conn;
++	u16 handle = PTR_ERR(data);
+ 
+-	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++	conn = hci_conn_hash_lookup_handle(hdev, handle);
++	if (!conn)
+ 		return 0;
+ 
+-	switch (conn->state) {
+-	case BT_CONNECTED:
+-	case BT_CONFIG:
+-		if (conn->type == AMP_LINK) {
+-			struct hci_cp_disconn_phy_link cp;
++	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
++}
+ 
+-			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+-			cp.reason = reason;
+-			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+-					 sizeof(cp), &cp);
+-		} else {
+-			struct hci_cp_disconnect dc;
++int hci_abort_conn(struct hci_conn *conn, u8 reason)
++{
++	struct hci_dev *hdev = conn->hdev;
+ 
+-			dc.handle = cpu_to_le16(conn->handle);
+-			dc.reason = reason;
+-			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
+-					 sizeof(dc), &dc);
+-		}
++	/* If abort_reason has already been set it means the connection is
++	 * already being aborted so don't attempt to overwrite it.
++	 */
++	if (conn->abort_reason)
++		return 0;
+ 
+-		conn->state = BT_DISCONN;
++	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
+ 
+-		break;
+-	case BT_CONNECT:
+-		if (conn->type == LE_LINK) {
+-			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+-				break;
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+-		} else if (conn->type == ACL_LINK) {
+-			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
+-				break;
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_CREATE_CONN_CANCEL,
+-					 6, &conn->dst);
+-		}
+-		break;
+-	case BT_CONNECT2:
+-		if (conn->type == ACL_LINK) {
+-			struct hci_cp_reject_conn_req rej;
+-
+-			bacpy(&rej.bdaddr, &conn->dst);
+-			rej.reason = reason;
+-
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_REJECT_CONN_REQ,
+-					 sizeof(rej), &rej);
+-		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+-			struct hci_cp_reject_sync_conn_req rej;
+-
+-			bacpy(&rej.bdaddr, &conn->dst);
+-
+-			/* SCO rejection has its own limited set of
+-			 * allowed error values (0x0D-0x0F) which isn't
+-			 * compatible with most values passed to this
+-			 * function. To be safe hard-code one of the
+-			 * values that's suitable for SCO.
+-			 */
+-			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
++	conn->abort_reason = reason;
+ 
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_REJECT_SYNC_CONN_REQ,
+-					 sizeof(rej), &rej);
++	/* If the connection is pending check the command opcode since that
++	 * might be blocking on hci_cmd_sync_work while waiting its respective
++	 * event so we need to hci_cmd_sync_cancel to cancel it.
++	 */
++	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
++		switch (hci_skb_event(hdev->sent_cmd)) {
++		case HCI_EV_LE_CONN_COMPLETE:
++		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
++		case HCI_EVT_LE_CIS_ESTABLISHED:
++			hci_cmd_sync_cancel(hdev, -ECANCELED);
++			break;
+ 		}
+-		break;
+-	default:
+-		conn->state = BT_CLOSED;
+-		break;
+ 	}
+ 
+-	return r;
++	return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
++				  NULL);
+ }
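/* Editor's sketch of the rewritten abort path (condensed; the
 * waiting_on_conn_event() name is made up for the hdev->sent_cmd
 * check):
 *
 *	conn->abort_reason = reason;		// claim the abort once
 *	if (conn->state == BT_CONNECT && waiting_on_conn_event(hdev))
 *		hci_cmd_sync_cancel(hdev, -ECANCELED);
 *	hci_cmd_sync_queue(hdev, abort_conn_sync,
 *			   ERR_PTR(conn->handle), NULL);
 *
 * All HCI traffic then runs serialized in abort_conn_sync(), which
 * looks the handle up again in case the conn was freed meanwhile.
 */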
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 1ec83985f1ab0..2c845c9a26be0 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1074,9 +1074,9 @@ void hci_uuids_clear(struct hci_dev *hdev)
+ 
+ void hci_link_keys_clear(struct hci_dev *hdev)
+ {
+-	struct link_key *key;
++	struct link_key *key, *tmp;
+ 
+-	list_for_each_entry(key, &hdev->link_keys, list) {
++	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
+ 		list_del_rcu(&key->list);
+ 		kfree_rcu(key, rcu);
+ 	}
+@@ -1084,9 +1084,9 @@ void hci_link_keys_clear(struct hci_dev *hdev)
+ 
+ void hci_smp_ltks_clear(struct hci_dev *hdev)
+ {
+-	struct smp_ltk *k;
++	struct smp_ltk *k, *tmp;
+ 
+-	list_for_each_entry(k, &hdev->long_term_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ 		list_del_rcu(&k->list);
+ 		kfree_rcu(k, rcu);
+ 	}
+@@ -1094,9 +1094,9 @@ void hci_smp_ltks_clear(struct hci_dev *hdev)
+ 
+ void hci_smp_irks_clear(struct hci_dev *hdev)
+ {
+-	struct smp_irk *k;
++	struct smp_irk *k, *tmp;
+ 
+-	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ 		list_del_rcu(&k->list);
+ 		kfree_rcu(k, rcu);
+ 	}
+@@ -1104,9 +1104,9 @@ void hci_smp_irks_clear(struct hci_dev *hdev)
+ 
+ void hci_blocked_keys_clear(struct hci_dev *hdev)
+ {
+-	struct blocked_key *b;
++	struct blocked_key *b, *tmp;
+ 
+-	list_for_each_entry(b, &hdev->blocked_keys, list) {
++	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
+ 		list_del_rcu(&b->list);
+ 		kfree_rcu(b, rcu);
+ 	}
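/* Editor's note: after kfree_rcu() is queued, the current node may be
 * freed as soon as a grace period elapses, so continuing the walk
 * through it risks a use-after-free; list_for_each_entry_safe()
 * caches the successor before the body runs. The idiom in isolation
 * (illustrative types):
 *
 *	struct item *it, *tmp;
 *
 *	list_for_each_entry_safe(it, tmp, &head, list) {
 *		list_del_rcu(&it->list);
 *		kfree_rcu(it, rcu);	// 'it' may go away; 'tmp' is safe
 *	}
 */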
+@@ -1949,15 +1949,15 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
+ 
+ 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ 	case HCI_ADV_MONITOR_EXT_NONE:
+-		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
++		bt_dev_dbg(hdev, "add monitor %d status %d",
+ 			   monitor->handle, status);
+ 		/* Message was not forwarded to controller - not an error */
+ 		break;
+ 
+ 	case HCI_ADV_MONITOR_EXT_MSFT:
+ 		status = msft_add_monitor_pattern(hdev, monitor);
+-		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
+-			   monitor->handle, status);
++		bt_dev_dbg(hdev, "add monitor %d msft status %d",
++			   handle, status);
+ 		break;
+ 	}
+ 
+@@ -1976,15 +1976,15 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
+ 
+ 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
+-		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
++		bt_dev_dbg(hdev, "remove monitor %d status %d",
+ 			   monitor->handle, status);
+ 		goto free_monitor;
+ 
+ 	case HCI_ADV_MONITOR_EXT_MSFT:
+ 		handle = monitor->handle;
+ 		status = msft_remove_monitor(hdev, monitor);
+-		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
+-			   hdev->name, handle, status);
++		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
++			   handle, status);
+ 		break;
+ 	}
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 31ca320ce38d3..2358c1835d475 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3173,7 +3173,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 	 * As the connection handle is set here for the first time, it indicates
+ 	 * whether the connection is already set up.
+ 	 */
+-	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
+ 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
+ 		goto unlock;
+ 	}
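/* Editor's note: HCI_CONN_HANDLE_UNSET() is now a predicate rather
 * than a comparison against a single sentinel, so the placeholder
 * handles returned by hci_conn_hash_alloc_unset() also test as unset.
 * Assumed shape (the hci_core.h hunk lies outside this excerpt):
 *
 *	#define HCI_CONN_HANDLE_MAX	0x0eff
 *	#define HCI_CONN_HANDLE_UNSET(h)	((h) > HCI_CONN_HANDLE_MAX)
 */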
+@@ -3803,6 +3803,22 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
+ 	return rp->status;
+ }
+ 
++static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
++{
++	struct hci_conn *conn, *tmp;
++
++	lockdep_assert_held(&hdev->lock);
++
++	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
++		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
++		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
++			continue;
++
++		if (HCI_CONN_HANDLE_UNSET(conn->handle))
++			hci_conn_failed(conn, status);
++	}
++}
++
+ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 				   struct sk_buff *skb)
+ {
+@@ -3810,6 +3826,7 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 	struct hci_cp_le_set_cig_params *cp;
+ 	struct hci_conn *conn;
+ 	u8 status = rp->status;
++	bool pending = false;
+ 	int i;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+@@ -3823,12 +3840,15 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_lock(hdev);
+ 
++	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
++	 *
++	 * If the Status return parameter is non-zero, then the state of the CIG
++	 * and its CIS configurations shall not be changed by the command. If
++	 * the CIG did not already exist, it shall not be created.
++	 */
+ 	if (status) {
+-		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
+-			conn->state = BT_CLOSED;
+-			hci_connect_cfm(conn, status);
+-			hci_conn_del(conn);
+-		}
++		/* Keep current configuration, fail only the unbound CIS */
++		hci_unbound_cis_failed(hdev, rp->cig_id, status);
+ 		goto unlock;
+ 	}
+ 
+@@ -3852,13 +3872,15 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 
+ 		bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn,
+ 			   conn->handle, conn->parent);
+-
+-		/* Create CIS if LE is already connected */
+-		if (conn->parent && conn->parent->state == BT_CONNECTED)
+-			hci_le_create_cis(conn);
++
++		if (conn->state == BT_CONNECT)
++			pending = true;
+ 	}
+ 
+ unlock:
++	if (pending)
++		hci_le_create_cis_pending(hdev);
++
+ 	hci_dev_unlock(hdev);
+ 
+ 	return rp->status;
+@@ -4224,6 +4246,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
+ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
+ {
+ 	struct hci_cp_le_create_cis *cp;
++	bool pending = false;
+ 	int i;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
+@@ -4246,12 +4269,18 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
+ 
+ 		conn = hci_conn_hash_lookup_handle(hdev, handle);
+ 		if (conn) {
++			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
++					       &conn->flags))
++				pending = true;
+ 			conn->state = BT_CLOSED;
+ 			hci_connect_cfm(conn, status);
+ 			hci_conn_del(conn);
+ 		}
+ 	}
+ 
++	if (pending)
++		hci_le_create_cis_pending(hdev);
++
+ 	hci_dev_unlock(hdev);
+ }
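/* Editor's note: test_and_clear_bit() is an atomic read-and-clear, so
 * exactly one observer wins the flag and becomes responsible for
 * kicking the next round (illustrative):
 *
 *	if (test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags))
 *		pending = true;		// this path owned the flag
 *	...
 *	if (pending)
 *		hci_le_create_cis_pending(hdev);
 */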
+ 
+@@ -4999,7 +5028,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 	 * As the connection handle is set here for the first time, it indicates
+ 	 * whether the connection is already set up.
+ 	 */
+-	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
+ 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
+ 		goto unlock;
+ 	}
+@@ -5863,7 +5892,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	 * As the connection handle is set here for the first time, it indicates
+ 	 * whether the connection is already set up.
+ 	 */
+-	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
+ 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
+ 		goto unlock;
+ 	}
+@@ -6790,6 +6819,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_evt_le_cis_established *ev = data;
+ 	struct hci_conn *conn;
+ 	struct bt_iso_qos *qos;
++	bool pending = false;
+ 	u16 handle = __le16_to_cpu(ev->handle);
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+@@ -6813,6 +6843,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 
+ 	qos = &conn->iso_qos;
+ 
++	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
++
+ 	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
+ 	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
+ 	qos->ucast.out.interval = qos->ucast.in.interval;
+@@ -6854,10 +6886,14 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 		goto unlock;
+ 	}
+ 
++	conn->state = BT_CLOSED;
+ 	hci_connect_cfm(conn, ev->status);
+ 	hci_conn_del(conn);
+ 
+ unlock:
++	if (pending)
++		hci_le_create_cis_pending(hdev);
++
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6936,6 +6972,7 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ {
+ 	struct hci_evt_le_create_big_complete *ev = data;
+ 	struct hci_conn *conn;
++	__u8 bis_idx = 0;
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ 
+@@ -6944,33 +6981,44 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
++	rcu_read_lock();
+ 
+-	conn = hci_conn_hash_lookup_big(hdev, ev->handle);
+-	if (!conn)
+-		goto unlock;
++	/* Connect all BISes that are bound to the BIG */
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (bacmp(&conn->dst, BDADDR_ANY) ||
++		    conn->type != ISO_LINK ||
++		    conn->iso_qos.bcast.big != ev->handle)
++			continue;
+ 
+-	if (conn->type != ISO_LINK) {
+-		bt_dev_err(hdev,
+-			   "Invalid connection link type handle 0x%2.2x",
+-			   ev->handle);
+-		goto unlock;
+-	}
++		conn->handle = __le16_to_cpu(ev->bis_handle[bis_idx++]);
+ 
+-	if (ev->num_bis)
+-		conn->handle = __le16_to_cpu(ev->bis_handle[0]);
++		if (!ev->status) {
++			conn->state = BT_CONNECTED;
++			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
++			rcu_read_unlock();
++			hci_debugfs_create_conn(conn);
++			hci_conn_add_sysfs(conn);
++			hci_iso_setup_path(conn);
++			rcu_read_lock();
++			continue;
++		}
+ 
+-	if (!ev->status) {
+-		conn->state = BT_CONNECTED;
+-		hci_debugfs_create_conn(conn);
+-		hci_conn_add_sysfs(conn);
+-		hci_iso_setup_path(conn);
+-		goto unlock;
++		hci_connect_cfm(conn, ev->status);
++		rcu_read_unlock();
++		hci_conn_del(conn);
++		rcu_read_lock();
+ 	}
+ 
+-	hci_connect_cfm(conn, ev->status);
+-	hci_conn_del(conn);
++	if (!ev->status && !bis_idx)
++		/* If no BISes have been connected for the BIG,
++		 * terminate. This is in case all bound connections
++		 * have been closed before the BIG creation
++		 * has completed.
++		 */
++		hci_le_terminate_big_sync(hdev, ev->handle,
++					  HCI_ERROR_LOCAL_HOST_TERM);
+ 
+-unlock:
++	rcu_read_unlock();
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6987,9 +7035,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 				flex_array_size(ev, bis, ev->num_bis)))
+ 		return;
+ 
+-	if (ev->status)
+-		return;
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	for (i = 0; i < ev->num_bis; i++) {
+@@ -7013,9 +7058,25 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
+ 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
+ 
+-		hci_iso_setup_path(bis);
++		if (!ev->status) {
++			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
++			hci_iso_setup_path(bis);
++		}
+ 	}
+ 
++	/* In case BIG sync failed, notify each failed connection to
++	 * the user after all hci connections have been added
++	 */
++	if (ev->status)
++		for (i = 0; i < ev->num_bis; i++) {
++			u16 handle = le16_to_cpu(ev->bis[i]);
++
++			bis = hci_conn_hash_lookup_handle(hdev, handle);
++
++			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
++			hci_connect_cfm(bis, ev->status);
++		}
++
+ 	hci_dev_unlock(hdev);
+ }
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 4d1e32bb6a9c6..402b8522c2228 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4684,7 +4684,10 @@ static const struct {
+ 			 "advertised, but not supported."),
+ 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
+ 			 "HCI LE Set Random Private Address Timeout command is "
+-			 "advertised, but not supported.")
++			 "advertised, but not supported."),
++	HCI_QUIRK_BROKEN(LE_CODED,
++			 "HCI LE Coded PHY feature bit is set, "
++			 "but its usage is not supported.")
+ };
+ 
+ /* This function handles hdev setup stage:
+@@ -5269,22 +5272,27 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ }
+ 
+ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+-				      struct hci_conn *conn)
++				      struct hci_conn *conn, u8 reason)
+ {
++	/* Return the reason if scanning, since the connection will most
++	 * likely be cleaned up directly.
++	 */
+ 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+-		return 0;
++		return reason;
+ 
+-	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++	if (conn->role == HCI_ROLE_SLAVE ||
++	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ 		return 0;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+ 				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+ 
+-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
++static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
++				   u8 reason)
+ {
+ 	if (conn->type == LE_LINK)
+-		return hci_le_connect_cancel_sync(hdev, conn);
++		return hci_le_connect_cancel_sync(hdev, conn, reason);
+ 
+ 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ 		return 0;
+@@ -5330,43 +5338,81 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 
+ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ {
+-	int err;
++	int err = 0;
++	u16 handle = conn->handle;
++	struct hci_conn *c;
+ 
+ 	switch (conn->state) {
+ 	case BT_CONNECTED:
+ 	case BT_CONFIG:
+-		return hci_disconnect_sync(hdev, conn, reason);
++		err = hci_disconnect_sync(hdev, conn, reason);
++		break;
+ 	case BT_CONNECT:
+-		err = hci_connect_cancel_sync(hdev, conn);
+-		/* Cleanup hci_conn object if it cannot be cancelled as it
+-		 * likelly means the controller and host stack are out of sync.
+-		 */
+-		if (err) {
++		err = hci_connect_cancel_sync(hdev, conn, reason);
++		break;
++	case BT_CONNECT2:
++		err = hci_reject_conn_sync(hdev, conn, reason);
++		break;
++	case BT_OPEN:
++		/* Clean up BISes that failed to be established */
++		if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags)) {
+ 			hci_dev_lock(hdev);
+-			hci_conn_failed(conn, err);
++			hci_conn_failed(conn, reason);
+ 			hci_dev_unlock(hdev);
+ 		}
+-		return err;
+-	case BT_CONNECT2:
+-		return hci_reject_conn_sync(hdev, conn, reason);
++		break;
+ 	default:
++		hci_dev_lock(hdev);
+ 		conn->state = BT_CLOSED;
+-		break;
++		hci_disconn_cfm(conn, reason);
++		hci_conn_del(conn);
++		hci_dev_unlock(hdev);
++		return 0;
+ 	}
+ 
+-	return 0;
++	hci_dev_lock(hdev);
++
++	/* Check if the connection hasn't been cleaned up while waiting
++	 * for commands to complete.
++	 */
++	c = hci_conn_hash_lookup_handle(hdev, handle);
++	if (!c || c != conn) {
++		err = 0;
++		goto unlock;
++	}
++
++	/* Clean up the hci_conn object if it cannot be cancelled, as that
++	 * likely means the controller and host stack are out of sync, or in
++	 * the LE case it was still scanning, so it can be cleaned up safely.
++	 */
++	hci_conn_failed(conn, reason);
++
++unlock:
++	hci_dev_unlock(hdev);
++	return err;
+ }
+ 
+ static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
+ {
+-	struct hci_conn *conn, *tmp;
+-	int err;
++	struct list_head *head = &hdev->conn_hash.list;
++	struct hci_conn *conn;
+ 
+-	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
+-		err = hci_abort_conn_sync(hdev, conn, reason);
+-		if (err)
+-			return err;
++	rcu_read_lock();
++	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
++		/* Make sure the connection is not freed while unlocking */
++		conn = hci_conn_get(conn);
++		rcu_read_unlock();
++		/* Disregard possible errors since hci_conn_del shall have been
++		 * called even in cases where errors occurred, since those
++		 * would cause hci_conn_failed to be called, which calls
++		 * hci_conn_del internally.
++		 */
++		hci_abort_conn_sync(hdev, conn, reason);
++		hci_conn_put(conn);
++		rcu_read_lock();
+ 	}
++	rcu_read_unlock();
+ 
+ 	return 0;
+ }
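/* Editor's note: hci_abort_conn_sync() may sleep and may unlink the
 * entry, neither of which is allowed under the RCU read lock, hence
 * the peel-the-first-element loop. Generic form of the pattern
 * (illustrative names):
 *
 *	rcu_read_lock();
 *	while ((obj = list_first_or_null_rcu(head, struct obj, list))) {
 *		obj = obj_get(obj);	// pin before dropping RCU
 *		rcu_read_unlock();
 *		teardown_may_sleep(obj);	// may unlink obj
 *		obj_put(obj);
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 */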
+@@ -6253,63 +6299,99 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ 
+ done:
+ 	if (err == -ETIMEDOUT)
+-		hci_le_connect_cancel_sync(hdev, conn);
++		hci_le_connect_cancel_sync(hdev, conn, 0x00);
+ 
+ 	/* Re-enable advertising after the connection attempt is finished. */
+ 	hci_resume_advertising_sync(hdev);
+ 	return err;
+ }
+ 
+-int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn)
++int hci_le_create_cis_sync(struct hci_dev *hdev)
+ {
+ 	struct {
+ 		struct hci_cp_le_create_cis cp;
+ 		struct hci_cis cis[0x1f];
+ 	} cmd;
+-	u8 cig;
+-	struct hci_conn *hcon = conn;
++	struct hci_conn *conn;
++	u8 cig = BT_ISO_QOS_CIG_UNSET;
++
++	/* The spec allows only one pending LE Create CIS command at a time. If
++	 * the command is pending now, don't do anything. We check for pending
++	 * connections after each CIS Established event.
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2566:
++	 *
++	 * If the Host issues this command before all the
++	 * HCI_LE_CIS_Established events from the previous use of the
++	 * command have been generated, the Controller shall return the
++	 * error code Command Disallowed (0x0C).
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2567:
++	 *
++	 * When the Controller receives the HCI_LE_Create_CIS command, the
++	 * Controller sends the HCI_Command_Status event to the Host. An
++	 * HCI_LE_CIS_Established event will be generated for each CIS when it
++	 * is established or if it is disconnected or considered lost before
++	 * being established; until all the events are generated, the command
++	 * remains pending.
++	 */
+ 
+ 	memset(&cmd, 0, sizeof(cmd));
+-	cmd.cis[0].acl_handle = cpu_to_le16(conn->parent->handle);
+-	cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
+-	cmd.cp.num_cis++;
+-	cig = conn->iso_qos.ucast.cig;
+ 
+ 	hci_dev_lock(hdev);
+ 
+ 	rcu_read_lock();
+ 
++	/* Wait until previous Create CIS has completed */
+ 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
++		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
++			goto done;
++	}
+ 
+-		if (conn == hcon || conn->type != ISO_LINK ||
+-		    conn->state == BT_CONNECTED ||
+-		    conn->iso_qos.ucast.cig != cig)
++	/* Find CIG with all CIS ready */
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		struct hci_conn *link;
++
++		if (hci_conn_check_create_cis(conn))
+ 			continue;
+ 
+-		/* Check if all CIS(s) belonging to a CIG are ready */
+-		if (!conn->parent || conn->parent->state != BT_CONNECTED ||
+-		    conn->state != BT_CONNECT) {
+-			cmd.cp.num_cis = 0;
+-			break;
++		cig = conn->iso_qos.ucast.cig;
++
++		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
++			if (hci_conn_check_create_cis(link) > 0 &&
++			    link->iso_qos.ucast.cig == cig &&
++			    link->state != BT_CONNECTED) {
++				cig = BT_ISO_QOS_CIG_UNSET;
++				break;
++			}
+ 		}
+ 
+-		/* Group all CIS with state BT_CONNECT since the spec don't
+-		 * allow to send them individually:
+-		 *
+-		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+-		 * page 2566:
+-		 *
+-		 * If the Host issues this command before all the
+-		 * HCI_LE_CIS_Established events from the previous use of the
+-		 * command have been generated, the Controller shall return the
+-		 * error code Command Disallowed (0x0C).
+-		 */
++		if (cig != BT_ISO_QOS_CIG_UNSET)
++			break;
++	}
++
++	if (cig == BT_ISO_QOS_CIG_UNSET)
++		goto done;
++
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
++
++		if (hci_conn_check_create_cis(conn) ||
++		    conn->iso_qos.ucast.cig != cig)
++			continue;
++
++		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+ 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
+ 		cis->cis_handle = cpu_to_le16(conn->handle);
+ 		cmd.cp.num_cis++;
++
++		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
++			break;
+ 	}
+ 
++done:
+ 	rcu_read_unlock();
+ 
+ 	hci_dev_unlock(hdev);
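/* Editor's summary of the rewritten hci_le_create_cis_sync(), as read
 * from the hunks above: (1) bail out if any CIS still carries
 * HCI_CONN_CREATE_CIS, since only one LE Create CIS may be pending;
 * (2) pick a CIG whose CISes are all ready; (3) batch every ready CIS
 * of that CIG into one command, setting HCI_CONN_CREATE_CIS on each so
 * step (1) holds further rounds back until all HCI_LE_CIS_Established
 * events have arrived.
 */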
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 505d622472688..9b6a7eb2015f0 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -48,6 +48,11 @@ static void iso_sock_kill(struct sock *sk);
+ #define EIR_SERVICE_DATA_LENGTH 4
+ #define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH)
+ 
++/* iso_pinfo flags values */
++enum {
++	BT_SK_BIG_SYNC,
++};
++
+ struct iso_pinfo {
+ 	struct bt_sock		bt;
+ 	bdaddr_t		src;
+@@ -58,7 +63,7 @@ struct iso_pinfo {
+ 	__u8			bc_num_bis;
+ 	__u8			bc_bis[ISO_MAX_NUM_BIS];
+ 	__u16			sync_handle;
+-	__u32			flags;
++	unsigned long		flags;
+ 	struct bt_iso_qos	qos;
+ 	bool			qos_user_set;
+ 	__u8			base_len;
+@@ -287,13 +292,24 @@ static int iso_connect_bis(struct sock *sk)
+ 		goto unlock;
+ 	}
+ 
+-	hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst,
+-			       le_addr_type(iso_pi(sk)->dst_type),
+-			       &iso_pi(sk)->qos, iso_pi(sk)->base_len,
+-			       iso_pi(sk)->base);
+-	if (IS_ERR(hcon)) {
+-		err = PTR_ERR(hcon);
+-		goto unlock;
++	/* Just bind if DEFER_SETUP has been set */
++	if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
++		hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst,
++				    &iso_pi(sk)->qos, iso_pi(sk)->base_len,
++				    iso_pi(sk)->base);
++		if (IS_ERR(hcon)) {
++			err = PTR_ERR(hcon);
++			goto unlock;
++		}
++	} else {
++		hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst,
++				       le_addr_type(iso_pi(sk)->dst_type),
++				       &iso_pi(sk)->qos, iso_pi(sk)->base_len,
++				       iso_pi(sk)->base);
++		if (IS_ERR(hcon)) {
++			err = PTR_ERR(hcon);
++			goto unlock;
++		}
+ 	}
+ 
+ 	conn = iso_conn_add(hcon);
+@@ -317,6 +333,9 @@ static int iso_connect_bis(struct sock *sk)
+ 	if (hcon->state == BT_CONNECTED) {
+ 		iso_sock_clear_timer(sk);
+ 		sk->sk_state = BT_CONNECTED;
++	} else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
++		iso_sock_clear_timer(sk);
++		sk->sk_state = BT_CONNECT;
+ 	} else {
+ 		sk->sk_state = BT_CONNECT;
+ 		iso_sock_set_timer(sk, sk->sk_sndtimeo);
+@@ -1202,6 +1221,12 @@ static bool check_io_qos(struct bt_iso_io_qos *qos)
+ 
+ static bool check_ucast_qos(struct bt_iso_qos *qos)
+ {
++	if (qos->ucast.cig > 0xef && qos->ucast.cig != BT_ISO_QOS_CIG_UNSET)
++		return false;
++
++	if (qos->ucast.cis > 0xef && qos->ucast.cis != BT_ISO_QOS_CIS_UNSET)
++		return false;
++
+ 	if (qos->ucast.sca > 0x07)
+ 		return false;
+ 
+@@ -1466,7 +1491,7 @@ static int iso_sock_release(struct socket *sock)
+ 
+ 	iso_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
+ 	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+@@ -1563,6 +1588,12 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 		hci_conn_hold(hcon);
+ 		iso_chan_add(conn, sk, parent);
+ 
++		if (ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) {
++			/* Trigger error signal on child socket */
++			sk->sk_err = ECONNREFUSED;
++			sk->sk_error_report(sk);
++		}
++
+ 		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+ 			sk->sk_state = BT_CONNECT2;
+ 		else
+@@ -1631,15 +1662,17 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 			if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
+ 				iso_pi(sk)->bc_num_bis = ev2->num_bis;
+ 
+-			err = hci_le_big_create_sync(hdev,
+-						     &iso_pi(sk)->qos,
+-						     iso_pi(sk)->sync_handle,
+-						     iso_pi(sk)->bc_num_bis,
+-						     iso_pi(sk)->bc_bis);
+-			if (err) {
+-				bt_dev_err(hdev, "hci_le_big_create_sync: %d",
+-					   err);
+-				sk = NULL;
++			if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
++				err = hci_le_big_create_sync(hdev,
++							     &iso_pi(sk)->qos,
++							     iso_pi(sk)->sync_handle,
++							     iso_pi(sk)->bc_num_bis,
++							     iso_pi(sk)->bc_bis);
++				if (err) {
++					bt_dev_err(hdev, "hci_le_big_create_sync: %d",
++						   err);
++					sk = NULL;
++				}
+ 			}
+ 		}
+ 	} else {
+@@ -1676,13 +1709,18 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ 		}
+ 
+ 		/* Create CIS if pending */
+-		hci_le_create_cis(hcon);
++		hci_le_create_cis_pending(hcon->hdev);
+ 		return;
+ 	}
+ 
+ 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ 
+-	if (!status) {
++	/* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED is set,
++	 * queue the failed bis connection into the accept queue of the
++	 * listening socket and wake up userspace, to inform the user about
++	 * the BIG sync failed event.
++	 */
++	if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
+ 		struct iso_conn *conn;
+ 
+ 		conn = iso_conn_add(hcon);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d4498037fadc6..6240b20f020a8 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -3580,18 +3580,6 @@ unlock:
+ 	return err;
+ }
+ 
+-static int abort_conn_sync(struct hci_dev *hdev, void *data)
+-{
+-	struct hci_conn *conn;
+-	u16 handle = PTR_ERR(data);
+-
+-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+-	if (!conn)
+-		return 0;
+-
+-	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+-}
+-
+ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 			      u16 len)
+ {
+@@ -3642,8 +3630,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 					      le_addr_type(addr->type));
+ 
+ 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
+-		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
+-				   NULL);
++		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
+index bf5cee48916c7..b80a2162a5c33 100644
+--- a/net/bluetooth/msft.c
++++ b/net/bluetooth/msft.c
+@@ -91,6 +91,33 @@ struct msft_ev_le_monitor_device {
+ struct msft_monitor_advertisement_handle_data {
+ 	__u8  msft_handle;
+ 	__u16 mgmt_handle;
++	__s8 rssi_high;
++	__s8 rssi_low;
++	__u8 rssi_low_interval;
++	__u8 rssi_sampling_period;
++	__u8 cond_type;
++	struct list_head list;
++};
++
++enum monitor_addr_filter_state {
++	AF_STATE_IDLE,
++	AF_STATE_ADDING,
++	AF_STATE_ADDED,
++	AF_STATE_REMOVING,
++};
++
++#define MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR	0x04
++struct msft_monitor_addr_filter_data {
++	__u8     msft_handle;
++	__u8     pattern_handle; /* address filters pertain to */
++	__u16    mgmt_handle;
++	int      state;
++	__s8     rssi_high;
++	__s8     rssi_low;
++	__u8     rssi_low_interval;
++	__u8     rssi_sampling_period;
++	__u8     addr_type;
++	bdaddr_t bdaddr;
+ 	struct list_head list;
+ };
+ 
+@@ -99,9 +126,12 @@ struct msft_data {
+ 	__u8  evt_prefix_len;
+ 	__u8  *evt_prefix;
+ 	struct list_head handle_map;
++	struct list_head address_filters;
+ 	__u8 resuming;
+ 	__u8 suspending;
+ 	__u8 filter_enabled;
++	/* To synchronize add/remove address filter and monitor device event. */
++	struct mutex filter_lock;
+ };
+ 
+ bool msft_monitor_supported(struct hci_dev *hdev)
+@@ -180,6 +210,24 @@ static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
+ 	return NULL;
+ }
+ 
++/* This function requires the caller holds msft->filter_lock */
++static struct msft_monitor_addr_filter_data *msft_find_address_data
++			(struct hci_dev *hdev, u8 addr_type, bdaddr_t *addr,
++			 u8 pattern_handle)
++{
++	struct msft_monitor_addr_filter_data *entry;
++	struct msft_data *msft = hdev->msft_data;
++
++	list_for_each_entry(entry, &msft->address_filters, list) {
++		if (entry->pattern_handle == pattern_handle &&
++		    addr_type == entry->addr_type &&
++		    !bacmp(addr, &entry->bdaddr))
++			return entry;
++	}
++
++	return NULL;
++}
++
+ /* This function requires the caller holds hdev->lock */
+ static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle,
+ 				   bdaddr_t *bdaddr, __u8 addr_type,
+@@ -240,6 +288,7 @@ static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode,
+ 
+ 	handle_data->mgmt_handle = monitor->handle;
+ 	handle_data->msft_handle = rp->handle;
++	handle_data->cond_type   = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
+ 	INIT_LIST_HEAD(&handle_data->list);
+ 	list_add(&handle_data->list, &msft->handle_map);
+ 
+@@ -254,6 +303,70 @@ unlock:
+ 	return status;
+ }
+ 
++/* This function requires the caller holds hci_req_sync_lock */
++static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle)
++{
++	struct msft_monitor_addr_filter_data *address_filter, *n;
++	struct msft_cp_le_cancel_monitor_advertisement cp;
++	struct msft_data *msft = hdev->msft_data;
++	struct list_head head;
++	struct sk_buff *skb;
++
++	INIT_LIST_HEAD(&head);
++
++	/* Cancel all corresponding address monitors */
++	mutex_lock(&msft->filter_lock);
++
++	list_for_each_entry_safe(address_filter, n, &msft->address_filters,
++				 list) {
++		if (address_filter->pattern_handle != handle)
++			continue;
++
++		list_del(&address_filter->list);
++
++		/* Keep the address filter and let
++		 * msft_add_address_filter_sync() remove and free the address
++		 * filter.
++		 */
++		if (address_filter->state == AF_STATE_ADDING) {
++			address_filter->state = AF_STATE_REMOVING;
++			continue;
++		}
++
++		/* Keep the address filter and let
++		 * msft_cancel_address_filter_sync() remove and free the address
++		 * filter
++		 */
++		if (address_filter->state == AF_STATE_REMOVING)
++			continue;
++
++		list_add_tail(&address_filter->list, &head);
++	}
++
++	mutex_unlock(&msft->filter_lock);
++
++	list_for_each_entry_safe(address_filter, n, &head, list) {
++		list_del(&address_filter->list);
++
++		cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
++		cp.handle = address_filter->msft_handle;
++
++		skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
++				     HCI_CMD_TIMEOUT);
++		if (IS_ERR_OR_NULL(skb)) {
++			kfree(address_filter);
++			continue;
++		}
++
++		kfree_skb(skb);
++
++		bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter",
++			   &address_filter->bdaddr);
++
++		kfree(address_filter);
++	}
++}
++
+ static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
+ 						   u16 opcode,
+ 						   struct adv_monitor *monitor,
+@@ -263,6 +376,7 @@ static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
+ 	struct msft_monitor_advertisement_handle_data *handle_data;
+ 	struct msft_data *msft = hdev->msft_data;
+ 	int status = 0;
++	u8 msft_handle;
+ 
+ 	rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
+ 	if (skb->len < sizeof(*rp)) {
+@@ -293,11 +407,17 @@ static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
+ 						NULL, 0, false);
+ 		}
+ 
++		msft_handle = handle_data->msft_handle;
++
+ 		list_del(&handle_data->list);
+ 		kfree(handle_data);
+-	}
+ 
+-	hci_dev_unlock(hdev);
++		hci_dev_unlock(hdev);
++
++		msft_remove_addr_filters_sync(hdev, msft_handle);
++	} else {
++		hci_dev_unlock(hdev);
++	}
+ 
+ done:
+ 	return status;
+@@ -394,12 +514,14 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
+ {
+ 	struct msft_cp_le_monitor_advertisement *cp;
+ 	struct msft_le_monitor_advertisement_pattern_data *pattern_data;
++	struct msft_monitor_advertisement_handle_data *handle_data;
+ 	struct msft_le_monitor_advertisement_pattern *pattern;
+ 	struct adv_pattern *entry;
+ 	size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
+ 	ptrdiff_t offset = 0;
+ 	u8 pattern_count = 0;
+ 	struct sk_buff *skb;
++	int err;
+ 
+ 	if (!msft_monitor_pattern_valid(monitor))
+ 		return -EINVAL;
+@@ -436,16 +558,31 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
+ 
+ 	skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
+ 			     HCI_CMD_TIMEOUT);
+-	kfree(cp);
+ 
+ 	if (IS_ERR_OR_NULL(skb)) {
+-		if (!skb)
+-			return -EIO;
+-		return PTR_ERR(skb);
++		err = PTR_ERR(skb);
++		goto out_free;
+ 	}
+ 
+-	return msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
+-						monitor, skb);
++	err = msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
++					       monitor, skb);
++	if (err)
++		goto out_free;
++
++	handle_data = msft_find_handle_data(hdev, monitor->handle, true);
++	if (!handle_data) {
++		err = -ENODATA;
++		goto out_free;
++	}
++
++	handle_data->rssi_high	= cp->rssi_high;
++	handle_data->rssi_low	= cp->rssi_low;
++	handle_data->rssi_low_interval	  = cp->rssi_low_interval;
++	handle_data->rssi_sampling_period = cp->rssi_sampling_period;
++
++out_free:
++	kfree(cp);
++	return err;
+ }
+ 
+ /* This function requires the caller holds hci_req_sync_lock */
+@@ -538,6 +675,7 @@ void msft_do_close(struct hci_dev *hdev)
+ {
+ 	struct msft_data *msft = hdev->msft_data;
+ 	struct msft_monitor_advertisement_handle_data *handle_data, *tmp;
++	struct msft_monitor_addr_filter_data *address_filter, *n;
+ 	struct adv_monitor *monitor;
+ 
+ 	if (!msft)
+@@ -559,6 +697,14 @@ void msft_do_close(struct hci_dev *hdev)
+ 		kfree(handle_data);
+ 	}
+ 
++	mutex_lock(&msft->filter_lock);
++	list_for_each_entry_safe(address_filter, n, &msft->address_filters,
++				 list) {
++		list_del(&address_filter->list);
++		kfree(address_filter);
++	}
++	mutex_unlock(&msft->filter_lock);
++
+ 	hci_dev_lock(hdev);
+ 
+ 	/* Clear any devices that are being monitored and notify device lost */
+@@ -568,6 +714,49 @@ void msft_do_close(struct hci_dev *hdev)
+ 	hci_dev_unlock(hdev);
+ }
+ 
++static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data)
++{
++	struct msft_monitor_addr_filter_data *address_filter = data;
++	struct msft_cp_le_cancel_monitor_advertisement cp;
++	struct msft_data *msft = hdev->msft_data;
++	struct sk_buff *skb;
++	int err = 0;
++
++	if (!msft) {
++		bt_dev_err(hdev, "MSFT: msft data is freed");
++		return -EINVAL;
++	}
++
++	/* The address filter has been removed by hci dev close */
++	if (!test_bit(HCI_UP, &hdev->flags))
++		return 0;
++
++	mutex_lock(&msft->filter_lock);
++	list_del(&address_filter->list);
++	mutex_unlock(&msft->filter_lock);
++
++	cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
++	cp.handle = address_filter->msft_handle;
++
++	skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
++			     HCI_CMD_TIMEOUT);
++	if (IS_ERR_OR_NULL(skb)) {
++		bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter",
++			   &address_filter->bdaddr);
++		err = -EIO;
++		goto done;
++	}
++	kfree_skb(skb);
++
++	bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter",
++		   &address_filter->bdaddr);
++
++done:
++	kfree(address_filter);
++
++	return err;
++}
++
+ void msft_register(struct hci_dev *hdev)
+ {
+ 	struct msft_data *msft = NULL;
+@@ -581,7 +770,9 @@ void msft_register(struct hci_dev *hdev)
+ 	}
+ 
+ 	INIT_LIST_HEAD(&msft->handle_map);
++	INIT_LIST_HEAD(&msft->address_filters);
+ 	hdev->msft_data = msft;
++	mutex_init(&msft->filter_lock);
+ }
+ 
+ void msft_unregister(struct hci_dev *hdev)
+@@ -596,6 +787,7 @@ void msft_unregister(struct hci_dev *hdev)
+ 	hdev->msft_data = NULL;
+ 
+ 	kfree(msft->evt_prefix);
++	mutex_destroy(&msft->filter_lock);
+ 	kfree(msft);
+ }
+ 
+@@ -645,11 +837,149 @@ static void *msft_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+ 	return data;
+ }
+ 
++static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
++{
++	struct msft_monitor_addr_filter_data *address_filter = data;
++	struct msft_rp_le_monitor_advertisement *rp;
++	struct msft_cp_le_monitor_advertisement *cp;
++	struct msft_data *msft = hdev->msft_data;
++	struct sk_buff *skb = NULL;
++	bool remove = false;
++	size_t size;
++
++	if (!msft) {
++		bt_dev_err(hdev, "MSFT: msft data is freed");
++		return -EINVAL;
++	}
++
++	/* The address filter has been removed by hci dev close */
++	if (!test_bit(HCI_UP, &hdev->flags))
++		return -ENODEV;
++
++	/* It is safe to use the address filter from now on.
++	 * msft_monitor_device_evt() won't delete this filter because it
++	 * hasn't been added yet.
++	 * All other functions that require hci_req_sync_lock won't touch
++	 * this filter before this function completes, because they are
++	 * serialized by hci_req_sync_lock.
++	 */
++
++	if (address_filter->state == AF_STATE_REMOVING) {
++		mutex_lock(&msft->filter_lock);
++		list_del(&address_filter->list);
++		mutex_unlock(&msft->filter_lock);
++		kfree(address_filter);
++		return 0;
++	}
++
++	size = sizeof(*cp) +
++	       sizeof(address_filter->addr_type) +
++	       sizeof(address_filter->bdaddr);
++	cp = kzalloc(size, GFP_KERNEL);
++	if (!cp) {
++		bt_dev_err(hdev, "MSFT: Alloc cmd param err");
++		remove = true;
++		goto done;
++	}
++	cp->sub_opcode           = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
++	cp->rssi_high		 = address_filter->rssi_high;
++	cp->rssi_low		 = address_filter->rssi_low;
++	cp->rssi_low_interval    = address_filter->rssi_low_interval;
++	cp->rssi_sampling_period = address_filter->rssi_sampling_period;
++	cp->cond_type            = MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR;
++	cp->data[0]              = address_filter->addr_type;
++	memcpy(&cp->data[1], &address_filter->bdaddr,
++	       sizeof(address_filter->bdaddr));
++
++	skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
++			     HCI_CMD_TIMEOUT);
++	if (IS_ERR_OR_NULL(skb)) {
++		bt_dev_err(hdev, "Failed to enable address %pMR filter",
++			   &address_filter->bdaddr);
++		skb = NULL;
++		remove = true;
++		goto done;
++	}
++
++	rp = skb_pull_data(skb, sizeof(*rp));
++	if (!rp || rp->sub_opcode != MSFT_OP_LE_MONITOR_ADVERTISEMENT ||
++	    rp->status)
++		remove = true;
++
++done:
++	mutex_lock(&msft->filter_lock);
++
++	if (remove) {
++		bt_dev_warn(hdev, "MSFT: Remove address (%pMR) filter",
++			    &address_filter->bdaddr);
++		list_del(&address_filter->list);
++		kfree(address_filter);
++	} else {
++		address_filter->state = AF_STATE_ADDED;
++		address_filter->msft_handle = rp->handle;
++		bt_dev_dbg(hdev, "MSFT: Address %pMR filter enabled",
++			   &address_filter->bdaddr);
++	}
++	mutex_unlock(&msft->filter_lock);
++
++	kfree_skb(skb);
++
++	return 0;
++}
++
++/* This function requires the caller holds msft->filter_lock */
++static struct msft_monitor_addr_filter_data *msft_add_address_filter
++		(struct hci_dev *hdev, u8 addr_type, bdaddr_t *bdaddr,
++		 struct msft_monitor_advertisement_handle_data *handle_data)
++{
++	struct msft_monitor_addr_filter_data *address_filter = NULL;
++	struct msft_data *msft = hdev->msft_data;
++	int err;
++
++	address_filter = kzalloc(sizeof(*address_filter), GFP_KERNEL);
++	if (!address_filter)
++		return NULL;
++
++	address_filter->state             = AF_STATE_ADDING;
++	address_filter->msft_handle       = 0xff;
++	address_filter->pattern_handle    = handle_data->msft_handle;
++	address_filter->mgmt_handle       = handle_data->mgmt_handle;
++	address_filter->rssi_high         = handle_data->rssi_high;
++	address_filter->rssi_low          = handle_data->rssi_low;
++	address_filter->rssi_low_interval = handle_data->rssi_low_interval;
++	address_filter->rssi_sampling_period = handle_data->rssi_sampling_period;
++	address_filter->addr_type            = addr_type;
++	bacpy(&address_filter->bdaddr, bdaddr);
++
++	/* With the above AF_STATE_ADDING, duplicate address filters are
++	 * avoided when monitor device events (found/lost) arrive frequently
++	 * for the same device.
++	 */
++	list_add_tail(&address_filter->list, &msft->address_filters);
++
++	err = hci_cmd_sync_queue(hdev, msft_add_address_filter_sync,
++				 address_filter, NULL);
++	if (err < 0) {
++		bt_dev_err(hdev, "MSFT: Add address %pMR filter err", bdaddr);
++		list_del(&address_filter->list);
++		kfree(address_filter);
++		return NULL;
++	}
++
++	bt_dev_dbg(hdev, "MSFT: Add device %pMR address filter",
++		   &address_filter->bdaddr);
++
++	return address_filter;
++}
++
+ /* This function requires the caller holds hdev->lock */
+ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ {
++	struct msft_monitor_addr_filter_data *n, *address_filter = NULL;
+ 	struct msft_ev_le_monitor_device *ev;
+ 	struct msft_monitor_advertisement_handle_data *handle_data;
++	struct msft_data *msft = hdev->msft_data;
++	u16 mgmt_handle = 0xffff;
+ 	u8 addr_type;
+ 
+ 	ev = msft_skb_pull(hdev, skb, MSFT_EV_LE_MONITOR_DEVICE, sizeof(*ev));
+@@ -662,9 +992,53 @@ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 		   ev->monitor_state, &ev->bdaddr);
+ 
+ 	handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false);
+-	if (!handle_data)
++
++	if (!test_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks)) {
++		if (!handle_data)
++			return;
++		mgmt_handle = handle_data->mgmt_handle;
++		goto report_state;
++	}
++
++	if (handle_data) {
++		/* Don't report device found/lost events from pattern
++		 * monitors. A pattern monitor always has its own address
++		 * filters for tracking devices.
++		 */
++
++		address_filter = msft_find_address_data(hdev, ev->addr_type,
++							&ev->bdaddr,
++							handle_data->msft_handle);
++		if (address_filter)
++			return;
++
++		if (ev->monitor_state && handle_data->cond_type ==
++				MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN)
++			msft_add_address_filter(hdev, ev->addr_type,
++						&ev->bdaddr, handle_data);
++
+ 		return;
++	}
+ 
++	/* This device event is not from a pattern monitor.
++	 * Report it if there is a corresponding address_filter for it.
++	 */
++	list_for_each_entry(n, &msft->address_filters, list) {
++		if (n->state == AF_STATE_ADDED &&
++		    n->msft_handle == ev->monitor_handle) {
++			mgmt_handle = n->mgmt_handle;
++			address_filter = n;
++			break;
++		}
++	}
++
++	if (!address_filter) {
++		bt_dev_warn(hdev, "MSFT: Unexpected device event %pMR, %u, %u",
++			    &ev->bdaddr, ev->monitor_handle, ev->monitor_state);
++		return;
++	}
++
++report_state:
+ 	switch (ev->addr_type) {
+ 	case ADDR_LE_DEV_PUBLIC:
+ 		addr_type = BDADDR_LE_PUBLIC;
+@@ -681,12 +1055,18 @@ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
+-	if (ev->monitor_state)
+-		msft_device_found(hdev, &ev->bdaddr, addr_type,
+-				  handle_data->mgmt_handle);
+-	else
+-		msft_device_lost(hdev, &ev->bdaddr, addr_type,
+-				 handle_data->mgmt_handle);
++	if (ev->monitor_state) {
++		msft_device_found(hdev, &ev->bdaddr, addr_type, mgmt_handle);
++	} else {
++		if (address_filter && address_filter->state == AF_STATE_ADDED) {
++			address_filter->state = AF_STATE_REMOVING;
++			hci_cmd_sync_queue(hdev,
++					   msft_cancel_address_filter_sync,
++					   address_filter,
++					   NULL);
++		}
++		msft_device_lost(hdev, &ev->bdaddr, addr_type, mgmt_handle);
++	}
+ }
+ 
+ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
+@@ -724,7 +1104,9 @@ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
+ 
+ 	switch (*evt) {
+ 	case MSFT_EV_LE_MONITOR_DEVICE:
++		mutex_lock(&msft->filter_lock);
+ 		msft_monitor_device_evt(hdev, skb);
++		mutex_unlock(&msft->filter_lock);
+ 		break;
+ 
+ 	default:
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 7762604ddfc05..99b149261949a 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1267,7 +1267,7 @@ static int sco_sock_release(struct socket *sock)
+ 
+ 	sco_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
+ 	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 28a59596987a9..f1a5775400658 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7352,6 +7352,8 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
+ 		return -ENETUNREACH;
+ 	if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
+ 		return -ESOCKTNOSUPPORT;
++	if (sk_unhashed(sk))
++		return -EOPNOTSUPP;
+ 	if (sk_is_refcounted(sk) &&
+ 	    unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ 		return -ENOENT;
+diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
+index 8b6b5e72b2179..4a0797f0a154b 100644
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -60,9 +60,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
+ 			ret = BPF_OK;
+ 		} else {
+ 			skb_reset_mac_header(skb);
+-			ret = skb_do_redirect(skb);
+-			if (ret == 0)
+-				ret = BPF_REDIRECT;
++			skb_do_redirect(skb);
++			ret = BPF_REDIRECT;
+ 		}
+ 		break;
+ 
+@@ -255,7 +254,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
+ 
+ 	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ 	if (unlikely(err))
+-		return err;
++		return net_xmit_errno(err);
+ 
+ 	/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
+ 	return LWTUNNEL_XMIT_DONE;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a298992060e6e..acdf94bb54c80 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -550,7 +550,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
+ 			     bool *pfmemalloc)
+ {
+ 	bool ret_pfmemalloc = false;
+-	unsigned int obj_size;
++	size_t obj_size;
+ 	void *obj;
+ 
+ 	obj_size = SKB_HEAD_ALIGN(*size);
+@@ -567,7 +567,13 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
+ 		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ 		goto out;
+ 	}
+-	*size = obj_size = kmalloc_size_roundup(obj_size);
++
++	obj_size = kmalloc_size_roundup(obj_size);
++	/* The following cast might truncate high-order bits of obj_size; this
++	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
++	 */
++	*size = (unsigned int)obj_size;
++
+ 	/*
+ 	 * Try a regular allocation, when that fails and we're not entitled
+ 	 * to the reserves, fail.
+@@ -4354,21 +4360,20 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 	struct sk_buff *segs = NULL;
+ 	struct sk_buff *tail = NULL;
+ 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+-	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+ 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
+ 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+-	struct sk_buff *frag_skb = head_skb;
+ 	unsigned int offset = doffset;
+ 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+ 	unsigned int partial_segs = 0;
+ 	unsigned int headroom;
+ 	unsigned int len = head_skb->len;
++	struct sk_buff *frag_skb;
++	skb_frag_t *frag;
+ 	__be16 proto;
+ 	bool csum, sg;
+-	int nfrags = skb_shinfo(head_skb)->nr_frags;
+ 	int err = -ENOMEM;
+ 	int i = 0;
+-	int pos;
++	int nfrags, pos;
+ 
+ 	if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
+ 	    mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
+@@ -4445,6 +4450,13 @@ normal:
+ 	headroom = skb_headroom(head_skb);
+ 	pos = skb_headlen(head_skb);
+ 
++	if (skb_orphan_frags(head_skb, GFP_ATOMIC))
++		return ERR_PTR(-ENOMEM);
++
++	nfrags = skb_shinfo(head_skb)->nr_frags;
++	frag = skb_shinfo(head_skb)->frags;
++	frag_skb = head_skb;
++
+ 	do {
+ 		struct sk_buff *nskb;
+ 		skb_frag_t *nskb_frag;
+@@ -4465,6 +4477,10 @@ normal:
+ 		    (skb_headlen(list_skb) == len || sg)) {
+ 			BUG_ON(skb_headlen(list_skb) > len);
+ 
++			nskb = skb_clone(list_skb, GFP_ATOMIC);
++			if (unlikely(!nskb))
++				goto err;
++
+ 			i = 0;
+ 			nfrags = skb_shinfo(list_skb)->nr_frags;
+ 			frag = skb_shinfo(list_skb)->frags;
+@@ -4483,12 +4499,8 @@ normal:
+ 				frag++;
+ 			}
+ 
+-			nskb = skb_clone(list_skb, GFP_ATOMIC);
+ 			list_skb = list_skb->next;
+ 
+-			if (unlikely(!nskb))
+-				goto err;
+-
+ 			if (unlikely(pskb_trim(nskb, len))) {
+ 				kfree_skb(nskb);
+ 				goto err;
+@@ -4564,12 +4576,16 @@ normal:
+ 		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
+ 					   SKBFL_SHARED_FRAG;
+ 
+-		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+-		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
++		if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+ 			goto err;
+ 
+ 		while (pos < offset + len) {
+ 			if (i >= nfrags) {
++				if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
++				    skb_zerocopy_clone(nskb, list_skb,
++						       GFP_ATOMIC))
++					goto err;
++
+ 				i = 0;
+ 				nfrags = skb_shinfo(list_skb)->nr_frags;
+ 				frag = skb_shinfo(list_skb)->frags;
+@@ -4583,10 +4599,6 @@ normal:
+ 					i--;
+ 					frag--;
+ 				}
+-				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+-				    skb_zerocopy_clone(nskb, frag_skb,
+-						       GFP_ATOMIC))
+-					goto err;
+ 
+ 				list_skb = list_skb->next;
+ 			}
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c9cffb7acbeae..1c5c01b116e6f 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -797,7 +797,7 @@ EXPORT_SYMBOL(sock_set_reuseport);
+ void sock_no_linger(struct sock *sk)
+ {
+ 	lock_sock(sk);
+-	sk->sk_lingertime = 0;
++	WRITE_ONCE(sk->sk_lingertime, 0);
+ 	sock_set_flag(sk, SOCK_LINGER);
+ 	release_sock(sk);
+ }
+@@ -1230,15 +1230,15 @@ set_sndbuf:
+ 			ret = -EFAULT;
+ 			break;
+ 		}
+-		if (!ling.l_onoff)
++		if (!ling.l_onoff) {
+ 			sock_reset_flag(sk, SOCK_LINGER);
+-		else {
+-#if (BITS_PER_LONG == 32)
+-			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+-				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
++		} else {
++			unsigned long t_sec = ling.l_linger;
++
++			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
++				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
+ 			else
+-#endif
+-				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
++				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
+ 			sock_set_flag(sk, SOCK_LINGER);
+ 		}
+ 		break;
+@@ -1691,7 +1691,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 	case SO_LINGER:
+ 		lv		= sizeof(v.ling);
+ 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
+-		v.ling.l_linger	= sk->sk_lingertime / HZ;
++		v.ling.l_linger	= READ_ONCE(sk->sk_lingertime) / HZ;
+ 		break;
+ 
+ 	case SO_BSDCOMPAT:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index a545ad71201c8..a5361fb7a415b 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -255,12 +255,17 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
+ 	int err;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* Only need dccph_dport & dccph_sport which are the first
+-	 * 4 bytes in dccp header.
++	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
++	 * which is in byte 7 of the dccp header.
+ 	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
++	 *
++	 * Later on, we want to access the sequence number fields, which are
++	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+ 	 */
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
++	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
++		return -EINVAL;
++	iph = (struct iphdr *)skb->data;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = __inet_lookup_established(net, &dccp_hashinfo,
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 686090bc59451..33f6ccf6ba77b 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -74,7 +74,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
+ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 			u8 type, u8 code, int offset, __be32 info)
+ {
+-	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
++	const struct ipv6hdr *hdr;
+ 	const struct dccp_hdr *dh;
+ 	struct dccp_sock *dp;
+ 	struct ipv6_pinfo *np;
+@@ -83,12 +83,17 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	__u64 seq;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* Only need dccph_dport & dccph_sport which are the first
+-	 * 4 bytes in dccp header.
++	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
++	 * which is in byte 7 of the dccp header.
+ 	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
++	 *
++	 * Later on, we want to access the sequence number fields, which are
++	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+ 	 */
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
++	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
++		return -EINVAL;
++	hdr = (const struct ipv6hdr *)skb->data;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 48ff5f13e7979..193d8362efe2e 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -353,8 +353,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ 	struct flowi4 fl4;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	unsigned int size = mtu;
++	unsigned int size;
+ 
++	size = min(mtu, IP_MAX_MTU);
+ 	while (1) {
+ 		skb = alloc_skb(size + hlen + tlen,
+ 				GFP_ATOMIC | __GFP_NOWARN);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 6ba1a0fafbaab..a6e4c82615d7e 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -216,7 +216,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ 		int res = lwtunnel_xmit(skb);
+ 
+-		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
++		if (res != LWTUNNEL_XMIT_CONTINUE)
+ 			return res;
+ 	}
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 57c8af1859c16..48c2b96b08435 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -287,7 +287,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
+ 		icsk->icsk_ack.quick = quickacks;
+ }
+ 
+-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
++static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+@@ -295,7 +295,6 @@ void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ 	inet_csk_exit_pingpong_mode(sk);
+ 	icsk->icsk_ack.ato = TCP_ATO_MIN;
+ }
+-EXPORT_SYMBOL(tcp_enter_quickack_mode);
+ 
+ /* Send ACKs quickly, if "quick" count is not exhausted
+  * and the session is not interactive.
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 206418b6d7c48..a9f6200f12f15 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -446,6 +446,22 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ 			  req->timeout << req->num_timeout, TCP_RTO_MAX);
+ }
+ 
++static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
++				     const struct sk_buff *skb)
++{
++	const struct tcp_sock *tp = tcp_sk(sk);
++	const int timeout = TCP_RTO_MAX * 2;
++	u32 rcv_delta, rtx_delta;
++
++	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
++	if (rcv_delta <= timeout)
++		return false;
++
++	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
++			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
++
++	return rtx_delta > timeout;
++}
+ 
+ /**
+  *  tcp_retransmit_timer() - The TCP retransmit timeout handler
+@@ -511,7 +527,7 @@ void tcp_retransmit_timer(struct sock *sk)
+ 					    tp->snd_una, tp->snd_nxt);
+ 		}
+ #endif
+-		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
++		if (tcp_rtx_probe0_timed_out(sk, skb)) {
+ 			tcp_write_err(sk);
+ 			goto out;
+ 		}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index abfa860367aa9..b3aa68ea29de2 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -452,14 +452,24 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+-			result = lookup_reuseport(net, sk, skb,
+-						  saddr, sport, daddr, hnum);
++			badness = score;
++			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++			if (!result) {
++				result = sk;
++				continue;
++			}
++
+ 			/* Fall back to scoring if group has connections */
+-			if (result && !reuseport_has_conns(sk))
++			if (!reuseport_has_conns(sk))
+ 				return result;
+ 
+-			result = result ? : sk;
+-			badness = score;
++			/* Reuseport logic returned an error, keep original score. */
++			if (IS_ERR(result))
++				continue;
++
++			badness = compute_score(result, net, saddr, sport,
++						daddr, hnum, dif, sdif);
++
+ 		}
+ 	}
+ 	return result;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 1e8c90e976080..016b0a513259f 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -113,7 +113,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ 		int res = lwtunnel_xmit(skb);
+ 
+-		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
++		if (res != LWTUNNEL_XMIT_CONTINUE)
+ 			return res;
+ 	}
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 486d893b8e3ca..3ffca158d3e11 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -195,14 +195,23 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+-			result = lookup_reuseport(net, sk, skb,
+-						  saddr, sport, daddr, hnum);
++			badness = score;
++			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++			if (!result) {
++				result = sk;
++				continue;
++			}
++
+ 			/* Fall back to scoring if group has connections */
+-			if (result && !reuseport_has_conns(sk))
++			if (!reuseport_has_conns(sk))
+ 				return result;
+ 
+-			result = result ? : sk;
+-			badness = score;
++			/* Reuseport logic returned an error, keep original score. */
++			if (IS_ERR(result))
++				continue;
++
++			badness = compute_score(sk, net, saddr, sport,
++						daddr, hnum, dif, sdif);
+ 		}
+ 	}
+ 	return result;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index e7ac246038925..d354b32a20f8f 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3648,12 +3648,6 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+ 	lockdep_assert_held(&local->mtx);
+ 	lockdep_assert_held(&local->chanctx_mtx);
+ 
+-	if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
+-		sdata->vif.bss_conf.eht_puncturing =
+-					sdata->vif.bss_conf.csa_punct_bitmap;
+-		changed |= BSS_CHANGED_EHT_PUNCTURING;
+-	}
+-
+ 	/*
+ 	 * using reservation isn't immediate as it may be deferred until later
+ 	 * with multi-vif. once reservation is complete it will re-schedule the
+@@ -3683,6 +3677,12 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+ 	if (err)
+ 		return err;
+ 
++	if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
++		sdata->vif.bss_conf.eht_puncturing =
++					sdata->vif.bss_conf.csa_punct_bitmap;
++		changed |= BSS_CHANGED_EHT_PUNCTURING;
++	}
++
+ 	ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
+ 
+ 	if (sdata->deflink.csa_block_tx) {
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 005a7ce87217e..bf4f91b78e1dc 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -36,6 +36,7 @@ MODULE_ALIAS("ip_set_hash:net,port,net");
+ #define IP_SET_HASH_WITH_PROTO
+ #define IP_SET_HASH_WITH_NETS
+ #define IPSET_NET_COUNT 2
++#define IP_SET_HASH_WITH_NET0
+ 
+ /* IPv4 variant */
+ 
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index 7f856ceb3a668..a9844eefedebc 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -238,7 +238,12 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+ 	if (!tcph)
+ 		goto err;
+ 
++	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
++		goto err;
++
++	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
+ 	opt = (u8 *)tcph;
++
+ 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+ 		union {
+ 			__be16 v16;
+@@ -253,15 +258,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+ 		if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
+ 			goto err;
+ 
+-		if (skb_ensure_writable(pkt->skb,
+-					nft_thoff(pkt) + i + priv->len))
+-			goto err;
+-
+-		tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
+-					      &tcphdr_len);
+-		if (!tcph)
+-			goto err;
+-
+ 		offset = i + priv->offset;
+ 
+ 		switch (priv->len) {
+@@ -325,9 +321,9 @@ static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr,
+ 	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
+ 		goto drop;
+ 
+-	opt = (u8 *)nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
+-	if (!opt)
+-		goto err;
++	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
++	opt = (u8 *)tcph;
++
+ 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+ 		unsigned int j;
+ 
+diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
+index e8961094a2822..b46a6a5120583 100644
+--- a/net/netfilter/xt_sctp.c
++++ b/net/netfilter/xt_sctp.c
+@@ -149,6 +149,8 @@ static int sctp_mt_check(const struct xt_mtchk_param *par)
+ {
+ 	const struct xt_sctp_info *info = par->matchinfo;
+ 
++	if (info->flag_count > ARRAY_SIZE(info->flag_info))
++		return -EINVAL;
+ 	if (info->flags & ~XT_SCTP_VALID_FLAGS)
+ 		return -EINVAL;
+ 	if (info->invflags & ~XT_SCTP_VALID_FLAGS)
+diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
+index 177b40d08098b..117d4615d6684 100644
+--- a/net/netfilter/xt_u32.c
++++ b/net/netfilter/xt_u32.c
+@@ -96,11 +96,32 @@ static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	return ret ^ data->invert;
+ }
+ 
++static int u32_mt_checkentry(const struct xt_mtchk_param *par)
++{
++	const struct xt_u32 *data = par->matchinfo;
++	const struct xt_u32_test *ct;
++	unsigned int i;
++
++	if (data->ntests > ARRAY_SIZE(data->tests))
++		return -EINVAL;
++
++	for (i = 0; i < data->ntests; ++i) {
++		ct = &data->tests[i];
++
++		if (ct->nnums > ARRAY_SIZE(ct->location) ||
++		    ct->nvalues > ARRAY_SIZE(ct->value))
++			return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static struct xt_match xt_u32_mt_reg __read_mostly = {
+ 	.name       = "u32",
+ 	.revision   = 0,
+ 	.family     = NFPROTO_UNSPEC,
+ 	.match      = u32_mt,
++	.checkentry = u32_mt_checkentry,
+ 	.matchsize  = sizeof(struct xt_u32),
+ 	.me         = THIS_MODULE,
+ };
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index eb8ccbd58df74..96e91ab71573c 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -660,6 +660,11 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		goto out_release;
+ 	}
+ 
++	if (sock->state == SS_CONNECTING) {
++		err = -EALREADY;
++		goto out_release;
++	}
++
+ 	sk->sk_state   = TCP_CLOSE;
+ 	sock->state = SS_UNCONNECTED;
+ 
+diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
+index 6fdba069f6bfd..da34fd4c92695 100644
+--- a/net/sched/em_meta.c
++++ b/net/sched/em_meta.c
+@@ -502,7 +502,7 @@ META_COLLECTOR(int_sk_lingertime)
+ 		*err = -1;
+ 		return;
+ 	}
+-	dst->value = sk->sk_lingertime / HZ;
++	dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
+ }
+ 
+ META_COLLECTOR(int_sk_err_qlen)
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 70b0c5873d326..61d52594ff6d8 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1012,6 +1012,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		if (parent == NULL)
+ 			return -ENOENT;
+ 	}
++	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
++		NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
++		return -EINVAL;
++	}
+ 
+ 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
+ 		return -EINVAL;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index f5834af5fad53..7c77565c39d19 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1820,7 +1820,7 @@ void smc_close_non_accepted(struct sock *sk)
+ 	lock_sock(sk);
+ 	if (!sk->sk_lingertime)
+ 		/* wait for peer closing */
+-		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
++		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
+ 	__smc_release(smc);
+ 	release_sock(sk);
+ 	sock_put(sk); /* sock_hold above */
+diff --git a/net/socket.c b/net/socket.c
+index 2b0e54b2405c8..f49edb9b49185 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3519,7 +3519,11 @@ EXPORT_SYMBOL(kernel_accept);
+ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ 		   int flags)
+ {
+-	return sock->ops->connect(sock, addr, addrlen, flags);
++	struct sockaddr_storage address;
++
++	memcpy(&address, addr, addrlen);
++
++	return sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, flags);
+ }
+ EXPORT_SYMBOL(kernel_connect);
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2eb8df44f894d..589020ed909dc 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1244,8 +1244,10 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+ 	if (ret != head->iov_len)
+ 		goto out;
+ 
+-	if (xdr_buf_pagecount(xdr))
++	if (xdr_buf_pagecount(xdr)) {
+ 		xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base);
++		xdr->bvec[0].bv_len -= offset_in_page(xdr->page_base);
++	}
+ 
+ 	msg.msg_flags = MSG_SPLICE_PAGES;
+ 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 8bcf8e293308e..4dcbc40d07c85 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -323,6 +323,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
+ 	[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG },
+ 	[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG },
+ 	[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK] = { .type = NLA_FLAG },
++	[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
+index bde6591cb20c5..af235bd6615b1 100644
+--- a/samples/bpf/tracex3_kern.c
++++ b/samples/bpf/tracex3_kern.c
+@@ -11,6 +11,12 @@
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+ 
++struct start_key {
++	dev_t dev;
++	u32 _pad;
++	sector_t sector;
++};
++
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_HASH);
+ 	__type(key, long);
+@@ -18,16 +24,17 @@ struct {
+ 	__uint(max_entries, 4096);
+ } my_map SEC(".maps");
+ 
+-/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+- * example will no longer be meaningful
+- */
+-SEC("kprobe/blk_mq_start_request")
+-int bpf_prog1(struct pt_regs *ctx)
++/* from /sys/kernel/tracing/events/block/block_io_start/format */
++SEC("tracepoint/block/block_io_start")
++int bpf_prog1(struct trace_event_raw_block_rq *ctx)
+ {
+-	long rq = PT_REGS_PARM1(ctx);
+ 	u64 val = bpf_ktime_get_ns();
++	struct start_key key = {
++		.dev = ctx->dev,
++		.sector = ctx->sector
++	};
+ 
+-	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
++	bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);
+ 	return 0;
+ }
+ 
+@@ -49,21 +56,26 @@ struct {
+ 	__uint(max_entries, SLOTS);
+ } lat_map SEC(".maps");
+ 
+-SEC("kprobe/__blk_account_io_done")
+-int bpf_prog2(struct pt_regs *ctx)
++/* from /sys/kernel/tracing/events/block/block_io_done/format */
++SEC("tracepoint/block/block_io_done")
++int bpf_prog2(struct trace_event_raw_block_rq *ctx)
+ {
+-	long rq = PT_REGS_PARM1(ctx);
++	struct start_key key = {
++		.dev = ctx->dev,
++		.sector = ctx->sector
++	};
++
+ 	u64 *value, l, base;
+ 	u32 index;
+ 
+-	value = bpf_map_lookup_elem(&my_map, &rq);
++	value = bpf_map_lookup_elem(&my_map, &key);
+ 	if (!value)
+ 		return 0;
+ 
+ 	u64 cur_time = bpf_ktime_get_ns();
+ 	u64 delta = cur_time - *value;
+ 
+-	bpf_map_delete_elem(&my_map, &rq);
++	bpf_map_delete_elem(&my_map, &key);
+ 
+ 	/* the lines below are computing index = log10(delta)*10
+ 	 * using integer arithmetic
+diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
+index acad5712d8b4f..fd602c2774b8b 100644
+--- a/samples/bpf/tracex6_kern.c
++++ b/samples/bpf/tracex6_kern.c
+@@ -2,6 +2,8 @@
+ #include <linux/version.h>
+ #include <uapi/linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
++#include <bpf/bpf_tracing.h>
++#include <bpf/bpf_core_read.h>
+ 
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+@@ -45,13 +47,24 @@ int bpf_prog1(struct pt_regs *ctx)
+ 	return 0;
+ }
+ 
+-SEC("kprobe/htab_map_lookup_elem")
+-int bpf_prog2(struct pt_regs *ctx)
++/*
++ * Since *_map_lookup_elem can't be expected to trigger bpf programs
++ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
++ * program will be attached to bpf_map_copy_value (which is called
++ * from map_lookup_elem) and will only filter the hashtable type.
++ */
++SEC("kprobe/bpf_map_copy_value")
++int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
+ {
+ 	u32 key = bpf_get_smp_processor_id();
+ 	struct bpf_perf_event_value *val, buf;
++	enum bpf_map_type type;
+ 	int error;
+ 
++	type = BPF_CORE_READ(map, map_type);
++	if (type != BPF_MAP_TYPE_HASH)
++		return 0;
++
+ 	error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
+ 	if (error)
+ 		return 0;
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index 50a92c4e9984e..fab74ca9df6fc 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -64,6 +64,9 @@ LX_GDBPARSED(IRQ_HIDDEN)
+ 
+ /* linux/module.h */
+ LX_GDBPARSED(MOD_TEXT)
++LX_GDBPARSED(MOD_DATA)
++LX_GDBPARSED(MOD_RODATA)
++LX_GDBPARSED(MOD_RO_AFTER_INIT)
+ 
+ /* linux/mount.h */
+ LX_VALUE(MNT_NOSUID)
+diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
+index 261f28640f4cd..f76a43bfa15fc 100644
+--- a/scripts/gdb/linux/modules.py
++++ b/scripts/gdb/linux/modules.py
+@@ -73,11 +73,17 @@ class LxLsmod(gdb.Command):
+                 "        " if utils.get_long_type().sizeof == 8 else ""))
+ 
+         for module in module_list():
+-            layout = module['mem'][constants.LX_MOD_TEXT]
++            text = module['mem'][constants.LX_MOD_TEXT]
++            text_addr = str(text['base']).split()[0]
++            total_size = 0
++
++            for i in range(constants.LX_MOD_TEXT, constants.LX_MOD_RO_AFTER_INIT + 1):
++                total_size += module['mem'][i]['size']
++
+             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
+-                address=str(layout['base']).split()[0],
++                address=text_addr,
+                 name=module['name'].string(),
+-                size=str(layout['size']),
++                size=str(total_size),
+                 ref=str(module['refcnt']['counter'] - 1)))
+ 
+             t = self._module_use_type.get_type().pointer()
+diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
+index aebbf19139709..7a925d2b20fc7 100755
+--- a/scripts/rust_is_available.sh
++++ b/scripts/rust_is_available.sh
+@@ -2,8 +2,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Tests whether a suitable Rust toolchain is available.
+-#
+-# Pass `-v` for human output and more checks (as warnings).
+ 
+ set -e
+ 
+@@ -23,21 +21,17 @@ get_canonical_version()
+ 
+ # Check that the Rust compiler exists.
+ if ! command -v "$RUSTC" >/dev/null; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust compiler '$RUSTC' could not be found."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust compiler '$RUSTC' could not be found."
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+ # Check that the Rust bindings generator exists.
+ if ! command -v "$BINDGEN" >/dev/null; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+@@ -53,16 +47,14 @@ rust_compiler_min_version=$($min_tool_version rustc)
+ rust_compiler_cversion=$(get_canonical_version $rust_compiler_version)
+ rust_compiler_min_cversion=$(get_canonical_version $rust_compiler_min_version)
+ if [ "$rust_compiler_cversion" -lt "$rust_compiler_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust compiler '$RUSTC' is too old."
+-		echo >&2 "***   Your version:    $rust_compiler_version"
+-		echo >&2 "***   Minimum version: $rust_compiler_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust compiler '$RUSTC' is too old."
++	echo >&2 "***   Your version:    $rust_compiler_version"
++	echo >&2 "***   Minimum version: $rust_compiler_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+-if [ "$1" = -v ] && [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
++if [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
+ 	echo >&2 "***"
+ 	echo >&2 "*** Rust compiler '$RUSTC' is too new. This may or may not work."
+ 	echo >&2 "***   Your version:     $rust_compiler_version"
+@@ -82,16 +74,14 @@ rust_bindings_generator_min_version=$($min_tool_version bindgen)
+ rust_bindings_generator_cversion=$(get_canonical_version $rust_bindings_generator_version)
+ rust_bindings_generator_min_cversion=$(get_canonical_version $rust_bindings_generator_min_version)
+ if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
+-		echo >&2 "***   Your version:    $rust_bindings_generator_version"
+-		echo >&2 "***   Minimum version: $rust_bindings_generator_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
++	echo >&2 "***   Your version:    $rust_bindings_generator_version"
++	echo >&2 "***   Minimum version: $rust_bindings_generator_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+-if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
++if [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
+ 	echo >&2 "***"
+ 	echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
+ 	echo >&2 "***   Your version:     $rust_bindings_generator_version"
+@@ -100,23 +90,39 @@ if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_ge
+ fi
+ 
+ # Check that the `libclang` used by the Rust bindings generator is suitable.
++#
++# In order to do that, first invoke `bindgen` to get the `libclang` version
++# found by `bindgen`. This step may already fail if, for instance, `libclang`
++# is not found, thus inform the user in such a case.
++bindgen_libclang_output=$( \
++	LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null
++) || bindgen_libclang_code=$?
++if [ -n "$bindgen_libclang_code" ]; then
++	echo >&2 "***"
++	echo >&2 "*** Running '$BINDGEN' to check the libclang version (used by the Rust"
++	echo >&2 "*** bindings generator) failed with code $bindgen_libclang_code. This may be caused by"
++	echo >&2 "*** a failure to locate libclang. See output and docs below for details:"
++	echo >&2 "***"
++	echo >&2 "$bindgen_libclang_output"
++	echo >&2 "***"
++	exit 1
++fi
++
++# `bindgen` returned successfully, thus use the output to check that the version
++# of the `libclang` found by the Rust bindings generator is suitable.
+ bindgen_libclang_version=$( \
+-	LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null \
+-		| grep -F 'clang version ' \
+-		| grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+-		| head -n 1 \
++	echo "$bindgen_libclang_output" \
++		| sed -nE 's:.*clang version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ bindgen_libclang_min_version=$($min_tool_version llvm)
+ bindgen_libclang_cversion=$(get_canonical_version $bindgen_libclang_version)
+ bindgen_libclang_min_cversion=$(get_canonical_version $bindgen_libclang_min_version)
+ if [ "$bindgen_libclang_cversion" -lt "$bindgen_libclang_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
+-		echo >&2 "***   Your version:    $bindgen_libclang_version"
+-		echo >&2 "***   Minimum version: $bindgen_libclang_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
++	echo >&2 "***   Your version:    $bindgen_libclang_version"
++	echo >&2 "***   Minimum version: $bindgen_libclang_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+@@ -125,21 +131,19 @@ fi
+ #
+ # In the future, we might be able to perform a full version check, see
+ # https://github.com/rust-lang/rust-bindgen/issues/2138.
+-if [ "$1" = -v ]; then
+-	cc_name=$($(dirname $0)/cc-version.sh "$CC" | cut -f1 -d' ')
+-	if [ "$cc_name" = Clang ]; then
+-		clang_version=$( \
+-			LC_ALL=C "$CC" --version 2>/dev/null \
+-				| sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+-		)
+-		if [ "$clang_version" != "$bindgen_libclang_version" ]; then
+-			echo >&2 "***"
+-			echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
+-			echo >&2 "*** version does not match Clang's. This may be a problem."
+-			echo >&2 "***   libclang version: $bindgen_libclang_version"
+-			echo >&2 "***   Clang version:    $clang_version"
+-			echo >&2 "***"
+-		fi
++cc_name=$($(dirname $0)/cc-version.sh $CC | cut -f1 -d' ')
++if [ "$cc_name" = Clang ]; then
++	clang_version=$( \
++		LC_ALL=C $CC --version 2>/dev/null \
++			| sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
++	)
++	if [ "$clang_version" != "$bindgen_libclang_version" ]; then
++		echo >&2 "***"
++		echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
++		echo >&2 "*** version does not match Clang's. This may be a problem."
++		echo >&2 "***   libclang version: $bindgen_libclang_version"
++		echo >&2 "***   Clang version:    $clang_version"
++		echo >&2 "***"
+ 	fi
+ fi
+ 
+@@ -150,11 +154,9 @@ rustc_sysroot=$("$RUSTC" $KRUSTFLAGS --print sysroot)
+ rustc_src=${RUST_LIB_SRC:-"$rustc_sysroot/lib/rustlib/src/rust/library"}
+ rustc_src_core="$rustc_src/core/src/lib.rs"
+ if [ ! -e "$rustc_src_core" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Source code for the 'core' standard library could not be found"
+-		echo >&2 "*** at '$rustc_src_core'."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Source code for the 'core' standard library could not be found"
++	echo >&2 "*** at '$rustc_src_core'."
++	echo >&2 "***"
+ 	exit 1
+ fi
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 60a511c6b583e..c17660bf5f347 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -248,18 +248,6 @@ config IMA_APPRAISE_MODSIG
+ 	   The modsig keyword can be used in the IMA policy to allow a hook
+ 	   to accept such signatures.
+ 
+-config IMA_TRUSTED_KEYRING
+-	bool "Require all keys on the .ima keyring be signed (deprecated)"
+-	depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
+-	depends on INTEGRITY_ASYMMETRIC_KEYS
+-	select INTEGRITY_TRUSTED_KEYRING
+-	default y
+-	help
+-	   This option requires that all keys added to the .ima
+-	   keyring be signed by a key on the system trusted keyring.
+-
+-	   This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
+-
+ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ 	bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+ 	depends on SYSTEM_TRUSTED_KEYRING
+diff --git a/security/security.c b/security/security.c
+index b720424ca37d9..549104a447e36 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1138,6 +1138,20 @@ void security_bprm_committed_creds(struct linux_binprm *bprm)
+ 	call_void_hook(bprm_committed_creds, bprm);
+ }
+ 
++/**
++ * security_fs_context_submount() - Initialise fc->security
++ * @fc: new filesystem context
++ * @reference: dentry reference for submount/remount
++ *
++ * Fill out the ->security field for a new fs_context.
++ *
++ * Return: Returns 0 on success or negative error code on failure.
++ */
++int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
++{
++	return call_int_hook(fs_context_submount, 0, fc, reference);
++}
++
+ /**
+  * security_fs_context_dup() - Duplicate a fs_context LSM blob
+  * @fc: destination filesystem context
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index d06e350fedee5..afd6637440418 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2745,6 +2745,27 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ 				   FILESYSTEM__UNMOUNT, NULL);
+ }
+ 
++static int selinux_fs_context_submount(struct fs_context *fc,
++				   struct super_block *reference)
++{
++	const struct superblock_security_struct *sbsec;
++	struct selinux_mnt_opts *opts;
++
++	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
++	if (!opts)
++		return -ENOMEM;
++
++	sbsec = selinux_superblock(reference);
++	if (sbsec->flags & FSCONTEXT_MNT)
++		opts->fscontext_sid = sbsec->sid;
++	if (sbsec->flags & CONTEXT_MNT)
++		opts->context_sid = sbsec->mntpoint_sid;
++	if (sbsec->flags & DEFCONTEXT_MNT)
++		opts->defcontext_sid = sbsec->def_sid;
++	fc->security = opts;
++	return 0;
++}
++
+ static int selinux_fs_context_dup(struct fs_context *fc,
+ 				  struct fs_context *src_fc)
+ {
+@@ -7182,6 +7203,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
+ 	/*
+ 	 * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
+ 	 */
++	LSM_HOOK_INIT(fs_context_submount, selinux_fs_context_submount),
+ 	LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+ 	LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+ 	LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 6e270cf3fd30c..a8201cf22f20b 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -614,6 +614,56 @@ out_opt_err:
+ 	return -EINVAL;
+ }
+ 
++/**
++ * smack_fs_context_submount - Initialise security data for a filesystem context
++ * @fc: The filesystem context.
++ * @reference: reference superblock
++ *
++ * Returns 0 on success or -ENOMEM on error.
++ */
++static int smack_fs_context_submount(struct fs_context *fc,
++				 struct super_block *reference)
++{
++	struct superblock_smack *sbsp;
++	struct smack_mnt_opts *ctx;
++	struct inode_smack *isp;
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx)
++		return -ENOMEM;
++	fc->security = ctx;
++
++	sbsp = smack_superblock(reference);
++	isp = smack_inode(reference->s_root->d_inode);
++
++	if (sbsp->smk_default) {
++		ctx->fsdefault = kstrdup(sbsp->smk_default->smk_known, GFP_KERNEL);
++		if (!ctx->fsdefault)
++			return -ENOMEM;
++	}
++
++	if (sbsp->smk_floor) {
++		ctx->fsfloor = kstrdup(sbsp->smk_floor->smk_known, GFP_KERNEL);
++		if (!ctx->fsfloor)
++			return -ENOMEM;
++	}
++
++	if (sbsp->smk_hat) {
++		ctx->fshat = kstrdup(sbsp->smk_hat->smk_known, GFP_KERNEL);
++		if (!ctx->fshat)
++			return -ENOMEM;
++	}
++
++	if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
++		if (sbsp->smk_root) {
++			ctx->fstransmute = kstrdup(sbsp->smk_root->smk_known, GFP_KERNEL);
++			if (!ctx->fstransmute)
++				return -ENOMEM;
++		}
++	}
++	return 0;
++}
++
+ /**
+  * smack_fs_context_dup - Duplicate the security data on fs_context duplication
+  * @fc: The new filesystem context.
+@@ -4876,6 +4926,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
+ 	LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
+ 	LSM_HOOK_INIT(syslog, smack_syslog),
+ 
++	LSM_HOOK_INIT(fs_context_submount, smack_fs_context_submount),
+ 	LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup),
+ 	LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
+ 
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 5590eaad241bb..25f67d1b5c73e 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -896,7 +896,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ 	}
+ 
+ 	ret = sscanf(rule, "%d", &catlen);
+-	if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
++	if (ret != 1 || catlen < 0 || catlen > SMACK_CIPSO_MAXCATNUM)
+ 		goto out;
+ 
+ 	if (format == SMK_FIXED24_FMT &&
+diff --git a/sound/Kconfig b/sound/Kconfig
+index 0ddfb717b81dc..466e848689bd1 100644
+--- a/sound/Kconfig
++++ b/sound/Kconfig
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ menuconfig SOUND
+ 	tristate "Sound card support"
+-	depends on HAS_IOMEM
++	depends on HAS_IOMEM || UML
+ 	help
+ 	  If you have a sound card in your computer, i.e. if it can say more
+ 	  than an occasional beep, say Y.
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 42c2ada8e8887..c96483091f30a 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -253,10 +253,14 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
+ 		goto error;
+ 	}
+ 
+-	if (refine)
++	if (refine) {
+ 		err = snd_pcm_hw_refine(substream, data);
+-	else
++		if (err < 0)
++			goto error;
++		err = fixup_unreferenced_params(substream, data);
++	} else {
+ 		err = snd_pcm_hw_params(substream, data);
++	}
+ 	if (err < 0)
+ 		goto error;
+ 	if (copy_to_user(data32, data, sizeof(*data32)) ||
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 174585bf59d29..b603bb93f8960 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -187,8 +187,13 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
+ 	err = expand_var_event(event, 0, len, buf, in_kernel);
+ 	if (err < 0)
+ 		return err;
+-	if (len != newlen)
+-		memset(buf + len, 0, newlen - len);
++	if (len != newlen) {
++		if (in_kernel)
++			memset(buf + len, 0, newlen - len);
++		else if (clear_user((__force void __user *)buf + len,
++				    newlen - len))
++			return -EFAULT;
++	}
+ 	return newlen;
+ }
+ EXPORT_SYMBOL(snd_seq_expand_var_event);
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 246348766ec16..1e4e1e428b205 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -984,7 +984,7 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ {
+ 	struct snd_ump_endpoint *ump = substream->rmidi->private_data;
+ 	int dir = substream->stream;
+-	int group = substream->number;
++	int group = ump->legacy_mapping[substream->number];
+ 	int err;
+ 
+ 	mutex_lock(&ump->open_mutex);
+@@ -1016,7 +1016,7 @@ static int snd_ump_legacy_close(struct snd_rawmidi_substream *substream)
+ {
+ 	struct snd_ump_endpoint *ump = substream->rmidi->private_data;
+ 	int dir = substream->stream;
+-	int group = substream->number;
++	int group = ump->legacy_mapping[substream->number];
+ 
+ 	mutex_lock(&ump->open_mutex);
+ 	spin_lock_irq(&ump->legacy_locks[dir]);
+@@ -1123,21 +1123,62 @@ static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src,
+ 	spin_unlock_irqrestore(&ump->legacy_locks[dir], flags);
+ }
+ 
++/* Fill ump->legacy_mapping[] for groups to be used for legacy rawmidi */
++static int fill_legacy_mapping(struct snd_ump_endpoint *ump)
++{
++	struct snd_ump_block *fb;
++	unsigned int group_maps = 0;
++	int i, num;
++
++	if (ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS) {
++		list_for_each_entry(fb, &ump->block_list, list) {
++			for (i = 0; i < fb->info.num_groups; i++)
++				group_maps |= 1U << (fb->info.first_group + i);
++		}
++		if (!group_maps)
++			ump_info(ump, "No UMP Group is found in FB\n");
++	}
++
++	/* use all groups for non-static case */
++	if (!group_maps)
++		group_maps = (1U << SNDRV_UMP_MAX_GROUPS) - 1;
++
++	num = 0;
++	for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++)
++		if (group_maps & (1U << i))
++			ump->legacy_mapping[num++] = i;
++
++	return num;
++}
++
++static void fill_substream_names(struct snd_ump_endpoint *ump,
++				 struct snd_rawmidi *rmidi, int dir)
++{
++	struct snd_rawmidi_substream *s;
++
++	list_for_each_entry(s, &rmidi->streams[dir].substreams, list)
++		snprintf(s->name, sizeof(s->name), "Group %d (%.16s)",
++			 ump->legacy_mapping[s->number] + 1, ump->info.name);
++}
++
+ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ 				  char *id, int device)
+ {
+ 	struct snd_rawmidi *rmidi;
+ 	bool input, output;
+-	int err;
++	int err, num;
+ 
+-	ump->out_cvts = kcalloc(16, sizeof(*ump->out_cvts), GFP_KERNEL);
++	ump->out_cvts = kcalloc(SNDRV_UMP_MAX_GROUPS,
++				sizeof(*ump->out_cvts), GFP_KERNEL);
+ 	if (!ump->out_cvts)
+ 		return -ENOMEM;
+ 
++	num = fill_legacy_mapping(ump);
++
+ 	input = ump->core.info_flags & SNDRV_RAWMIDI_INFO_INPUT;
+ 	output = ump->core.info_flags & SNDRV_RAWMIDI_INFO_OUTPUT;
+ 	err = snd_rawmidi_new(ump->core.card, id, device,
+-			      output ? 16 : 0, input ? 16 : 0,
++			      output ? num : 0, input ? num : 0,
+ 			      &rmidi);
+ 	if (err < 0) {
+ 		kfree(ump->out_cvts);
+@@ -1150,10 +1191,17 @@ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ 	if (output)
+ 		snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
+ 				    &snd_ump_legacy_output_ops);
++	snprintf(rmidi->name, sizeof(rmidi->name), "%.68s (MIDI 1.0)",
++		 ump->info.name);
+ 	rmidi->info_flags = ump->core.info_flags & ~SNDRV_RAWMIDI_INFO_UMP;
+ 	rmidi->ops = &snd_ump_legacy_ops;
+ 	rmidi->private_data = ump;
+ 	ump->legacy_rmidi = rmidi;
++	if (input)
++		fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
++	if (output)
++		fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
++
+ 	ump_dbg(ump, "Created a legacy rawmidi #%d (%s)\n", device, id);
+ 	return 0;
+ }
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 80a65b8ad7b9b..25f93e56cfc7a 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -2069,10 +2069,9 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
+ 		.dev_disconnect =	snd_ac97_dev_disconnect,
+ 	};
+ 
+-	if (!rac97)
+-		return -EINVAL;
+-	if (snd_BUG_ON(!bus || !template))
++	if (snd_BUG_ON(!bus || !template || !rac97))
+ 		return -EINVAL;
++	*rac97 = NULL;
+ 	if (snd_BUG_ON(template->num >= 4))
+ 		return -EINVAL;
+ 	if (bus->codec[template->num])
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 0ba1fbcbb21e4..627899959ffe8 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 
+ 	/* Initialize CS42L42 companion codec */
+ 	cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num);
+-	usleep_range(30000, 35000);
++	msleep(CS42L42_INIT_TIMEOUT_MS);
+ 
+ 	/* Clear interrupts, by reading interrupt status registers */
+ 	cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
+diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
+index 2a8dfb4ff046b..937e9387abdc7 100644
+--- a/sound/pci/hda/patch_cs8409.h
++++ b/sound/pci/hda/patch_cs8409.h
+@@ -229,6 +229,7 @@ enum cs8409_coefficient_index_registers {
+ #define CS42L42_I2C_SLEEP_US			(2000)
+ #define CS42L42_PDN_TIMEOUT_US			(250000)
+ #define CS42L42_PDN_SLEEP_US			(2000)
++#define CS42L42_INIT_TIMEOUT_MS			(45)
+ #define CS42L42_FULL_SCALE_VOL_MASK		(2)
+ #define CS42L42_FULL_SCALE_VOL_0DB		(1)
+ #define CS42L42_FULL_SCALE_VOL_MINUS6DB		(0)
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index c2de4ee721836..947473d2da7d2 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -1708,6 +1708,7 @@ config SND_SOC_STA529
+ config SND_SOC_STAC9766
+ 	tristate
+ 	depends on SND_SOC_AC97_BUS
++	select REGMAP_AC97
+ 
+ config SND_SOC_STI_SAS
+ 	tristate "codec Audio support for STI SAS codec"
+diff --git a/sound/soc/codecs/cs43130.h b/sound/soc/codecs/cs43130.h
+index 1dd8936743132..90e8895275e77 100644
+--- a/sound/soc/codecs/cs43130.h
++++ b/sound/soc/codecs/cs43130.h
+@@ -381,88 +381,88 @@ struct cs43130_clk_gen {
+ 
+ /* frm_size = 16 */
+ static const struct cs43130_clk_gen cs43130_16_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	10, }, },
+-	{ 22579200,	44100,		.v = { 32,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	5, }, },
+-	{ 22579200,	88200,		.v = { 16,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	10, }, },
+-	{ 22579200,	176400,		.v = { 8,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	20, }, },
+-	{ 22579200,	352800,		.v = { 4,	1, }, },
+-	{ 22579200,	384000,		.v = { 147,	40, }, },
+-	{ 24576000,	32000,		.v = { 48,	1, }, },
+-	{ 24576000,	44100,		.v = { 5120,	147, }, },
+-	{ 24576000,	48000,		.v = { 32,	1, }, },
+-	{ 24576000,	88200,		.v = { 2560,	147, }, },
+-	{ 24576000,	96000,		.v = { 16,	1, }, },
+-	{ 24576000,	176400,		.v = { 1280,	147, }, },
+-	{ 24576000,	192000,		.v = { 8,	1, }, },
+-	{ 24576000,	352800,		.v = { 640,	147, }, },
+-	{ 24576000,	384000,		.v = { 4,	1, }, },
++	{ 22579200,	32000,		.v = { 10,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	32, }, },
++	{ 22579200,	48000,		.v = { 5,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	16, }, },
++	{ 22579200,	96000,		.v = { 10,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	8, }, },
++	{ 22579200,	192000,		.v = { 20,	147, }, },
++	{ 22579200,	352800,		.v = { 1,	4, }, },
++	{ 22579200,	384000,		.v = { 40,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	48, }, },
++	{ 24576000,	44100,		.v = { 147,	5120, }, },
++	{ 24576000,	48000,		.v = { 1,	32, }, },
++	{ 24576000,	88200,		.v = { 147,	2560, }, },
++	{ 24576000,	96000,		.v = { 1,	16, }, },
++	{ 24576000,	176400,		.v = { 147,	1280, }, },
++	{ 24576000,	192000,		.v = { 1,	8, }, },
++	{ 24576000,	352800,		.v = { 147,	640, }, },
++	{ 24576000,	384000,		.v = { 1,	4, }, },
+ };
+ 
+ /* frm_size = 32 */
+ static const struct cs43130_clk_gen cs43130_32_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	20, }, },
+-	{ 22579200,	44100,		.v = { 16,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	10, }, },
+-	{ 22579200,	88200,		.v = { 8,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	20, }, },
+-	{ 22579200,	176400,		.v = { 4,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	40, }, },
+-	{ 22579200,	352800,		.v = { 2,	1, }, },
+-	{ 22579200,	384000,		.v = { 147,	80, }, },
+-	{ 24576000,	32000,		.v = { 24,	1, }, },
+-	{ 24576000,	44100,		.v = { 2560,	147, }, },
+-	{ 24576000,	48000,		.v = { 16,	1, }, },
+-	{ 24576000,	88200,		.v = { 1280,	147, }, },
+-	{ 24576000,	96000,		.v = { 8,	1, }, },
+-	{ 24576000,	176400,		.v = { 640,	147, }, },
+-	{ 24576000,	192000,		.v = { 4,	1, }, },
+-	{ 24576000,	352800,		.v = { 320,	147, }, },
+-	{ 24576000,	384000,		.v = { 2,	1, }, },
++	{ 22579200,	32000,		.v = { 20,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	16, }, },
++	{ 22579200,	48000,		.v = { 10,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	8, }, },
++	{ 22579200,	96000,		.v = { 20,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	4, }, },
++	{ 22579200,	192000,		.v = { 40,	147, }, },
++	{ 22579200,	352800,		.v = { 1,	2, }, },
++	{ 22579200,	384000,		.v = { 80,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	24, }, },
++	{ 24576000,	44100,		.v = { 147,	2560, }, },
++	{ 24576000,	48000,		.v = { 1,	16, }, },
++	{ 24576000,	88200,		.v = { 147,	1280, }, },
++	{ 24576000,	96000,		.v = { 1,	8, }, },
++	{ 24576000,	176400,		.v = { 147,	640, }, },
++	{ 24576000,	192000,		.v = { 1,	4, }, },
++	{ 24576000,	352800,		.v = { 147,	320, }, },
++	{ 24576000,	384000,		.v = { 1,	2, }, },
+ };
+ 
+ /* frm_size = 48 */
+ static const struct cs43130_clk_gen cs43130_48_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 147,	100, }, },
+-	{ 22579200,	44100,		.v = { 32,	3, }, },
+-	{ 22579200,	48000,		.v = { 49,	5, }, },
+-	{ 22579200,	88200,		.v = { 16,	3, }, },
+-	{ 22579200,	96000,		.v = { 49,	10, }, },
+-	{ 22579200,	176400,		.v = { 8,	3, }, },
+-	{ 22579200,	192000,		.v = { 49,	20, }, },
+-	{ 22579200,	352800,		.v = { 4,	3, }, },
+-	{ 22579200,	384000,		.v = { 49,	40, }, },
+-	{ 24576000,	32000,		.v = { 16,	1, }, },
+-	{ 24576000,	44100,		.v = { 5120,	441, }, },
+-	{ 24576000,	48000,		.v = { 32,	3, }, },
+-	{ 24576000,	88200,		.v = { 2560,	441, }, },
+-	{ 24576000,	96000,		.v = { 16,	3, }, },
+-	{ 24576000,	176400,		.v = { 1280,	441, }, },
+-	{ 24576000,	192000,		.v = { 8,	3, }, },
+-	{ 24576000,	352800,		.v = { 640,	441, }, },
+-	{ 24576000,	384000,		.v = { 4,	3, }, },
++	{ 22579200,	32000,		.v = { 100,	147, }, },
++	{ 22579200,	44100,		.v = { 3,	32, }, },
++	{ 22579200,	48000,		.v = { 5,	49, }, },
++	{ 22579200,	88200,		.v = { 3,	16, }, },
++	{ 22579200,	96000,		.v = { 10,	49, }, },
++	{ 22579200,	176400,		.v = { 3,	8, }, },
++	{ 22579200,	192000,		.v = { 20,	49, }, },
++	{ 22579200,	352800,		.v = { 3,	4, }, },
++	{ 22579200,	384000,		.v = { 40,	49, }, },
++	{ 24576000,	32000,		.v = { 1,	16, }, },
++	{ 24576000,	44100,		.v = { 441,	5120, }, },
++	{ 24576000,	48000,		.v = { 3,	32, }, },
++	{ 24576000,	88200,		.v = { 441,	2560, }, },
++	{ 24576000,	96000,		.v = { 3,	16, }, },
++	{ 24576000,	176400,		.v = { 441,	1280, }, },
++	{ 24576000,	192000,		.v = { 3,	8, }, },
++	{ 24576000,	352800,		.v = { 441,	640, }, },
++	{ 24576000,	384000,		.v = { 3,	4, }, },
+ };
+ 
+ /* frm_size = 64 */
+ static const struct cs43130_clk_gen cs43130_64_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	40, }, },
+-	{ 22579200,	44100,		.v = { 8,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	20, }, },
+-	{ 22579200,	88200,		.v = { 4,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	40, }, },
+-	{ 22579200,	176400,		.v = { 2,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	80, }, },
++	{ 22579200,	32000,		.v = { 40,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	8, }, },
++	{ 22579200,	48000,		.v = { 20,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	4, }, },
++	{ 22579200,	96000,		.v = { 40,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	2, }, },
++	{ 22579200,	192000,		.v = { 80,	147, }, },
+ 	{ 22579200,	352800,		.v = { 1,	1, }, },
+-	{ 24576000,	32000,		.v = { 12,	1, }, },
+-	{ 24576000,	44100,		.v = { 1280,	147, }, },
+-	{ 24576000,	48000,		.v = { 8,	1, }, },
+-	{ 24576000,	88200,		.v = { 640,	147, }, },
+-	{ 24576000,	96000,		.v = { 4,	1, }, },
+-	{ 24576000,	176400,		.v = { 320,	147, }, },
+-	{ 24576000,	192000,		.v = { 2,	1, }, },
+-	{ 24576000,	352800,		.v = { 160,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	12, }, },
++	{ 24576000,	44100,		.v = { 147,	1280, }, },
++	{ 24576000,	48000,		.v = { 1,	8, }, },
++	{ 24576000,	88200,		.v = { 147,	640, }, },
++	{ 24576000,	96000,		.v = { 1,	4, }, },
++	{ 24576000,	176400,		.v = { 147,	320, }, },
++	{ 24576000,	192000,		.v = { 1,	2, }, },
++	{ 24576000,	352800,		.v = { 147,	160, }, },
+ 	{ 24576000,	384000,		.v = { 1,	1, }, },
+ };
+ 
+diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c
+index 7cbb8e4758ccc..56d6b0b039a2e 100644
+--- a/sound/soc/fsl/fsl_qmc_audio.c
++++ b/sound/soc/fsl/fsl_qmc_audio.c
+@@ -372,8 +372,8 @@ static int qmc_dai_hw_rule_format_by_channels(struct qmc_dai *qmc_dai,
+ 	struct snd_mask *f_old = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ 	unsigned int channels = params_channels(params);
+ 	unsigned int slot_width;
++	snd_pcm_format_t format;
+ 	struct snd_mask f_new;
+-	unsigned int i;
+ 
+ 	if (!channels || channels > nb_ts) {
+ 		dev_err(qmc_dai->dev, "channels %u not supported\n",
+@@ -384,10 +384,10 @@ static int qmc_dai_hw_rule_format_by_channels(struct qmc_dai *qmc_dai,
+ 	slot_width = (nb_ts / channels) * 8;
+ 
+ 	snd_mask_none(&f_new);
+-	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+-		if (snd_mask_test(f_old, i)) {
+-			if (snd_pcm_format_physical_width(i) <= slot_width)
+-				snd_mask_set(&f_new, i);
++	pcm_for_each_format(format) {
++		if (snd_mask_test_format(f_old, format)) {
++			if (snd_pcm_format_physical_width(format) <= slot_width)
++				snd_mask_set_format(&f_new, format);
+ 		}
+ 	}
+ 
+@@ -551,26 +551,26 @@ static const struct snd_soc_dai_ops qmc_dai_ops = {
+ 
+ static u64 qmc_audio_formats(u8 nb_ts)
+ {
+-	u64 formats;
+-	unsigned int chan_width;
+ 	unsigned int format_width;
+-	int i;
++	unsigned int chan_width;
++	snd_pcm_format_t format;
++	u64 formats_mask;
+ 
+ 	if (!nb_ts)
+ 		return 0;
+ 
+-	formats = 0;
++	formats_mask = 0;
+ 	chan_width = nb_ts * 8;
+-	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
++	pcm_for_each_format(format) {
+ 		/*
+ 		 * Support format other than little-endian (ie big-endian or
+ 		 * without endianness such as 8bit formats)
+ 		 */
+-		if (snd_pcm_format_little_endian(i) == 1)
++		if (snd_pcm_format_little_endian(format) == 1)
+ 			continue;
+ 
+ 		/* Support physical width multiple of 8bit */
+-		format_width = snd_pcm_format_physical_width(i);
++		format_width = snd_pcm_format_physical_width(format);
+ 		if (format_width == 0 || format_width % 8)
+ 			continue;
+ 
+@@ -581,9 +581,9 @@ static u64 qmc_audio_formats(u8 nb_ts)
+ 		if (format_width > chan_width || chan_width % format_width)
+ 			continue;
+ 
+-		formats |= (1ULL << i);
++		formats_mask |= pcm_format_to_bits(format);
+ 	}
+-	return formats;
++	return formats_mask;
+ }
+ 
+ static int qmc_audio_dai_parse(struct qmc_audio *qmc_audio, struct device_node *np,
+diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c
+index 9ded163297477..406ee8db1a3c5 100644
+--- a/sound/soc/loongson/loongson_card.c
++++ b/sound/soc/loongson/loongson_card.c
+@@ -208,7 +208,7 @@ static struct platform_driver loongson_audio_driver = {
+ 	.driver = {
+ 		.name = "loongson-asoc-card",
+ 		.pm = &snd_soc_pm_ops,
+-		.of_match_table = of_match_ptr(loongson_asoc_dt_ids),
++		.of_match_table = loongson_asoc_dt_ids,
+ 	},
+ };
+ module_platform_driver(loongson_audio_driver);
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 02fdb683f75f3..b58921e7921f8 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -193,6 +193,7 @@ open_err:
+ 	snd_soc_dai_compr_shutdown(cpu_dai, cstream, 1);
+ out:
+ 	dpcm_path_put(&list);
++	snd_soc_dpcm_mutex_unlock(fe);
+ be_err:
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ 	snd_soc_card_mutex_unlock(fe->card);
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index afb505461ea17..83cf08c4cf5f6 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -355,9 +355,9 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+ 	unsigned int val;
+ 
+ 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+-	if (val) {
+-		val |= ACP_DSP_TO_HOST_IRQ;
+-		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
++	if (val & ACP_DSP_TO_HOST_IRQ) {
++		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
++				  ACP_DSP_TO_HOST_IRQ);
+ 		return IRQ_WAKE_THREAD;
+ 	}
+ 
+diff --git a/sound/soc/sof/intel/hda-mlink.c b/sound/soc/sof/intel/hda-mlink.c
+index b7cbf66badf5b..df87b3791c23e 100644
+--- a/sound/soc/sof/intel/hda-mlink.c
++++ b/sound/soc/sof/intel/hda-mlink.c
+@@ -331,14 +331,14 @@ static bool hdaml_link_check_cmdsync(u32 __iomem *lsync, u32 cmdsync_mask)
+ 	return !!(val & cmdsync_mask);
+ }
+ 
+-static void hdaml_link_set_lsdiid(u32 __iomem *lsdiid, int dev_num)
++static void hdaml_link_set_lsdiid(u16 __iomem *lsdiid, int dev_num)
+ {
+-	u32 val;
++	u16 val;
+ 
+-	val = readl(lsdiid);
++	val = readw(lsdiid);
+ 	val |= BIT(dev_num);
+ 
+-	writel(val, lsdiid);
++	writew(val, lsdiid);
+ }
+ 
+ static void hdaml_shim_map_stream_ch(u16 __iomem *pcmsycm, int lchan, int hchan,
+@@ -781,6 +781,8 @@ int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+ {
+ 	struct hdac_ext2_link *h2link;
+ 	u16 __iomem *pcmsycm;
++	int hchan;
++	int lchan;
+ 	u16 val;
+ 
+ 	h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+@@ -791,9 +793,17 @@ int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+ 		h2link->instance_offset * sublink +
+ 		AZX_REG_SDW_SHIM_PCMSyCM(y);
+ 
++	if (channel_mask) {
++		hchan = __fls(channel_mask);
++		lchan = __ffs(channel_mask);
++	} else {
++		hchan = 0;
++		lchan = 0;
++	}
++
+ 	mutex_lock(&h2link->eml_lock);
+ 
+-	hdaml_shim_map_stream_ch(pcmsycm, 0, hweight32(channel_mask),
++	hdaml_shim_map_stream_ch(pcmsycm, lchan, hchan,
+ 				 stream_id, dir);
+ 
+ 	mutex_unlock(&h2link->eml_lock);
+diff --git a/sound/usb/midi2.c b/sound/usb/midi2.c
+index ee28357414795..1ec177fe284ed 100644
+--- a/sound/usb/midi2.c
++++ b/sound/usb/midi2.c
+@@ -265,7 +265,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
+ 
+ 	if (!ep)
+ 		return;
+-	for (i = 0; i < ep->num_urbs; ++i) {
++	for (i = 0; i < NUM_URBS; ++i) {
+ 		ctx = &ep->urbs[i];
+ 		if (!ctx->urb)
+ 			break;
+@@ -279,6 +279,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
+ }
+ 
+ /* allocate URBs for an EP */
++/* the callers should handle allocation errors via free_midi_urbs() */
+ static int alloc_midi_urbs(struct snd_usb_midi2_endpoint *ep)
+ {
+ 	struct snd_usb_midi2_urb *ctx;
+@@ -351,8 +352,10 @@ static int snd_usb_midi_v2_open(struct snd_ump_endpoint *ump, int dir)
+ 		return -EIO;
+ 	if (ep->direction == STR_OUT) {
+ 		err = alloc_midi_urbs(ep);
+-		if (err)
++		if (err) {
++			free_midi_urbs(ep);
+ 			return err;
++		}
+ 	}
+ 	return 0;
+ }
+@@ -990,7 +993,7 @@ static int parse_midi_2_0(struct snd_usb_midi2_interface *umidi)
+ 		}
+ 	}
+ 
+-	return attach_legacy_rawmidi(umidi);
++	return 0;
+ }
+ 
+ /* is the given interface for MIDI 2.0? */
+@@ -1059,12 +1062,6 @@ static void set_fallback_rawmidi_names(struct snd_usb_midi2_interface *umidi)
+ 			usb_string(dev, dev->descriptor.iSerialNumber,
+ 				   ump->info.product_id,
+ 				   sizeof(ump->info.product_id));
+-#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
+-		if (ump->legacy_rmidi && !*ump->legacy_rmidi->name)
+-			snprintf(ump->legacy_rmidi->name,
+-				 sizeof(ump->legacy_rmidi->name),
+-				 "%s (MIDI 1.0)", ump->info.name);
+-#endif
+ 	}
+ }
+ 
+@@ -1157,6 +1154,13 @@ int snd_usb_midi_v2_create(struct snd_usb_audio *chip,
+ 	}
+ 
+ 	set_fallback_rawmidi_names(umidi);
++
++	err = attach_legacy_rawmidi(umidi);
++	if (err < 0) {
++		usb_audio_err(chip, "Failed to create legacy rawmidi\n");
++		goto error;
++	}
++
+ 	return 0;
+ 
+  error:
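The free_midi_urbs() change above also illustrates the cleanup contract the new comment names: the free routine walks the whole fixed-size array rather than a count that is only set on success, so callers can invoke it after a partial allocation failure. A minimal standalone sketch of that idiom (names here are illustrative, not the driver's):

	#include <stdlib.h>

	#define NUM_URBS 8

	struct urb_ctx {
		void *urb;	/* NULL until allocated */
	};

	/* Safe after any partial failure: walk the full array and stop at
	 * the first hole, since slots are filled strictly in order. */
	static void free_all(struct urb_ctx *ctx)
	{
		for (int i = 0; i < NUM_URBS; i++) {
			if (!ctx[i].urb)
				break;
			free(ctx[i].urb);
			ctx[i].urb = NULL;
		}
	}

	static int alloc_all(struct urb_ctx *ctx)
	{
		for (int i = 0; i < NUM_URBS; i++) {
			ctx[i].urb = malloc(64);
			if (!ctx[i].urb)
				return -1;	/* caller is expected to call free_all() */
		}
		return 0;
	}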
+diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+index eb05ea53afb12..26004f0c5a6ae 100644
+--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
++++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+@@ -15,6 +15,19 @@ enum bpf_obj_type {
+ 	BPF_OBJ_BTF,
+ };
+ 
++struct bpf_perf_link___local {
++	struct bpf_link link;
++	struct file *perf_file;
++} __attribute__((preserve_access_index));
++
++struct perf_event___local {
++	u64 bpf_cookie;
++} __attribute__((preserve_access_index));
++
++enum bpf_link_type___local {
++	BPF_LINK_TYPE_PERF_EVENT___local = 7,
++};
++
+ extern const void bpf_link_fops __ksym;
+ extern const void bpf_map_fops __ksym;
+ extern const void bpf_prog_fops __ksym;
+@@ -41,10 +54,10 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
+ /* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
+ static __u64 get_bpf_cookie(struct bpf_link *link)
+ {
+-	struct bpf_perf_link *perf_link;
+-	struct perf_event *event;
++	struct bpf_perf_link___local *perf_link;
++	struct perf_event___local *event;
+ 
+-	perf_link = container_of(link, struct bpf_perf_link, link);
++	perf_link = container_of(link, struct bpf_perf_link___local, link);
+ 	event = BPF_CORE_READ(perf_link, perf_file, private_data);
+ 	return BPF_CORE_READ(event, bpf_cookie);
+ }
+@@ -84,10 +97,13 @@ int iter(struct bpf_iter__task_file *ctx)
+ 	e.pid = task->tgid;
+ 	e.id = get_obj_id(file->private_data, obj_type);
+ 
+-	if (obj_type == BPF_OBJ_LINK) {
++	if (obj_type == BPF_OBJ_LINK &&
++	    bpf_core_enum_value_exists(enum bpf_link_type___local,
++				       BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 		struct bpf_link *link = (struct bpf_link *) file->private_data;
+ 
+-		if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) {
++		if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
++						      BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 			e.has_bpf_cookie = true;
+ 			e.bpf_cookie = get_bpf_cookie(link);
+ 		}
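The hunk above relies on two CO-RE portability tools: locally re-declared type "flavors" (the ___local suffix is stripped when libbpf matches names against kernel BTF, and preserve_access_index makes field offsets relocatable) and enum-value probing so the code degrades gracefully on kernels whose BTF lacks the value. A distilled sketch of the probe, assuming the surrounding program includes a vmlinux.h that defines struct bpf_link:

	#include <bpf/bpf_core_read.h>

	/* Local "flavor": only pins the one name/value pair we care about. */
	enum bpf_link_type___local {
		BPF_LINK_TYPE_PERF_EVENT___local = 7,
	};

	static bool link_is_perf_event(struct bpf_link *link)
	{
		/* Ask the target kernel's BTF whether the value exists at all. */
		if (!bpf_core_enum_value_exists(enum bpf_link_type___local,
						BPF_LINK_TYPE_PERF_EVENT___local))
			return false;

		return BPF_CORE_READ(link, type) ==
		       bpf_core_enum_value(enum bpf_link_type___local,
					   BPF_LINK_TYPE_PERF_EVENT___local);
	}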
+diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
+index ce5b65e07ab10..2f80edc682f11 100644
+--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
++++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
+@@ -4,6 +4,12 @@
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+ 
++struct bpf_perf_event_value___local {
++	__u64 counter;
++	__u64 enabled;
++	__u64 running;
++} __attribute__((preserve_access_index));
++
+ /* map of perf event fds, num_cpu * num_metric entries */
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+@@ -15,14 +21,14 @@ struct {
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ 	__uint(key_size, sizeof(u32));
+-	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
+ } fentry_readings SEC(".maps");
+ 
+ /* accumulated readings */
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ 	__uint(key_size, sizeof(u32));
+-	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
+ } accum_readings SEC(".maps");
+ 
+ /* sample counts, one per cpu */
+@@ -39,7 +45,7 @@ const volatile __u32 num_metric = 1;
+ SEC("fentry/XXX")
+ int BPF_PROG(fentry_XXX)
+ {
+-	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
++	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
+ 	u32 key = bpf_get_smp_processor_id();
+ 	u32 i;
+ 
+@@ -53,10 +59,10 @@ int BPF_PROG(fentry_XXX)
+ 	}
+ 
+ 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+-		struct bpf_perf_event_value reading;
++		struct bpf_perf_event_value___local reading;
+ 		int err;
+ 
+-		err = bpf_perf_event_read_value(&events, key, &reading,
++		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
+ 						sizeof(reading));
+ 		if (err)
+ 			return 0;
+@@ -68,14 +74,14 @@ int BPF_PROG(fentry_XXX)
+ }
+ 
+ static inline void
+-fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
++fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
+ {
+-	struct bpf_perf_event_value *before, diff;
++	struct bpf_perf_event_value___local *before, diff;
+ 
+ 	before = bpf_map_lookup_elem(&fentry_readings, &id);
+ 	/* only account samples with a valid fentry_reading */
+ 	if (before && before->counter) {
+-		struct bpf_perf_event_value *accum;
++		struct bpf_perf_event_value___local *accum;
+ 
+ 		diff.counter = after->counter - before->counter;
+ 		diff.enabled = after->enabled - before->enabled;
+@@ -93,7 +99,7 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+ SEC("fexit/XXX")
+ int BPF_PROG(fexit_XXX)
+ {
+-	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
++	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
+ 	u32 cpu = bpf_get_smp_processor_id();
+ 	u32 i, zero = 0;
+ 	int err;
+@@ -102,7 +108,8 @@ int BPF_PROG(fexit_XXX)
+ 	/* read all events before updating the maps, to reduce error */
+ 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+-						readings + i, sizeof(*readings));
++						(void *)(readings + i),
++						sizeof(*readings));
+ 		if (err)
+ 			return 0;
+ 	}
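For context on the counter/enabled/running triple carried by these local structs: when perf multiplexes events, the usual estimate of the true count from one reading is counter * enabled / running. A small user-space illustration, not part of the patch, with field names mirroring struct bpf_perf_event_value___local above:

	#include <stdint.h>

	struct reading {
		uint64_t counter;
		uint64_t enabled;
		uint64_t running;
	};

	static uint64_t scaled_count(const struct reading *r)
	{
		if (!r->running)
			return 0;	/* event was never scheduled in */
		/* may overflow for very large counters; fine for a sketch */
		return r->counter * r->enabled / r->running;
	}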
+diff --git a/tools/include/nolibc/arch-aarch64.h b/tools/include/nolibc/arch-aarch64.h
+index 11f294a406b7c..b8c7b14c4ca85 100644
+--- a/tools/include/nolibc/arch-aarch64.h
++++ b/tools/include/nolibc/arch-aarch64.h
+@@ -175,7 +175,7 @@ char **environ __attribute__((weak));
+ const unsigned long *_auxv __attribute__((weak));
+ 
+ /* startup code */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ #ifdef _NOLIBC_STACKPROTECTOR
+diff --git a/tools/include/nolibc/arch-arm.h b/tools/include/nolibc/arch-arm.h
+index ca4c669874973..bd8bf2ebd43bf 100644
+--- a/tools/include/nolibc/arch-arm.h
++++ b/tools/include/nolibc/arch-arm.h
+@@ -225,7 +225,7 @@ char **environ __attribute__((weak));
+ const unsigned long *_auxv __attribute__((weak));
+ 
+ /* startup code */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ #ifdef _NOLIBC_STACKPROTECTOR
+diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
+index 3d672d925e9e2..1a86f86eab5c5 100644
+--- a/tools/include/nolibc/arch-i386.h
++++ b/tools/include/nolibc/arch-i386.h
+@@ -190,7 +190,7 @@ const unsigned long *_auxv __attribute__((weak));
+  * 2) The deepest stack frame should be set to zero
+  *
+  */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ #ifdef _NOLIBC_STACKPROTECTOR
+diff --git a/tools/include/nolibc/arch-loongarch.h b/tools/include/nolibc/arch-loongarch.h
+index ad3f266e70930..b0279b9411785 100644
+--- a/tools/include/nolibc/arch-loongarch.h
++++ b/tools/include/nolibc/arch-loongarch.h
+@@ -172,7 +172,7 @@ const unsigned long *_auxv __attribute__((weak));
+ #endif
+ 
+ /* startup code */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ #ifdef _NOLIBC_STACKPROTECTOR
+diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
+index db24e0837a39b..67c5d79971107 100644
+--- a/tools/include/nolibc/arch-mips.h
++++ b/tools/include/nolibc/arch-mips.h
+@@ -182,7 +182,7 @@ char **environ __attribute__((weak));
+ const unsigned long *_auxv __attribute__((weak));
+ 
+ /* startup code, note that it's called __start on MIPS */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector __start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector __start(void)
+ {
+ 	__asm__ volatile (
+ 		/*".set nomips16\n"*/
+diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
+index a2e8564e66d6a..cefefc2e93f18 100644
+--- a/tools/include/nolibc/arch-riscv.h
++++ b/tools/include/nolibc/arch-riscv.h
+@@ -180,7 +180,7 @@ char **environ __attribute__((weak));
+ const unsigned long *_auxv __attribute__((weak));
+ 
+ /* startup code */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ 		".option push\n"
+diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
+index 516dff5bff8bc..ed2c33b2de68b 100644
+--- a/tools/include/nolibc/arch-s390.h
++++ b/tools/include/nolibc/arch-s390.h
+@@ -166,7 +166,7 @@ char **environ __attribute__((weak));
+ const unsigned long *_auxv __attribute__((weak));
+ 
+ /* startup code */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ 		"lg	%r2,0(%r15)\n"		/* argument count */
+diff --git a/tools/include/nolibc/arch-x86_64.h b/tools/include/nolibc/arch-x86_64.h
+index 6fc4d83927429..1bbd95f652330 100644
+--- a/tools/include/nolibc/arch-x86_64.h
++++ b/tools/include/nolibc/arch-x86_64.h
+@@ -190,7 +190,7 @@ const unsigned long *_auxv __attribute__((weak));
+  * 2) The deepest stack frame should be zero (the %rbp).
+  *
+  */
+-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
++void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+ {
+ 	__asm__ volatile (
+ #ifdef _NOLIBC_STACKPROTECTOR
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 214f828ece6bf..e07dff7eba600 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1975,9 +1975,9 @@ static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
+ 		return -ENAMETOOLONG;
+ 
+ 	/* gzopen also accepts uncompressed files. */
+-	file = gzopen(buf, "r");
++	file = gzopen(buf, "re");
+ 	if (!file)
+-		file = gzopen("/proc/config.gz", "r");
++		file = gzopen("/proc/config.gz", "re");
+ 
+ 	if (!file) {
+ 		pr_warn("failed to open system Kconfig\n");
+@@ -6157,7 +6157,11 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
+ 	if (main_prog == subprog)
+ 		return 0;
+ 	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+-	if (!relos)
++	/* if new count is zero, reallocarray can return a valid NULL result;
++	 * in this case the previous pointer will be freed, so we *have to*
++	 * reassign old pointer to the new value (even if it's NULL)
++	 */
++	if (!relos && new_cnt)
+ 		return -ENOMEM;
+ 	if (subprog->nr_reloc)
+ 		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+@@ -8528,7 +8532,8 @@ int bpf_program__set_insns(struct bpf_program *prog,
+ 		return -EBUSY;
+ 
+ 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
+-	if (!insns) {
++	/* NULL is a valid return from reallocarray if the new count is zero */
++	if (!insns && new_insn_cnt) {
+ 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
+ 		return -ENOMEM;
+ 	}
+@@ -8558,13 +8563,31 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
+ 	return prog->type;
+ }
+ 
++static size_t custom_sec_def_cnt;
++static struct bpf_sec_def *custom_sec_defs;
++static struct bpf_sec_def custom_fallback_def;
++static bool has_custom_fallback_def;
++static int last_custom_sec_def_handler_id;
++
+ int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+ {
+ 	if (prog->obj->loaded)
+ 		return libbpf_err(-EBUSY);
+ 
++	/* if type is not changed, do nothing */
++	if (prog->type == type)
++		return 0;
++
+ 	prog->type = type;
+-	prog->sec_def = NULL;
++
++	/* If a program type was changed, we need to reset associated SEC()
++	 * handler, as it will be invalid now. The only exception is a generic
++	 * fallback handler, which by definition is program type-agnostic and
++	 * is a catch-all custom handler, optionally set by the application,
++	 * so should be able to handle any type of BPF program.
++	 */
++	if (prog->sec_def != &custom_fallback_def)
++		prog->sec_def = NULL;
+ 	return 0;
+ }
+ 
+@@ -8740,13 +8763,6 @@ static const struct bpf_sec_def section_defs[] = {
+ 	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
+ };
+ 
+-static size_t custom_sec_def_cnt;
+-static struct bpf_sec_def *custom_sec_defs;
+-static struct bpf_sec_def custom_fallback_def;
+-static bool has_custom_fallback_def;
+-
+-static int last_custom_sec_def_handler_id;
+-
+ int libbpf_register_prog_handler(const char *sec,
+ 				 enum bpf_prog_type prog_type,
+ 				 enum bpf_attach_type exp_attach_type,
+@@ -8826,7 +8842,11 @@ int libbpf_unregister_prog_handler(int handler_id)
+ 
+ 	/* try to shrink the array, but it's ok if we couldn't */
+ 	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
+-	if (sec_defs)
++	/* if new count is zero, reallocarray can return a valid NULL result;
++	 * in this case the previous pointer will be freed, so we *have to*
++	 * reassign old pointer to the new value (even if it's NULL)
++	 */
++	if (sec_defs || custom_sec_def_cnt == 0)
+ 		custom_sec_defs = sec_defs;
+ 
+ 	return 0;
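The two comments added above describe a real libc subtlety: shrinking with reallocarray() to a zero count may free the old block and return NULL, so NULL is only an error when the requested count is non-zero. A standalone sketch of the safe pattern (glibc semantics; names illustrative):

	#define _DEFAULT_SOURCE
	#include <stdlib.h>

	static int shrink_ints(int **arr, size_t *cnt, size_t new_cnt)
	{
		int *tmp = reallocarray(*arr, new_cnt, sizeof(**arr));

		if (!tmp && new_cnt)
			return -1;	/* genuine allocation failure */

		/* tmp may legitimately be NULL when new_cnt == 0: the old
		 * block was freed, so the stale pointer must be dropped. */
		*arr = tmp;
		*cnt = new_cnt;
		return 0;
	}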
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index f1a141555f084..37455d00b239c 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -852,8 +852,11 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
+ 		 * system is so exhausted on memory, it's the least of user's
+ 		 * concerns, probably.
+ 		 * So just do our best here to return those IDs to usdt_manager.
++		 * Another case where we can legitimately get NULL is when
++		 * new_cnt is zero: the old buffer is freed, so NULL must not
++		 * be treated as an allocation failure then.
+ 		 */
+-		if (new_free_ids) {
++		if (new_free_ids || new_cnt == 0) {
+ 			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
+ 			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
+ 			man->free_spec_ids = new_free_ids;
+diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
+index e00520cc63498..cffaf2245d4f1 100644
+--- a/tools/testing/radix-tree/multiorder.c
++++ b/tools/testing/radix-tree/multiorder.c
+@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
+ 	item_kill_tree(xa);
+ }
+ 
+-bool stop_iteration = false;
++bool stop_iteration;
+ 
+ static void *creator_func(void *ptr)
+ {
+@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
+ 	pthread_t worker_thread[num_threads];
+ 	int i;
+ 
++	stop_iteration = false;
+ 	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
+ 	for (i = 1; i < num_threads; i++)
+ 		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
+@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
+ 	item_kill_tree(xa);
+ }
+ 
++static void *load_creator(void *ptr)
++{
++	/* 'order' is set up to ensure we have sibling entries */
++	unsigned int order;
++	struct radix_tree_root *tree = ptr;
++	int i;
++
++	rcu_register_thread();
++	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
++	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
++	for (i = 0; i < 10000; i++) {
++		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
++			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
++						(1 << order);
++			item_insert_order(tree, index, order);
++			item_delete_rcu(tree, index);
++		}
++	}
++	rcu_unregister_thread();
++
++	stop_iteration = true;
++	return NULL;
++}
++
++static void *load_worker(void *ptr)
++{
++	unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
++
++	rcu_register_thread();
++	while (!stop_iteration) {
++		struct item *item = xa_load(ptr, index);
++		assert(!xa_is_internal(item));
++	}
++	rcu_unregister_thread();
++
++	return NULL;
++}
++
++static void load_race(struct xarray *xa)
++{
++	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
++	pthread_t worker_thread[num_threads];
++	int i;
++
++	stop_iteration = false;
++	pthread_create(&worker_thread[0], NULL, &load_creator, xa);
++	for (i = 1; i < num_threads; i++)
++		pthread_create(&worker_thread[i], NULL, &load_worker, xa);
++
++	for (i = 0; i < num_threads; i++)
++		pthread_join(worker_thread[i], NULL);
++
++	item_kill_tree(xa);
++}
++
+ static DEFINE_XARRAY(array);
+ 
+ void multiorder_checks(void)
+@@ -218,12 +274,20 @@ void multiorder_checks(void)
+ 	multiorder_iteration(&array);
+ 	multiorder_tagged_iteration(&array);
+ 	multiorder_iteration_race(&array);
++	load_race(&array);
+ 
+ 	radix_tree_cpu_dead(0);
+ }
+ 
+-int __weak main(void)
++int __weak main(int argc, char **argv)
+ {
++	int opt;
++
++	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
++		if (opt == 'v')
++			test_verbose++;
++	}
++
+ 	rcu_register_thread();
+ 	radix_tree_init();
+ 	multiorder_checks();
+diff --git a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
+index 16f774b1cdbed..7b281dbe41656 100755
+--- a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
++++ b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
+@@ -2,7 +2,7 @@
+ 
+ set -eufo pipefail
+ 
+-for i in base kprobe kretprobe rawtp fentry fexit fmodret
++for i in base kprobe kretprobe rawtp fentry fexit
+ do
+ 	summary=$(sudo ./bench -w2 -d5 -a rename-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
+ 	printf "%-10s: %s\n" $i "$summary"
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+index c8ba4009e4ab9..b30ff6b3b81ae 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+@@ -123,12 +123,13 @@ static void test_bpf_nf_ct(int mode)
+ 	ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
+ 	ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
+ end:
+-	if (srv_client_fd != -1)
+-		close(srv_client_fd);
+ 	if (client_fd != -1)
+ 		close(client_fd);
++	if (srv_client_fd != -1)
++		close(srv_client_fd);
+ 	if (srv_fd != -1)
+ 		close(srv_fd);
++
+ 	snprintf(cmd, sizeof(cmd), iptables, "-D");
+ 	system(cmd);
+ 	test_bpf_nf__destroy(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+index a543742cd7bd1..2eb71559713c9 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+@@ -173,8 +173,8 @@ static void verify_fail(struct kfunc_test_params *param)
+ 	case tc_test:
+ 		topts.data_in = &pkt_v4;
+ 		topts.data_size_in = sizeof(pkt_v4);
+-		break;
+ 		topts.repeat = 1;
++		break;
+ 	}
+ 
+ 	skel = kfunc_call_fail__open_opts(&opts);
+diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+index 76eab0aacba0c..233b089d1fbac 100644
+--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h
++++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+@@ -12,6 +12,15 @@
+ #include <linux/ipv6.h>
+ #include <linux/udp.h>
+ 
++/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE
++ * friendly version breaks compilation for older clang versions <= 15
++ * when invoked in a static assert.  Restore original here.
++ */
++#ifdef offsetof
++#undef offsetof
++#define offsetof(type, member) __builtin_offsetof(type, member)
++#endif
++
+ struct gre_base_hdr {
+ 	uint16_t flags;
+ 	uint16_t protocol;
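To make the comment above concrete: in a constant-expression context such as _Static_assert(), only the plain builtin works on older clang, because the CO-RE-instrumented offsetof is not an integer constant expression there. A minimal illustration using the struct from this header:

	#include <stdint.h>

	struct gre_base_hdr {
		uint16_t flags;
		uint16_t protocol;
	};

	/* Fine everywhere: __builtin_offsetof is an integer constant expression. */
	_Static_assert(__builtin_offsetof(struct gre_base_hdr, protocol) == 2,
		       "protocol must directly follow the 16-bit flags field");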
+diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+index 3651ce17beeb9..d183f878360bc 100644
+--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
++++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+@@ -24,6 +24,7 @@
+ 
+ static long timeout_ns = 100000;	/* 100us default timeout */
+ static futex_t futex_pi;
++static pthread_barrier_t barrier;
+ 
+ void usage(char *prog)
+ {
+@@ -48,6 +49,8 @@ void *get_pi_lock(void *arg)
+ 	if (ret != 0)
+ 		error("futex_lock_pi failed\n", ret);
+ 
++	pthread_barrier_wait(&barrier);
++
+ 	/* Blocks forever */
+ 	ret = futex_wait(&lock, 0, NULL, 0);
+ 	error("futex_wait failed\n", ret);
+@@ -130,6 +133,7 @@ int main(int argc, char *argv[])
+ 	       basename(argv[0]));
+ 	ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
+ 
++	pthread_barrier_init(&barrier, NULL, 2);
+ 	pthread_create(&thread, NULL, get_pi_lock, NULL);
+ 
+ 	/* initialize relative timeout */
+@@ -163,6 +167,9 @@ int main(int argc, char *argv[])
+ 	res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
+ 	test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+ 
++	/* Wait until the other thread calls futex_lock_pi() */
++	pthread_barrier_wait(&barrier);
++	pthread_barrier_destroy(&barrier);
+ 	/*
+ 	 * FUTEX_LOCK_PI with CLOCK_REALTIME
+ 	 * Due to historical reasons, FUTEX_LOCK_PI supports only realtime
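The barrier added above closes a startup race: the main thread must not exercise the lock until get_pi_lock() actually holds it. A standalone sketch of the same two-party rendezvous, with the futex work replaced by placeholders:

	#include <pthread.h>

	static pthread_barrier_t barrier;

	static void *helper(void *arg)
	{
		/* ... acquire the resource the main thread will contend on ... */
		pthread_barrier_wait(&barrier);	/* announce: resource is held */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_barrier_init(&barrier, NULL, 2);	/* two parties */
		pthread_create(&t, NULL, helper, NULL);

		pthread_barrier_wait(&barrier);	/* returns once helper holds it */
		/* ... now it is safe to test the contended path ... */

		pthread_join(t, NULL);
		pthread_barrier_destroy(&barrier);
		return 0;
	}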
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 5fd49ad0c696f..e05ac82610467 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -938,7 +938,11 @@ void __wait_for_test(struct __test_metadata *t)
+ 		fprintf(TH_LOG_STREAM,
+ 			"# %s: Test terminated by timeout\n", t->name);
+ 	} else if (WIFEXITED(status)) {
+-		if (t->termsig != -1) {
++		if (WEXITSTATUS(status) == 255) {
++			/* SKIP */
++			t->passed = 1;
++			t->skip = 1;
++		} else if (t->termsig != -1) {
+ 			t->passed = 0;
+ 			fprintf(TH_LOG_STREAM,
+ 				"# %s: Test exited normally instead of by signal (code: %d)\n",
+@@ -950,11 +954,6 @@ void __wait_for_test(struct __test_metadata *t)
+ 			case 0:
+ 				t->passed = 1;
+ 				break;
+-			/* SKIP */
+-			case 255:
+-				t->passed = 1;
+-				t->skip = 1;
+-				break;
+ 			/* Other failure, assume step report. */
+ 			default:
+ 				t->passed = 0;
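The reordering above matters because a skip is signalled by the child exiting with status 255, and that must take precedence even when the test declared an expected termination signal. A hedged example of a test that takes this path (the probed sysfs file is hypothetical; any unavailable prerequisite works):

	#include <unistd.h>
	#include "kselftest_harness.h"

	TEST(requires_feature)
	{
		/* hypothetical gate for a feature this test depends on */
		if (access("/sys/kernel/hypothetical_feature", R_OK) != 0)
			SKIP(return, "feature not available");

		ASSERT_EQ(0, 0);
	}

	TEST_HARNESS_MAIN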
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 83d5655695126..251594306d409 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -113,7 +113,7 @@ static bool supports_filesystem(const char *const filesystem)
+ {
+ 	char str[32];
+ 	int len;
+-	bool res;
++	bool res = true;
+ 	FILE *const inf = fopen("/proc/filesystems", "r");
+ 
+ 	/*
+@@ -125,14 +125,16 @@ static bool supports_filesystem(const char *const filesystem)
+ 
+ 	/* filesystem can be null for bind mounts. */
+ 	if (!filesystem)
+-		return true;
++		goto out;
+ 
+ 	len = snprintf(str, sizeof(str), "nodev\t%s\n", filesystem);
+ 	if (len >= sizeof(str))
+ 		/* Ignores too-long filesystem names. */
+-		return true;
++		goto out;
+ 
+ 	res = fgrep(inf, str);
++
++out:
+ 	fclose(inf);
+ 	return res;
+ }
+diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
+index dba0e8ba002f8..8b7390ad81d11 100644
+--- a/tools/testing/selftests/memfd/memfd_test.c
++++ b/tools/testing/selftests/memfd/memfd_test.c
+@@ -1145,8 +1145,25 @@ static void test_sysctl_child(void)
+ 
+ 	printf("%s sysctl 2\n", memfd_str);
+ 	sysctl_assert_write("2");
+-	mfd_fail_new("kern_memfd_sysctl_2",
+-		MFD_CLOEXEC | MFD_ALLOW_SEALING);
++	mfd_fail_new("kern_memfd_sysctl_2_exec",
++		     MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING);
++
++	fd = mfd_assert_new("kern_memfd_sysctl_2_dfl",
++			    mfd_def_size,
++			    MFD_CLOEXEC | MFD_ALLOW_SEALING);
++	mfd_assert_mode(fd, 0666);
++	mfd_assert_has_seals(fd, F_SEAL_EXEC);
++	mfd_fail_chmod(fd, 0777);
++	close(fd);
++
++	fd = mfd_assert_new("kern_memfd_sysctl_2_noexec_seal",
++			    mfd_def_size,
++			    MFD_NOEXEC_SEAL | MFD_CLOEXEC | MFD_ALLOW_SEALING);
++	mfd_assert_mode(fd, 0666);
++	mfd_assert_has_seals(fd, F_SEAL_EXEC);
++	mfd_fail_chmod(fd, 0777);
++	close(fd);
++
+ 	sysctl_fail_write("0");
+ 	sysctl_fail_write("1");
+ }
+@@ -1202,7 +1219,24 @@ static pid_t spawn_newpid_thread(unsigned int flags, int (*fn)(void *))
+ 
+ static void join_newpid_thread(pid_t pid)
+ {
+-	waitpid(pid, NULL, 0);
++	int wstatus;
++
++	if (waitpid(pid, &wstatus, 0) < 0) {
++		printf("newpid thread: waitpid() failed: %m\n");
++		abort();
++	}
++
++	if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != 0) {
++		printf("newpid thread: exited with non-zero error code %d\n",
++		       WEXITSTATUS(wstatus));
++		abort();
++	}
++
++	if (WIFSIGNALED(wstatus)) {
++		printf("newpid thread: killed by signal %d\n",
++		       WTERMSIG(wstatus));
++		abort();
++	}
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 73d53257df42f..5073dbc961258 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -7,4 +7,4 @@ TEST_GEN_PROGS := resctrl_tests
+ 
+ include ../lib.mk
+ 
+-$(OUTPUT)/resctrl_tests: $(wildcard *.c)
++$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 8a4fe8693be63..289b619116fec 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -87,21 +87,19 @@ static int reset_enable_llc_perf(pid_t pid, int cpu_no)
+ static int get_llc_perf(unsigned long *llc_perf_miss)
+ {
+ 	__u64 total_misses;
++	int ret;
+ 
+ 	/* Stop counters after one span to get miss rate */
+ 
+ 	ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
+ 
+-	if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
++	ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
++	if (ret == -1) {
+ 		perror("Could not get llc misses through perf");
+-
+ 		return -1;
+ 	}
+ 
+ 	total_misses = rf_cqm.values[0].value;
+-
+-	close(fd_lm);
+-
+ 	*llc_perf_miss = total_misses;
+ 
+ 	return 0;
+@@ -253,19 +251,25 @@ int cat_val(struct resctrl_val_param *param)
+ 					 memflush, operation, resctrl_val)) {
+ 				fprintf(stderr, "Error-running fill buffer\n");
+ 				ret = -1;
+-				break;
++				goto pe_close;
+ 			}
+ 
+ 			sleep(1);
+ 			ret = measure_cache_vals(param, bm_pid);
+ 			if (ret)
+-				break;
++				goto pe_close;
++
++			close(fd_lm);
+ 		} else {
+ 			break;
+ 		}
+ 	}
+ 
+ 	return ret;
++
++pe_close:
++	close(fd_lm);
++	return ret;
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 341cc93ca84c4..3b328c8448964 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -177,12 +177,13 @@ fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
+ 	else
+ 		ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
+ 
++	free(startptr);
++
+ 	if (ret) {
+ 		printf("\n Error in fill cache read/write...\n");
+ 		return -1;
+ 	}
+ 
+-	free(startptr);
+ 
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index 87e39456dee08..f455f0b7e314b 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -43,6 +43,7 @@
+ 	do {					\
+ 		perror(err_msg);		\
+ 		kill(ppid, SIGKILL);		\
++		umount_resctrlfs();		\
+ 		exit(EXIT_FAILURE);		\
+ 	} while (0)
+ 
+diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
+index 9584eb57e0eda..365d30779768a 100644
+--- a/virt/kvm/vfio.c
++++ b/virt/kvm/vfio.c
+@@ -21,7 +21,7 @@
+ #include <asm/kvm_ppc.h>
+ #endif
+ 
+-struct kvm_vfio_group {
++struct kvm_vfio_file {
+ 	struct list_head node;
+ 	struct file *file;
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+@@ -30,7 +30,7 @@ struct kvm_vfio_group {
+ };
+ 
+ struct kvm_vfio {
+-	struct list_head group_list;
++	struct list_head file_list;
+ 	struct mutex lock;
+ 	bool noncoherent;
+ };
+@@ -98,34 +98,35 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
+ }
+ 
+ static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
+-					     struct kvm_vfio_group *kvg)
++					     struct kvm_vfio_file *kvf)
+ {
+-	if (WARN_ON_ONCE(!kvg->iommu_group))
++	if (WARN_ON_ONCE(!kvf->iommu_group))
+ 		return;
+ 
+-	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
+-	iommu_group_put(kvg->iommu_group);
+-	kvg->iommu_group = NULL;
++	kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
++	iommu_group_put(kvf->iommu_group);
++	kvf->iommu_group = NULL;
+ }
+ #endif
+ 
+ /*
+- * Groups can use the same or different IOMMU domains.  If the same then
+- * adding a new group may change the coherency of groups we've previously
+- * been told about.  We don't want to care about any of that so we retest
+- * each group and bail as soon as we find one that's noncoherent.  This
+- * means we only ever [un]register_noncoherent_dma once for the whole device.
++ * Groups/devices can use the same or different IOMMU domains. If the same
++ * then adding a new group/device may change the coherency of groups/devices
++ * we've previously been told about. We don't want to care about any of
++ * that so we retest each group/device and bail as soon as we find one that's
++ * noncoherent.  This means we only ever [un]register_noncoherent_dma once
++ * for the whole device.
+  */
+ static void kvm_vfio_update_coherency(struct kvm_device *dev)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+ 	bool noncoherent = false;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
+ 			noncoherent = true;
+ 			break;
+ 		}
+@@ -143,10 +144,10 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
+ 	mutex_unlock(&kv->lock);
+ }
+ 
+-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
++static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct file *filp;
+ 	int ret;
+ 
+@@ -162,27 +163,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file == filp) {
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file == filp) {
+ 			ret = -EEXIST;
+ 			goto err_unlock;
+ 		}
+ 	}
+ 
+-	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
+-	if (!kvg) {
++	kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
++	if (!kvf) {
+ 		ret = -ENOMEM;
+ 		goto err_unlock;
+ 	}
+ 
+-	kvg->file = filp;
+-	list_add_tail(&kvg->node, &kv->group_list);
++	kvf->file = filp;
++	list_add_tail(&kvf->node, &kv->file_list);
+ 
+ 	kvm_arch_start_assignment(dev->kvm);
++	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
+ 
+ 	mutex_unlock(&kv->lock);
+ 
+-	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+ 	kvm_vfio_update_coherency(dev);
+ 
+ 	return 0;
+@@ -193,10 +194,10 @@ err_fput:
+ 	return ret;
+ }
+ 
+-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
++static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct fd f;
+ 	int ret;
+ 
+@@ -208,18 +209,18 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file != f.file)
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file != f.file)
+ 			continue;
+ 
+-		list_del(&kvg->node);
++		list_del(&kvf->node);
+ 		kvm_arch_end_assignment(dev->kvm);
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
++		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
+ #endif
+-		kvm_vfio_file_set_kvm(kvg->file, NULL);
+-		fput(kvg->file);
+-		kfree(kvg);
++		kvm_vfio_file_set_kvm(kvf->file, NULL);
++		fput(kvf->file);
++		kfree(kvf);
+ 		ret = 0;
+ 		break;
+ 	}
+@@ -234,12 +235,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+ }
+ 
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+-					void __user *arg)
++static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
++				       void __user *arg)
+ {
+ 	struct kvm_vfio_spapr_tce param;
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct fd f;
+ 	int ret;
+ 
+@@ -254,20 +255,20 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file != f.file)
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file != f.file)
+ 			continue;
+ 
+-		if (!kvg->iommu_group) {
+-			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
+-			if (WARN_ON_ONCE(!kvg->iommu_group)) {
++		if (!kvf->iommu_group) {
++			kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
++			if (WARN_ON_ONCE(!kvf->iommu_group)) {
+ 				ret = -EIO;
+ 				goto err_fdput;
+ 			}
+ 		}
+ 
+ 		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
+-						       kvg->iommu_group);
++						       kvf->iommu_group);
+ 		break;
+ 	}
+ 
+@@ -278,8 +279,8 @@ err_fdput:
+ }
+ #endif
+ 
+-static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+-			      void __user *arg)
++static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
++			     void __user *arg)
+ {
+ 	int32_t __user *argp = arg;
+ 	int32_t fd;
+@@ -288,16 +289,16 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+ 	case KVM_DEV_VFIO_GROUP_ADD:
+ 		if (get_user(fd, argp))
+ 			return -EFAULT;
+-		return kvm_vfio_group_add(dev, fd);
++		return kvm_vfio_file_add(dev, fd);
+ 
+ 	case KVM_DEV_VFIO_GROUP_DEL:
+ 		if (get_user(fd, argp))
+ 			return -EFAULT;
+-		return kvm_vfio_group_del(dev, fd);
++		return kvm_vfio_file_del(dev, fd);
+ 
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+ 	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+-		return kvm_vfio_group_set_spapr_tce(dev, arg);
++		return kvm_vfio_file_set_spapr_tce(dev, arg);
+ #endif
+ 	}
+ 
+@@ -309,8 +310,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
+ {
+ 	switch (attr->group) {
+ 	case KVM_DEV_VFIO_GROUP:
+-		return kvm_vfio_set_group(dev, attr->attr,
+-					  u64_to_user_ptr(attr->addr));
++		return kvm_vfio_set_file(dev, attr->attr,
++					 u64_to_user_ptr(attr->addr));
+ 	}
+ 
+ 	return -ENXIO;
+@@ -339,16 +340,16 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
+ static void kvm_vfio_release(struct kvm_device *dev)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg, *tmp;
++	struct kvm_vfio_file *kvf, *tmp;
+ 
+-	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
++	list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
++		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
+ #endif
+-		kvm_vfio_file_set_kvm(kvg->file, NULL);
+-		fput(kvg->file);
+-		list_del(&kvg->node);
+-		kfree(kvg);
++		kvm_vfio_file_set_kvm(kvf->file, NULL);
++		fput(kvf->file);
++		list_del(&kvf->node);
++		kfree(kvf);
+ 		kvm_arch_end_assignment(dev->kvm);
+ 	}
+ 
+@@ -382,7 +383,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
+ 	if (!kv)
+ 		return -ENOMEM;
+ 
+-	INIT_LIST_HEAD(&kv->group_list);
++	INIT_LIST_HEAD(&kv->file_list);
+ 	mutex_init(&kv->lock);
+ 
+ 	dev->private = kv;
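For orientation, the attribute plumbing above is driven from user space through KVM_SET_DEVICE_ATTR on the KVM "vfio" pseudo-device. A hedged sketch of the caller side (fd setup and error handling elided; the constants are the real UAPI names):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int kvm_vfio_add(int kvm_vfio_dev_fd, int vfio_fd)
	{
		int32_t fd = vfio_fd;
		struct kvm_device_attr attr = {
			.group = KVM_DEV_VFIO_GROUP,
			.attr  = KVM_DEV_VFIO_GROUP_ADD,
			.addr  = (uint64_t)(uintptr_t)&fd,
		};

		return ioctl(kvm_vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
	}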


^ permalink raw reply related	[flat|nested] 24+ messages in thread
* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-07 14:53 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-07 14:53 UTC (permalink / raw
  To: gentoo-commits

commit:     47ce2789667cce62af975a65d5dcca920c439a1d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep  7 14:52:31 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep  7 14:52:31 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=47ce2789

BMQ Patch (use=experimental)

Thanks to Holger Hoffstätte

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     4 +
 5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch | 11296 ++++++++++++++++++++++++++
 2 files changed, 11300 insertions(+)

diff --git a/0000_README b/0000_README
index 465e90aa..4ba02fbb 100644
--- a/0000_README
+++ b/0000_README
@@ -94,3 +94,7 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
+From:   https://github.com/hhoffstaette/kernel-patches/
+Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
new file mode 100644
index 00000000..f305f913
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.5-r0.patch
@@ -0,0 +1,11296 @@
+
+Thanks to torvic9 in https://gitlab.com/alfredchen/linux-prjc/-/issues/85
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 722b6ec..223e96f 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5553,6 +5553,12 @@
+ 	sa1100ir	[NET]
+ 			See drivers/net/irda/sa1100_ir.c.
+ 
++	sched_timeslice=
++			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++			Format: integer 2, 4
++			Default: 4
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 3800fab..12ea62d 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1616,3 +1616,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield a call
++to sched_yield() will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
+diff --git a/b/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 0000000..05c84ee
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while remaining efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks that are put into that
++run queue.
++
++The run queue is a set of priority queues. Note that, as data structures,
++these queues are FIFO queues for non-rt tasks and priority queues for rt
++tasks; see BitMap Queue below for details. BMQ is optimized for non-rt tasks
++because most applications are non-rt tasks. Whether a queue is FIFO or
++priority, each queue is an ordered list of runnable tasks awaiting execution,
++and the data structures are the same. When it is time for a new task to run,
++the scheduler simply looks for the lowest-numbered queue that contains a task
++and runs the first task from the head of that queue. The per-CPU idle task is
++also kept in the run queue, so the scheduler can always find a task to run
++from its run queue.
++
++Each task is assigned the same timeslice (default 4ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires, the scheduler
++stops execution of that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler. However, BMQ is heavily optimized for
++non-rt tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the
++implementation details for each policy.
++
++DEADLINE
++	It is squashed as a priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they just don't boost. To control the priority
++of NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Please use a nice level -20 NORMAL
++policy task instead; see the example below.
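++
++For example, a workload that would have used ISO can be launched as a
++maximally boosted NORMAL task (a negative nice level normally requires root):
++
++	nice -n -20 <command>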
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, there are three
++different factors used to determine the effective priority of a task, the
++effective priority being what is used to determine which queue it will be in.
++
++The first factor is simply the task's static priority, which is assigned from
++the task's nice level: [-20, 19] from userland's point of view and [0, 39]
++internally.
++
++The second factor is the priority boost. This is a value bounded between
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority. It is
++modified in the following cases:
++
++*When a thread has used up its entire time slice, it is always deboosted by
++increasing its boost value by one.
++*When a thread gives up cpu control (voluntarily or involuntarily) to
++reschedule, and its switch-in time (time after last switch and run) is below
++the threshold based on its priority boost, it is boosted by decreasing its
++boost value by one, but the value is capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
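++
++As a worked example on the internal [0, 39] scale: a nice 0 task starts at
++static priority 20. After it consumes a full time slice its boost value is
++increased by one, so its effective priority becomes 21 and it sorts behind
++tasks still queued at 20, because the scheduler always picks from the lowest
++numbered non-empty queue first.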
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9df3f48..8a0596f 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -480,7 +480,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f68..59eb72b 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d..fa30f98 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
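++/* pack prio into the top 8 bits so a single u64 compare orders by prio, then deadline */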
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85..6af9ae6 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(7)
++
++#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c256..8c050a5 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 67b573d..93f45c8 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 609bde8..5d4e8aa 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -763,8 +762,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -778,6 +783,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -786,6 +792,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -796,6 +816,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1548,6 +1569,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/init/Kconfig b/init/Kconfig
+index f7f65af..d57f100 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	depends on !SCHED_ALT
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+ 	  and IO capacity are in the system.
+@@ -793,6 +794,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -839,6 +841,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default n
++	help
++	  This feature enables the ProjectC alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU schedulers"
++	default SCHED_PDS
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+ 
+ #
+@@ -892,6 +923,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -989,6 +1021,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1011,6 +1044,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config SCHED_MM_CID
+@@ -1259,6 +1293,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9..19e9c66 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,15 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -88,6 +94,17 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.sq_idx		= 15,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -95,6 +112,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd9..4165467 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 58e6f18..18852b0 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -791,7 +791,7 @@ out:
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1190,7 +1190,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+@@ -2475,11 +2475,13 @@ static int cpuset_can_attach_check(struct cpuset *cs)
+ 	return 0;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static void reset_migrate_dl_data(struct cpuset *cs)
+ {
+ 	cs->nr_migrate_dl_tasks = 0;
+ 	cs->sum_migrate_dl_bw = 0;
+ }
++#endif
+ 
+ /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+ static int cpuset_can_attach(struct cgroup_taskset *tset)
+@@ -2509,12 +2511,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 		if (ret)
+ 			goto out_unlock;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 		if (dl_task(task)) {
+ 			cs->nr_migrate_dl_tasks++;
+ 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ 		}
++#endif
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (!cs->nr_migrate_dl_tasks)
+ 		goto out_success;
+ 
+@@ -2535,6 +2540,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	}
+ 
+ out_success:
++#endif
+ 	/*
+ 	 * Mark attach is in progress.  This makes validate_change() fail
+ 	 * changes which zero cpus/mems_allowed.
+@@ -2557,13 +2563,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ 	cs->attach_in_progress--;
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+-
++#ifndef CONFIG_SCHED_ALT
+ 	if (cs->nr_migrate_dl_tasks) {
+ 		int cpu = cpumask_any(cs->effective_cpus);
+ 
+ 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ 		reset_migrate_dl_data(cs);
+ 	}
++#endif
+ 
+ 	mutex_unlock(&cpuset_mutex);
+ }
+@@ -2665,11 +2672,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ out:
+ 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (cs->nr_migrate_dl_tasks) {
+ 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
+ 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
+ 		reset_migrate_dl_data(cs);
+ 	}
++#endif
+ 
+ 	cs->attach_in_progress--;
+ 	if (!cs->attach_in_progress)
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 6f0c358..8111481 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index edb50b4..09e72bb 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -173,7 +173,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -194,7 +194,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 21db0df..039badd 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -343,7 +343,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
+ 
+ 	waiter->tree.prio = __waiter_prio(task);
+-	waiter->tree.deadline = task->dl.deadline;
++	waiter->tree.deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+@@ -364,16 +364,20 @@ waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+  * Only use with rt_waiter_node_{less,equal}()
+  */
+ #define task_to_waiter_node(p)	\
+-	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ #define task_to_waiter(p)	\
+ 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
+ 
+ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 					       struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -382,16 +386,22 @@ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 						 struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -400,8 +410,10 @@ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b..31d587c 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/b/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 0000000..05b0f12
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,8738 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/clock.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/nmi.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#include <trace/events/ipi.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.5-r0-tv"
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4 ms; it can be set via the kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++struct affinity_context {
++	const struct cpumask *new_mask;
++	struct cpumask *user_mask;
++	unsigned int flags;
++};
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
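++	/* only a 2 ms slice is accepted as an alternative; anything else falls back to the 4 ms default */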
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this much time is left (in ns, ~100 us) */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
++ * the domain); this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
++
++/* task function */
++static inline const struct cpumask *task_user_cpus(struct task_struct *p)
++{
++	if (!p->user_cpus_ptr)
++		return cpu_possible_mask; /* &init_task.cpus_mask */
++	return p->user_cpus_ptr;
++}
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_LEVELS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Init idle task and put into queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++static inline void
++clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static inline void
++set_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* watermark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++#ifdef CONFIG_SCHED_SMT
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(&sched_sg_idle_mask,
++					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
++		}
++		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present) &&
++		    cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
++			cpumask_or(&sched_sg_idle_mask,
++				   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
++	}
++	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(rq->prio, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
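++ *
++ * Returns the rq @p resides on, locked, when @p is running or queued;
++ * otherwise *plock is set to NULL and no rq lock is taken.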
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight miss-attribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	delayacct_irq(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
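++
++/* rq->load_history keeps 32 one-bit busy/idle samples, roughly one per 2^17 ns (~131 us) block */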
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, lets send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)				\
++	sched_info_dequeue(rq, p);						\
++										\
++	list_del(&p->sq_node);							\
++	if (list_empty(&rq->queue.heads[p->sq_idx])) { 				\
++		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);	\
++		func;								\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_enqueue(rq, p);					\
++									\
++	p->sq_idx = task_sched_prio_idx(p, rq);				\
++	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
++	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_preempt_mask(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++#endif
++
++	list_del(&p->sq_node);
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++	if (idx != p->sq_idx) {
++		if (list_empty(&rq->queue.heads[p->sq_idx]))
++			clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		p->sq_idx = idx;
++		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		update_sched_preempt_mask(rq);
++	}
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
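++ * e.g. fetch_or(&ti->flags, _TIF_NEED_RESCHED) sets the flag bits and returns the old flag word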
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
++			break;
++	}
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++static __always_inline
++int __task_state_match(struct task_struct *p, unsigned int state)
++{
++	if (READ_ONCE(p->__state) & state)
++		return 1;
++
++#ifdef CONFIG_PREEMPT_RT
++	if (READ_ONCE(p->saved_state) & state)
++		return -1;
++#endif
++	return 0;
++}
++
++static __always_inline
++int task_state_match(struct task_struct *p, unsigned int state)
++{
++#ifdef CONFIG_PREEMPT_RT
++	int match;
++
++	/*
++	 * Serialize against current_save_and_set_rtlock_wait_state() and
++	 * current_restore_rtlock_saved_state().
++	 */
++	raw_spin_lock_irq(&p->pi_lock);
++	match = __task_state_match(p, state);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return match;
++#else
++	return __task_state_match(p, state);
++#endif
++}
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	int match;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p) && p == rq->curr) {
++			if (!task_state_match(p, match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if ((match = __task_state_match(p, match_state))) {
++			/*
++			 * When matching on p->saved_state, consider this task
++			 * still queued so it will wait.
++			 */
++			if (match < 0)
++				on_rq = 1;
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		}
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
++
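++/*
++ * Illustrative sketch (hypothetical caller, in the style of ptrace):
++ * compare two successive switch counts to check that the target stayed
++ * blocked in between:
++ *
++ *	ncsw = wait_task_inactive(p, TASK_TRACED);
++ *	inspect_task(p);
++ *	if (!ncsw || ncsw != wait_task_inactive(p, TASK_TRACED))
++ *		goto retry;
++ *
++ * inspect_task() and the retry label are placeholders for the sketch.
++ */
++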
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/**
++	 * Alt schedule FW doesn't support sched_feat yet
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
++		static_prio + MAX_PRIORITY_ADJ;
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
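++/*
++ * Worked example (illustrative): with the usual MAX_RT_PRIO of 100, an
++ * RT task with rt_priority 50 gets __normal_prio() = 100 - 1 - 50 = 49,
++ * so higher rt_priority values map to numerically lower (stronger)
++ * priorities. A normal task simply gets static_prio + MAX_PRIORITY_ADJ.
++ */
++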
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks: it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	enqueue_task(p, rq, flags);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void deactivate_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
++	dequeue_task(p, rq, flags);
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++
++	trace_sched_migrate_task(p, new_cpu);
++
++	if (task_cpu(p) != new_cpu) {
++		rseq_migrate(p);
++		sched_mm_cid_migrate_from(p);
++		perf_event_task_migrate(p);
++	}
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++
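++/*
++ * Typical pairing (illustrative sketch): pin the current task to its
++ * CPU across a section that must not migrate; calls nest, so only the
++ * outermost migrate_enable() actually re-enables migration:
++ *
++ *	migrate_disable();
++ *	do_per_cpu_work();
++ *	migrate_enable();
++ *
++ * do_per_cpu_work() is a placeholder for the sketch.
++ */
++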
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
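++/*
++ * Waiter-side sketch of the protocol above (this is what
++ * affine_move_task() below does when help from the stopper is needed):
++ *
++ *	struct migration_arg arg = { p, dest_cpu };
++ *
++ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ */
++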
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	deactivate_task(p, rq, 0);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++	sched_task_sanity_check(p, rq);
++	activate_task(p, rq, 0);
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	}
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
++{
++	cpumask_copy(&p->cpus_mask, ctx->new_mask);
++	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
++
++	/*
++	 * Swap in a new user_cpus_ptr if SCA_USER flag set
++	 */
++	if (ctx->flags & SCA_USER)
++		swap(p->user_cpus_ptr, ctx->user_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, ctx);
++}
++
++/*
++ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
++ * affinity (if any) should be destroyed too.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.user_mask = NULL,
++		.flags     = SCA_USER,	/* clear the user requested mask */
++	};
++	union cpumask_rcuhead {
++		cpumask_t cpumask;
++		struct rcu_head rcu;
++	};
++
++	__do_set_cpus_allowed(p, &ac);
++
++	/*
++	 * Because this is called with p->pi_lock held, it is not possible
++	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
++	 * kfree_rcu().
++	 */
++	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
++}
++
++static cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	/*
++	 * See do_set_cpus_allowed() above for the rcu_head usage.
++	 */
++	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
++
++	return kmalloc_node(size, GFP_KERNEL, node);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	cpumask_t *user_mask;
++	unsigned long flags;
++
++	/*
++	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
++	 * may differ by now due to racing.
++	 */
++	dst->user_cpus_ptr = NULL;
++
++	/*
++	 * This check is racy and losing the race is a valid situation.
++	 * It is not worth the extra overhead of taking the pi_lock on
++	 * every fork/clone.
++	 */
++	if (data_race(!src->user_cpus_ptr))
++		return 0;
++
++	user_mask = alloc_user_cpus_ptr(node);
++	if (!user_mask)
++		return -ENOMEM;
++
++	/*
++	 * Use pi_lock to protect content of user_cpus_ptr
++	 *
++	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++	 * do_set_cpus_allowed().
++	 */
++	raw_spin_lock_irqsave(&src->pi_lock, flags);
++	if (src->user_cpus_ptr) {
++		swap(dst->user_cpus_ptr, user_mask);
++		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	}
++	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
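++	/*
++	 * If the swap above did not happen (src lost its user mask in the
++	 * meantime), user_mask still holds our allocation: free it.
++	 */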
++	if (unlikely(user_mask))
++		kfree(user_mask);
++
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on CPU-down we clear cpu_active() to mask the sched domains and
++ *    keep the load balancer from placing new tasks on the to-be-removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * It can also assume that any active CPU must be online. Conversely,
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select the CPU on the other node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave the kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_idle_mask);
++
++	for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits) {
++		if (prio < cpu_rq(cpu)->prio)
++			cpumask_set_cpu(cpu, mask);
++	}
++}
++
++static inline int
++preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++{
++	int task_prio = task_sched_prio(p);
++	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != task_prio) {
++		sched_preempt_mask_flush(mask, task_prio);
++		atomic_set(&sched_prio_record, task_prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
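++/*
++ * Pick a CPU for @p to run on: prefer a fully idle SMT group (when
++ * CONFIG_SCHED_SMT), then any idle CPU, then a CPU that @p can
++ * preempt, and finally fall back to the best CPU in the allowed mask.
++ */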
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(p, &allow_mask, &mask))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 struct affinity_context *ctx,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, ctx);
++
++	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  struct affinity_context *ctx)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
++	 * flags are set.
++	 */
++	if (p->user_cpus_ptr &&
++	    !(ctx->flags & SCA_USER) &&
++	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
++		ctx->new_mask = rq->scratch_mask;
++
++	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++
++	return __set_cpus_allowed_ptr(p, &ac);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
++ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
++ * affinity or use cpu_online_mask instead.
++ *
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr().
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct affinity_context ac = {
++		.new_mask  = task_user_cpus(p),
++		.flags     = 0,
++	};
++	int ret;
++
++	/*
++	 * Try to restore the old affinity mask with __sched_setaffinity().
++	 * Cpuset masking will be done there too.
++	 */
++	ret = __sched_setaffinity(p, &ac);
++	WARN_ON_ONCE(ret);
++}
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       struct affinity_context *ctx)
++{
++	return set_cpus_allowed_ptr(p, ctx->new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	return NULL;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/** Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable.
++ */
++static inline void ttwu_do_wakeup(struct task_struct *p)
++{
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq, ENQUEUE_WAKEUP);
++	check_preempt_curr(rq);
++
++	ttwu_do_wakeup(p);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		if (!task_on_cpu(p)) {
++			/*
++			 * When on_rq && !on_cpu the task is preempted, see if
++			 * it should preempt the task that is current now.
++			 */
++			update_rq_clock(rq);
++			check_preempt_curr(rq);
++		}
++		ttwu_do_wakeup(p);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	/*
++	 * Must be after enqueueing at least one task such that
++	 * idle_cpu() does not observe a false-negative -- if it does,
++	 * it is possible for select_idle_siblings() to stack a number
++	 * of tasks on this CPU during that window.
++	 *
++	 * It is ok to clear ttwu_pending when another task is pending.
++	 * We will receive an IPI after local IRQs are enabled and then
++	 * enqueue it. Since nr_running > 0 by now, idle_cpu() will always
++	 * get the correct result.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Prepare the scene for sending an IPI for a remote smp_call
++ *
++ * Returns true if the caller can proceed with sending the IPI.
++ * Returns false otherwise.
++ */
++bool call_function_single_prep_ipi(int cpu)
++{
++	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
++		trace_sched_wake_idle_without_ipi(cpu);
++		return false;
++	}
++
++	return true;
++}
++
++/*
++ * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rqs wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee CPU is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (is_idle_task(rq->curr))
++		resched_curr(rq);
++	/* Else CPU is not idle, do nothing here */
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of PREEMPT_RT saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
++ *   bits set. This allows to distinguish all wakeup scenarios.
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	int match;
++
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	*success = !!(match = __task_state_match(p, state));
++
++#ifdef CONFIG_PREEMPT_RT
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock.  If the state matches, set p::saved_state to
++	 * TASK_RUNNING, but do not wake the task because it waits
++	 * for a lock wakeup. Also indicate success because from
++	 * the regular waker's point of view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (match < 0)
++		p->saved_state = TASK_RUNNING;
++#endif
++	return match > 0;
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However, for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		ttwu_do_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!ttwu_state_match(p, state, &success))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	WRITE_ONCE(p->__state, TASK_WAKING);
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPUs wake_list
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p);
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
++
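++/*
++ * Illustrative sketch (hypothetical callback): sample a task's state
++ * while it is pinned by task_call_func():
++ *
++ *	static int get_state(struct task_struct *p, void *arg)
++ *	{
++ *		*(unsigned int *)arg = READ_ONCE(p->__state);
++ *		return 0;
++ *	}
++ *
++ *	unsigned int state;
++ *	task_call_func(p, get_state, &state);
++ */
++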
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++	init_sched_mm_cid(p);
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
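++/*
++ * Worked example for the timeslice split above (illustrative numbers):
++ * a parent holding 4ms of slice at fork leaves 2ms each for parent and
++ * child. Only when the halved share drops below RESCHED_NS does the
++ * child get a fresh sched_timeslice_ns and the parent a reschedule.
++ */
++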
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
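++
++/*
++ * Runtime example (editorial, not part of the original patch): with
++ * CONFIG_PROC_SYSCTL the same static branch can be flipped on a live
++ * system; writes require CAP_SYS_ADMIN, as enforced above:
++ *
++ *	# sysctl -w kernel.sched_schedstats=1
++ *	# echo 0 > /proc/sys/kernel/sched_schedstats
++ */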
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq, flags);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running; we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the ttwu() WF_ON_CPU case and its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_held(&rq->lock);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
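++
++/*
++ * Editorial usage sketch (assumed caller pattern, not part of this patch):
++ * a caller that must drop rq->lock before the callbacks may run splices
++ * them out first and replays them once the lock has been released:
++ *
++ *	struct balance_callback *head;
++ *
++ *	raw_spin_lock(&rq->lock);
++ *	...
++ *	head = splice_balance_callbacks(rq);
++ *	raw_spin_unlock(&rq->lock);
++ *	balance_callbacks(rq, head);	// retakes rq->lock iff head != NULL
++ */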
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 *
++	 * switch_mm_cid() needs to be updated if the barriers provided
++	 * by context_switch() are modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	/* switch_mm_cid() requires the memory barriers above. */
++	switch_mm_cid(rq, prev, next);
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
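++
++/*
++ * Editorial usage sketch (assumed caller, not part of this patch): the
++ * typical consumer is a short polling loop that bails out as soon as
++ * another task becomes runnable on this CPU:
++ *
++ *	while (!done && single_task_running())
++ *		cpu_relax();
++ */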
++
++unsigned long long nr_context_switches_cpu(int cpu)
++{
++	return cpu_rq(cpu)->nr_switches;
++}
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: they prefer shallow idle state
++ * selection for a CPU that has IO-wait, even though the blocked task might
++ * not end up running on that CPU when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU; only that
++ * CPU will have IO-wait accounted, while the other has regular idle. Yet if
++ * the storage were faster, both could've run at the same time, utilising
++ * both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake on another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
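++
++/*
++ * Editorial illustration of the SMP under-accounting described above
++ * (hypothetical numbers, not part of the original patch):
++ *
++ *	CPU0: idle, nr_iowait = 2  ->  time accounted as IO-wait
++ *	CPU1: idle, nr_iowait = 0  ->  time accounted as plain idle
++ *
++ * Faster storage could have run both blocked tasks in parallel on both
++ * CPUs, so summing the per-CPU numbers under-counts the lost CPU time.
++ */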
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
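++
++/*
++ * Editorial sketch with hypothetical numbers (not part of the patch):
++ * update_curr() charges the time since the task last started running
++ * and burns the same amount from its timeslice:
++ *
++ *	clock_task = 10,500,000ns, last_ran = 10,000,000ns -> ns = 500,000
++ *	sched_time += 500us; time_slice -= 500us; last_ran = clock_task
++ */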
++
++/*
++ * Return accounted runtime for the task.
++ * Return separately the current's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
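++
++/*
++ * Editorial usage sketch (standard userspace API, not part of this
++ * patch): task_sched_runtime() ultimately backs the per-thread CPU
++ * clock queried from userspace:
++ *
++ *	struct timespec ts;
++ *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
++ */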
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks with less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr = rq->curr;
++	u64 resched_latency;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		arch_scale_freq_tick();
++
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++	task_tick_mm_cid(rq, curr);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++
++	if (curr->flags & PF_WQ_WORKER)
++		wq_worker_tick(curr);
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int sg_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq, int cpu)
++{
++	cpumask_t chk;
++
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
++	/*
++	 * Only a cpu in the sibling idle group will do the checking, and
++	 * then find potential cpus to which the currently running task
++	 * can be migrated.
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&
++			    sg_balance_trigger(i))
++				return;
++		}
++	}
++}
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too many is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++	int os;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	WARN_ON_ONCE(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	check_panic_on_warn("scheduling while atomic");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rcu_dereference(rq->curr);
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	/* Workaround to check rq->curr is still on rq */
++	if (!task_on_rq_queued(skip))
++		return 0;
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0, );
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *topo_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				update_sched_preempt_mask(rq);
++				cpufreq_update_util(rq, 0);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired, as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
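++
++/*
++ * Editorial note (not part of the original patch): with SM_MASK_PREEMPT
++ * == ~0U on !PREEMPT_RT, a test such as
++ *
++ *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
++ *
++ * reduces to '!sched_mode && prev_state', so the compiler can optimize
++ * the AND away entirely, as the comment above describes.
++ */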
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in the
++ *      timer interrupt handler, scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* Bypass the sched_feat(HRTICK) check, which the Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev_state & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq, DEQUEUE_SLEEP);
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++
++		cpu = cpu_of(rq);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance(rq, cpu);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (task_is_running(tsk))
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++	}
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(SM_NONE);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with IRQs disabled. This
++ * protects us against recursive calling from IRQ context.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	WARN_ON_ONCE(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p)) {
++		int idx;
++
++		update_rq_clock(rq);
++		idx = task_sched_prio_idx(p, rq);
++		if (idx != p->sq_idx) {
++			requeue_task(p, rq, idx);
++			check_preempt_curr(rq);
++		}
++	}
++}
++
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is plenty of trickery needed to make this pointer
++	 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ are active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task is
++	 * SCHED_NORMAL/SCHED_BATCH again:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40] */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++		capable(CAP_SYS_NICE));
++}
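++
++/*
++ * Worked example (illustrative only, not part of the patch): the rlimit
++ * encoding maps nice [19, -20] onto [1, 40], i.e. nice_to_rlimit(n) == 20 - n.
++ * So an RLIMIT_NICE of 30 permits lowering the nice value down to -10,
++ * since nice_to_rlimit(-10) == 30 <= 30.
++ */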
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy              return value    kernel prio    user prio/nice
++ *
++ * (BMQ) normal, batch, idle [0 ... 53]      [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
++ * (PDS) normal, batch, idle [0 ... 39]      100            0/[-20 ... 19]
++ * fifo, rr                  [-1 ... -100]   [99 ... 0]     [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will not have
++	 * any effect on scheduling until the task is SCHED_NORMAL/
++	 * SCHED_BATCH again.
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	bool cpuset_locked = false;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	WARN_ON_ONCE(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into
++	 * SCHED_FIFO (see dl_squash_attr above).
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    attr->sched_priority > MAX_RT_PRIO - 1)
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	/*
++	 * Allow unprivileged RT tasks to decrease priority:
++	 */
++	if (user && !capable(CAP_SYS_NICE)) {
++		if (SCHED_FIFO == policy || SCHED_RR == policy) {
++			unsigned long rlim_rtprio =
++					task_rlimit(p, RLIMIT_RTPRIO);
++
++			/* Can't set/change the rt policy */
++			if (policy != p->policy && !rlim_rtprio)
++				return -EPERM;
++
++			/* Can't increase priority */
++			if (attr->sched_priority > p->rt_priority &&
++			    attr->sched_priority > rlim_rtprio)
++				return -EPERM;
++		}
++
++		/* Can't change other user's priorities */
++		if (!check_same_owner(p))
++			return -EPERM;
++
++		/* Normal users shall not reset the sched_reset_on_fork flag */
++		if (p->sched_reset_on_fork && !reset_on_fork)
++			return -EPERM;
++	}
++
++	if (user) {
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi) {
++		cpuset_locked = true;
++		cpuset_lock();
++	}
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * For a task p which is not running, reading rq->stop is
++	 * racy but acceptable as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop thread is a very bad idea.
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (cpuset_locked)
++			cpuset_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		if (cpuset_locked)
++			cpuset_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (cpuset_locked)
++		cpuset_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
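++
++/*
++ * Illustrative sketch (not part of the patch) of how an in-kernel user
++ * might combine the helpers above with a hypothetical worker kthread;
++ * worker_fn and the error handling are assumptions for illustration:
++ *
++ *	struct task_struct *tsk = kthread_create(worker_fn, NULL, "worker");
++ *
++ *	if (!IS_ERR(tsk)) {
++ *		sched_set_fifo_low(tsk);	// modest RT priority
++ *		wake_up_process(tsk);
++ *	}
++ */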
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel-sized attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * parts of which the kernel doesn't know about. Just ignore them - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
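++
++/*
++ * Worked example of the ABI handshake above (illustrative, not part of
++ * the patch): a binary compiled against SCHED_ATTR_SIZE_VER0 (48 bytes)
++ * passes usize == 48 to a kernel whose sched_attr is larger. Only the
++ * first 48 bytes are copied back and kattr->size is clamped to 48, so
++ * the old binary sees a structure it fully understands; newer fields
++ * are silently dropped.
++ */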
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for forwards/backwards compatibility.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++	kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++#ifdef CONFIG_SMP
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++	return 0;
++}
++#endif
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
++
++	ctx->new_mask = new_mask;
++	ctx->flags |= SCA_CHECK;
++
++	retval = __set_cpus_allowed_ptr(p, ctx);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++
++		/*
++		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
++		 * will restore the previous user_cpus_ptr value.
++		 *
++		 * In the unlikely event a previous user_cpus_ptr exists,
++		 * we need to further restrict the mask to what is allowed
++		 * by that old user_cpus_ptr.
++		 */
++		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
++			bool empty = !cpumask_and(new_mask, new_mask,
++						  ctx->user_mask);
++
++			if (WARN_ON_ONCE(empty))
++				cpumask_copy(new_mask, cpus_allowed);
++		}
++		__set_cpus_allowed_ptr(p, ctx);
++		retval = -EINVAL;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct affinity_context ac;
++	struct cpumask *user_mask;
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			retval = -EPERM;
++			goto out_put_task;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_put_task;
++
++	/*
++	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
++	 * alloc_user_cpus_ptr() returns NULL.
++	 */
++	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
++	if (user_mask) {
++		cpumask_copy(user_mask, in_mask);
++	} else if (IS_ENABLED(CONFIG_SMP)) {
++		retval = -ENOMEM;
++		goto out_put_task;
++	}
++
++	ac = (struct affinity_context){
++		.new_mask  = in_mask,
++		.user_mask = user_mask,
++		.flags     = SCA_USER,
++	};
++
++	retval = __sched_setaffinity(p, &ac);
++	kfree(ac.user_mask);
++
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min(len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
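++
++/*
++ * Illustrative userspace sketch (not part of the patch): note that the
++ * raw syscall returns the number of mask bytes copied, which glibc's
++ * sched_getaffinity() wrapper hides:
++ *
++ *	cpu_set_t set;
++ *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
++ *	// n > 0: n bytes of the affinity mask were written into 'set'
++ */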
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	klp_sched_try_switch();
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
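++
++/*
++ * Typical (illustrative) use of cond_resched_lock() in a long traversal
++ * under a spinlock; if it returns non-zero the lock was dropped and
++ * reacquired, so any state derived under the lock must be revalidated.
++ * process() and the restart label are hypothetical:
++ *
++ *	spin_lock(&lock);
++ *	list_for_each_entry(pos, &head, node) {
++ *		process(pos);
++ *		if (cond_resched_lock(&lock))
++ *			goto restart;	// lock was dropped; revalidate
++ *	}
++ *	spin_unlock(&lock);
++ */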
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++static DEFINE_MUTEX(sched_dynamic_mutex);
++static bool klp_override;
++
++static void __sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	if (!klp_override)
++		preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++void sched_dynamic_update(int mode)
++{
++	mutex_lock(&sched_dynamic_mutex);
++	__sched_dynamic_update(mode);
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
++
++static int klp_cond_resched(void)
++{
++	__klp_sched_try_switch();
++	return __cond_resched();
++}
++
++void sched_dynamic_klp_enable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = true;
++	static_call_update(cond_resched, klp_cond_resched);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++void sched_dynamic_klp_disable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = false;
++	__sched_dynamic_update(preempt_dynamic_mode);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
++
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
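++
++/*
++ * Example (illustrative): the preemption model can be chosen at boot,
++ * e.g. "preempt=voluntary" on the kernel command line; kernels that
++ * expose the debugfs knob can also switch it at runtime:
++ *
++ *	echo full > /sys/kernel/debug/sched/preempt
++ */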
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run; if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
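++
++/*
++ * Illustrative sketch (not part of the patch): the prepare/finish pair
++ * lets other sleeping primitives be accounted as IO wait too (this
++ * mirrors how mutex_lock_io() is built):
++ *
++ *	int tok = io_schedule_prepare();
++ *	mutex_lock(&some_mutex);	// hypothetical lock we block on
++ *	io_schedule_finish(tok);
++ */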
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
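++
++/*
++ * Worked example (illustrative): with the default MAX_RT_PRIO of 100,
++ * sched_get_priority_max(SCHED_FIFO) returns 99 and
++ * sched_get_priority_min(SCHED_FIFO) returns 1, matching the valid RT
++ * priority range 1..MAX_RT_PRIO-1 checked in __sched_setscheduler().
++ */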
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * Reset the NMI-timeout; listing all tasks on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++#ifdef CONFIG_SMP
++	struct affinity_context ac = (struct affinity_context) {
++		.new_mask  = cpumask_of(cpu),
++		.flags     = 0,
++	};
++#endif
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, &ac);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	WARN_ON_ONCE(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
++ * only takes effect while the CPU is going down.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do the sync before parking smpboot threads to take care of the RCU boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
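++/*
++ * Roughly, each TOPOLOGY_CPUMASK() step below checks whether @mask adds any
++ * CPUs not covered by the previous levels; if so, it records the full @mask
++ * as the next affinity level and prints it, and (unless @last) stores the
++ * complement of @mask in the slot that the next level will intersect with,
++ * so only not-yet-covered CPUs are considered there.
++ */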
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(mask),	\
++				  nr_cpumask_bits);
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(cpumask_of(cpu)),
++				  nr_cpumask_bits);
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++
++	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
++			 " by Alfred Chen.\n");
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		struct rq *rq;
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++
++		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
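++
++/*
++ * Example: writing 2048 to cpu.shares stores scale_load(2048) in tg->shares
++ * (2048 << 10 on 64-bit) and reading it scales back down to 2048. Note that
++ * the value is only recorded by sched_group_set_shares() above; nothing in
++ * this file consumes it for scheduling decisions.
++ */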
++#endif
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
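++
++/*
++ * .legacy_cftypes (the "shares" knob) is what shows up on cgroup v1 mounts,
++ * while .dfl_cftypes, empty here, is what a cgroup v2 hierarchy would expose.
++ */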
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
++
++#ifdef CONFIG_SCHED_MM_CID
++
++/*
++ * @cid_lock: Guarantee forward-progress of cid allocation.
++ *
++ * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
++ * is only used when contention is detected by the lock-free allocation so
++ * forward progress can be guaranteed.
++ */
++DEFINE_RAW_SPINLOCK(cid_lock);
++
++/*
++ * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
++ *
++ * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
++ * detected, it is set to 1 to ensure that all newly coming allocations are
++ * serialized by @cid_lock until the allocation which detected contention
++ * completes and sets @use_cid_lock back to 0. This guarantees forward progress
++ * of a cid allocation.
++ */
++int use_cid_lock;
++
++/*
++ * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
++ * concurrently with respect to the execution of the source runqueue context
++ * switch.
++ *
++ * There is one basic property we want to guarantee here:
++ *
++ * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
++ * used by a task. That would lead to concurrent allocation of the cid and
++ * userspace corruption.
++ *
++ * Provide this guarantee by introducing a Dekker memory ordering to guarantee
++ * that a pair of loads observe at least one of a pair of stores, which can be
++ * shown as:
++ *
++ *      X = Y = 0
++ *
++ *      w[X]=1          w[Y]=1
++ *      MB              MB
++ *      r[Y]=y          r[X]=x
++ *
++ * Which guarantees that x==0 && y==0 is impossible. But rather than using
++ * values 0 and 1, this algorithm cares about specific state transitions of the
++ * runqueue current task (as updated by the scheduler context switch), and the
++ * per-mm/cpu cid value.
++ *
++ * Let's introduce task (Y) which has task->mm == mm and task (N) which has
++ * task->mm != mm for the rest of the discussion. There are two scheduler state
++ * transitions on context switch we care about:
++ *
++ * (TSA) Store to rq->curr with transition from (N) to (Y)
++ *
++ * (TSB) Store to rq->curr with transition from (Y) to (N)
++ *
++ * On the remote-clear side, there is one transition we care about:
++ *
++ * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
++ *
++ * There is also a transition to UNSET state which can be performed from all
++ * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
++ * guarantees that only a single thread will succeed:
++ *
++ * (TMB) cmpxchg to *pcpu_cid to mark UNSET
++ *
++ * Just to be clear, what we do _not_ want to happen is a transition to UNSET
++ * when a thread is actively using the cid (property (1)).
++ *
++ * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
++ *
++ * Scenario A) (TSA)+(TMA) (from next task perspective)
++ *
++ * CPU0                                      CPU1
++ *
++ * Context switch CS-1                       Remote-clear
++ *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
++ *                                             (implied barrier after cmpxchg)
++ *   - switch_mm_cid()
++ *     - memory barrier (see switch_mm_cid()
++ *       comment explaining how this barrier
++ *       is combined with other scheduler
++ *       barriers)
++ *     - mm_cid_get (next)
++ *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
++ *
++ * This Dekker ensures that either task (Y) is observed by the
++ * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
++ * observed.
++ *
++ * If task (Y) store is observed by rcu_dereference(), it means that there is
++ * still an active task on the cpu. Remote-clear will therefore not transition
++ * to UNSET, which fulfills property (1).
++ *
++ * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
++ * it will move its state to UNSET, which clears the percpu cid perhaps
++ * uselessly (which is not an issue for correctness). Because task (Y) is not
++ * observed, CPU1 can move ahead to set the state to UNSET. Because moving
++ * state to UNSET is done with a cmpxchg expecting that the old state has the
++ * LAZY flag set, only one thread will successfully UNSET.
++ *
++ * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
++ * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
++ * CPU1 will observe task (Y) and do nothing more, which is fine.
++ *
++ * What we are effectively preventing with this Dekker is a scenario where
++ * neither LAZY flag nor store (Y) are observed, which would fail property (1)
++ * because this would UNSET a cid which is actively used.
++ */
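++
++/*
++ * Mapping the litmus test above onto the code: X is the per-cpu cid word
++ * (w[X] is the LAZY cmpxchg on the remote-clear side, r[X] the READ_ONCE()
++ * in mm_cid_get()), and Y is rq->curr (w[Y] is the context-switch store,
++ * r[Y] the rcu_dereference() on the remote-clear side).
++ */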
++
++void sched_mm_cid_migrate_from(struct task_struct *t)
++{
++	t->migrate_from_cpu = task_cpu(t);
++}
++
++static
++int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
++					  struct task_struct *t,
++					  struct mm_cid *src_pcpu_cid)
++{
++	struct mm_struct *mm = t->mm;
++	struct task_struct *src_task;
++	int src_cid, last_mm_cid;
++
++	if (!mm)
++		return -1;
++
++	last_mm_cid = t->last_mm_cid;
++	/*
++	 * If the migrated task has no last cid, or if the current
++	 * task on src rq uses the cid, it means the source cid does not need
++	 * to be moved to the destination cpu.
++	 */
++	if (last_mm_cid == -1)
++		return -1;
++	src_cid = READ_ONCE(src_pcpu_cid->cid);
++	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
++		return -1;
++
++	/*
++	 * If we observe an active task using the mm on this rq, it means we
++	 * are not the last task to be migrated from this cpu for this mm, so
++	 * there is no need to move src_cid to the destination cpu.
++	 */
++	rcu_read_lock();
++	src_task = rcu_dereference(src_rq->curr);
++	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++		rcu_read_unlock();
++		t->last_mm_cid = -1;
++		return -1;
++	}
++	rcu_read_unlock();
++
++	return src_cid;
++}
++
++static
++int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
++					      struct task_struct *t,
++					      struct mm_cid *src_pcpu_cid,
++					      int src_cid)
++{
++	struct task_struct *src_task;
++	struct mm_struct *mm = t->mm;
++	int lazy_cid;
++
++	if (src_cid == -1)
++		return -1;
++
++	/*
++	 * Attempt to clear the source cpu cid to move it to the destination
++	 * cpu.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(src_cid);
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
++		return -1;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, this task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	rcu_read_lock();
++	src_task = rcu_dereference(src_rq->curr);
++	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++		rcu_read_unlock();
++		/*
++		 * We observed an active task for this mm, there is therefore
++		 * no point in moving this cid to the destination cpu.
++		 */
++		t->last_mm_cid = -1;
++		return -1;
++	}
++	rcu_read_unlock();
++
++	/*
++	 * The src_cid is unused, so it can be unset.
++	 */
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++		return -1;
++	return src_cid;
++}
++
++/*
++ * Migration to dst cpu. Called with dst_rq lock held.
++ * Interrupts are disabled, which keeps the window of cid ownership without the
++ * source rq lock held small.
++ */
++void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
++{
++	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
++	struct mm_struct *mm = t->mm;
++	int src_cid, dst_cid, src_cpu;
++	struct rq *src_rq;
++
++	lockdep_assert_rq_held(dst_rq);
++
++	if (!mm)
++		return;
++	src_cpu = t->migrate_from_cpu;
++	if (src_cpu == -1) {
++		t->last_mm_cid = -1;
++		return;
++	}
++	/*
++	 * Move the src cid if the dst cid is unset. This keeps id
++	 * allocation closest to 0 in cases where few threads migrate around
++	 * many cpus.
++	 *
++	 * If destination cid is already set, we may have to just clear
++	 * the src cid to ensure compactness in frequent migrations
++	 * scenarios.
++	 *
++	 * It is not useful to clear the src cid when the number of threads is
++	 * greater or equal to the number of allowed cpus, because user-space
++	 * can expect that the number of allowed cids can reach the number of
++	 * allowed cpus.
++	 */
++	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
++	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
++	if (!mm_cid_is_unset(dst_cid) &&
++	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
++		return;
++	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
++	src_rq = cpu_rq(src_cpu);
++	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
++	if (src_cid == -1)
++		return;
++	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
++							    src_cid);
++	if (src_cid == -1)
++		return;
++	if (!mm_cid_is_unset(dst_cid)) {
++		__mm_cid_put(mm, src_cid);
++		return;
++	}
++	/* Move src_cid to dst cpu. */
++	mm_cid_snapshot_time(dst_rq, mm);
++	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
++}
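++
++/*
++ * The decision ladder above, in short: no mm or no source cpu means nothing
++ * to do; if the destination cid is already set and the mm has at least as
++ * many users as this task's allowed cpus, keep both cids; otherwise try to
++ * steal the source cid and either install it on the destination cpu or
++ * release it back to the mm's cid mask.
++ */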
++
++static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
++				      int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *t;
++	unsigned long flags;
++	int cid, lazy_cid;
++
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid))
++		return;
++
++	/*
++	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
++	 * there happens to be other tasks left on the source cpu using this
++	 * mm, the next task using this mm will reallocate its cid on context
++	 * switch.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(cid);
++	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
++		return;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, that task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	rcu_read_lock();
++	t = rcu_dereference(rq->curr);
++	if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
++		rcu_read_unlock();
++		return;
++	}
++	rcu_read_unlock();
++
++	/*
++	 * The cid is unused, so it can be unset.
++	 * Disable interrupts to keep the window of cid ownership without rq
++	 * lock small.
++	 */
++	local_irq_save(flags);
++	if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++		__mm_cid_put(mm, cid);
++	local_irq_restore(flags);
++}
++
++static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct mm_cid *pcpu_cid;
++	struct task_struct *curr;
++	u64 rq_clock;
++
++	/*
++	 * rq->clock load is racy on 32-bit but one spurious clear once in a
++	 * while is irrelevant.
++	 */
++	rq_clock = READ_ONCE(rq->clock);
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++
++	/*
++	 * In order to take care of infrequently scheduled tasks, bump the time
++	 * snapshot associated with this cid if an active task using the mm is
++	 * observed on this rq.
++	 */
++	rcu_read_lock();
++	curr = rcu_dereference(rq->curr);
++	if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
++		WRITE_ONCE(pcpu_cid->time, rq_clock);
++		rcu_read_unlock();
++		return;
++	}
++	rcu_read_unlock();
++
++	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
++static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
++					     int weight)
++{
++	struct mm_cid *pcpu_cid;
++	int cid;
++
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid) || cid < weight)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
++static void task_mm_cid_work(struct callback_head *work)
++{
++	unsigned long now = jiffies, old_scan, next_scan;
++	struct task_struct *t = current;
++	struct cpumask *cidmask;
++	struct mm_struct *mm;
++	int weight, cpu;
++
++	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
++
++	work->next = work;	/* Prevent double-add */
++	if (t->flags & PF_EXITING)
++		return;
++	mm = t->mm;
++	if (!mm)
++		return;
++	old_scan = READ_ONCE(mm->mm_cid_next_scan);
++	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	if (!old_scan) {
++		unsigned long res;
++
++		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
++		if (res != old_scan)
++			old_scan = res;
++		else
++			old_scan = next_scan;
++	}
++	if (time_before(now, old_scan))
++		return;
++	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
++		return;
++	cidmask = mm_cidmask(mm);
++	/* Clear cids that were not recently used. */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_old(mm, cpu);
++	weight = cpumask_weight(cidmask);
++	/*
++	 * Clear cids that are greater or equal to the cidmask weight to
++	 * recompact it.
++	 */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
++}
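++
++/*
++ * Summary of the two passes above: the first drops cids whose time snapshot
++ * is older than SCHED_MM_CID_PERIOD_NS, the second drops cids numerically
++ * greater than or equal to the number of cids still in use, keeping the
++ * allocated range compact from 0 upwards.
++ */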
++
++void init_sched_mm_cid(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	int mm_users = 0;
++
++	if (mm) {
++		mm_users = atomic_read(&mm->mm_users);
++		if (mm_users == 1)
++			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	}
++	t->cid_work.next = &t->cid_work;	/* Protect against double add */
++	init_task_work(&t->cid_work, task_mm_cid_work);
++}
++
++void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
++{
++	struct callback_head *work = &curr->cid_work;
++	unsigned long now = jiffies;
++
++	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
++	    work->next != work)
++		return;
++	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
++		return;
++	task_work_add(curr, work, TWA_RESUME);
++}
++
++void sched_mm_cid_exit_signals(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq_flags rf;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	rq_lock_irqsave(rq, &rf);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void sched_mm_cid_before_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq_flags rf;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	rq_lock_irqsave(rq, &rf);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void sched_mm_cid_after_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq_flags rf;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	rq_lock_irqsave(rq, &rf);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 1);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
++	rq_unlock_irqrestore(rq, &rf);
++	rseq_set_notify_resume(t);
++}
++
++void sched_mm_cid_fork(struct task_struct *t)
++{
++	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
++	t->mm_cid_active = 1;
++}
++#endif
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 0000000..1212a03
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 0000000..dc99b8d
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,906 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/profile.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_SCHED_BMQ
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_LEVELS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++/* bits: RT(0-24), reserved(25-31), SCHED_NORMAL_PRIO_NUM(32), cpu idle task(1) */
++#define SCHED_LEVELS	(64 + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_LEVELS - 1)
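++/* Example (PDS): SCHED_LEVELS is 64 + 1, so IDLE_TASK_SCHED_PRIO is 64. */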
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
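++
++/*
++ * Worked example, assuming the kernel's usual SCHED_FIXEDPOINT_SHIFT of 10:
++ * on 64-bit, scale_load(1024) == 1048576 and scale_load_down(1048576) ==
++ * 1024 again, while scale_load_down(1) clamps to 2, avoiding the 0/1
++ * weights that cause arithmetic problems.
++ */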
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetics problems.
++ * A weight of a cfs_rq is the sum of weights of which entities
++ * are queued on this cfs_rq, so a weight of a entity should not be
++ * too large, so as the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++
++#define SCHED_QUEUE_BITS	(SCHED_LEVELS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_LEVELS];
++};
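++
++/*
++ * A classic bitmap-indexed priority array: bit p set in ->bitmap means
++ * heads[p] is non-empty, so picking the next task amounts to a
++ * find-first-bit scan over the levels followed by taking the head of that
++ * list (a sketch of the lookup; the actual helpers live elsewhere in this
++ * patch).
++ */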
++
++struct rq;
++struct cpuidle_state;
++
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue	queue;
++#ifdef CONFIG_SCHED_PDS
++	u64			time_edge;
++#endif
++	unsigned long		prio;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++
++	/* Scratch cpumask to be temporarily used under rq_lock */
++	cpumask_var_t		scratch_mask;
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
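++
++/*
++ * Illustration: sched_cpu_topo_masks is ordered from the closest level
++ * outwards (the CPU itself, then SMT siblings, then coregroup, core, and
++ * the rest), so best_mask_cpu() returns a CPU from @mask that shares the
++ * deepest possible topology level with @cpu.
++ */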
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs which are used in other kernel code.
++ * They use the dummy rq_flags.
++ * TODO: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted from it and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
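++
++/*
++ * The __u64_stats_fetch_begin()/__u64_stats_fetch_retry() pair above is
++ * what makes the 64-bit total safe to read on 32-bit architectures: the
++ * loop repeats until the sequence count is unchanged across the copy.
++ */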
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#ifdef CONFIG_SCHED_MM_CID
++
++#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
++#define MM_CID_SCAN_DELAY	100			/* 100ms */
++
++extern raw_spinlock_t cid_lock;
++extern int use_cid_lock;
++
++extern void sched_mm_cid_migrate_from(struct task_struct *t);
++extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
++extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
++extern void init_sched_mm_cid(struct task_struct *t);
++
++static inline void __mm_cid_put(struct mm_struct *mm, int cid)
++{
++	if (cid < 0)
++		return;
++	cpumask_clear_cpu(cid, mm_cidmask(mm));
++}
++
++/*
++ * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
++ * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
++ * be held to transition to other states.
++ *
++ * State transitions synchronized with cmpxchg or try_cmpxchg need to be
++ * consistent across cpus, which prevents use of this_cpu_cmpxchg.
++ */
++static inline void mm_cid_put_lazy(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (!mm_cid_is_lazy_put(cid) ||
++	    !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid, res;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	for (;;) {
++		if (mm_cid_is_unset(cid))
++			return MM_CID_UNSET;
++		/*
++		 * Attempt transition from valid or lazy-put to unset.
++		 */
++		res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
++		if (res == cid)
++			break;
++		cid = res;
++	}
++	return cid;
++}
++
++static inline void mm_cid_put(struct mm_struct *mm)
++{
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = mm_cid_pcpu_unset(mm);
++	if (cid == MM_CID_UNSET)
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int __mm_cid_try_get(struct mm_struct *mm)
++{
++	struct cpumask *cpumask;
++	int cid;
++
++	cpumask = mm_cidmask(mm);
++	/*
++	 * Retry finding first zero bit if the mask is temporarily
++	 * filled. This only happens during concurrent remote-clear
++	 * which owns a cid without holding a rq lock.
++	 */
++	for (;;) {
++		cid = cpumask_first_zero(cpumask);
++		if (cid < nr_cpu_ids)
++			break;
++		cpu_relax();
++	}
++	if (cpumask_test_and_set_cpu(cid, cpumask))
++		return -1;
++	return cid;
++}
++
++/*
++ * Save a snapshot of the current runqueue time of this cpu
++ * with the per-cpu cid value, allowing to estimate how recently it was used.
++ */
++static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
++
++	lockdep_assert_rq_held(rq);
++	WRITE_ONCE(pcpu_cid->time, rq->clock);
++}
++
++static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	int cid;
++
++	/*
++	 * All allocations (even those using the cid_lock) are lock-free. If
++	 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
++	 * guarantee forward progress.
++	 */
++	if (!READ_ONCE(use_cid_lock)) {
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto end;
++		raw_spin_lock(&cid_lock);
++	} else {
++		raw_spin_lock(&cid_lock);
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto unlock;
++	}
++
++	/*
++	 * cid concurrently allocated. Retry while forcing following
++	 * allocations to use the cid_lock to ensure forward progress.
++	 */
++	WRITE_ONCE(use_cid_lock, 1);
++	/*
++	 * Set use_cid_lock before allocation. Only care about program order
++	 * because this is only required for forward progress.
++	 */
++	barrier();
++	/*
++	 * Retry until it succeeds. It is guaranteed to eventually succeed once
++	 * all newcoming allocations observe the use_cid_lock flag set.
++	 */
++	do {
++		cid = __mm_cid_try_get(mm);
++		cpu_relax();
++	} while (cid < 0);
++	/*
++	 * Allocate before clearing use_cid_lock. Only care about
++	 * program order because this is for forward progress.
++	 */
++	barrier();
++	WRITE_ONCE(use_cid_lock, 0);
++unlock:
++	raw_spin_unlock(&cid_lock);
++end:
++	mm_cid_snapshot_time(rq, mm);
++	return cid;
++}
++
++static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	struct cpumask *cpumask;
++	int cid;
++
++	lockdep_assert_rq_held(rq);
++	cpumask = mm_cidmask(mm);
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (mm_cid_is_valid(cid)) {
++		mm_cid_snapshot_time(rq, mm);
++		return cid;
++	}
++	if (mm_cid_is_lazy_put(cid)) {
++		if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++	}
++	cid = __mm_cid_get(rq, mm);
++	__this_cpu_write(pcpu_cid->cid, cid);
++	return cid;
++}
++
++static inline void switch_mm_cid(struct rq *rq,
++				 struct task_struct *prev,
++				 struct task_struct *next)
++{
++	/*
++	 * Provide a memory barrier between rq->curr store and load of
++	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
++	 *
++	 * Should be adapted if context_switch() is modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		/*
++		 * user -> kernel transition does not guarantee a barrier, but
++		 * we can use the fact that it performs an atomic operation in
++		 * mmgrab().
++		 */
++		if (prev->mm)                           // from user
++			smp_mb__after_mmgrab();
++		/*
++		 * kernel -> kernel transition does not change rq->curr->mm
++		 * state. It stays NULL.
++		 */
++	} else {                                        // to user
++		/*
++		 * kernel -> user transition does not provide a barrier
++		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
++		 * Provide it here.
++		 */
++		if (!prev->mm)                          // from kernel
++			smp_mb();
++		/*
++		 * user -> user transition guarantees a memory barrier through
++		 * switch_mm() when current->mm changes. If current->mm is
++		 * unchanged, no barrier is needed.
++		 */
++	}
++	if (prev->mm_cid_active) {
++		mm_cid_snapshot_time(rq, prev->mm);
++		mm_cid_put_lazy(prev);
++		prev->mm_cid = -1;
++	}
++	if (next->mm_cid_active)
++		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
++}
++
++#else
++static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
++static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
++static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
++static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
++static inline void init_sched_mm_cid(struct task_struct *t) { }
++#endif
++
++#endif /* ALT_SCHED_H */
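
The __mm_cid_get() path above pairs a lock-free fast path with a locked slow path whose only job is forward progress: once any CPU fails the fast path, it sets use_cid_lock so every newcomer serializes on cid_lock until the starved allocation succeeds. A self-contained user-space sketch of the same pattern, assuming C11 atomics and POSIX threads (names and sizes here are illustrative, not part of the patch):

	/* Illustrative user-space model of the cid allocator above;
	 * assumes ids are freed concurrently, as lazy-put does. */
	#include <stdatomic.h>
	#include <pthread.h>

	#define NR_IDS 64
	static atomic_ulong id_mask;               /* bit set => id in use */
	static atomic_int use_slow_lock;           /* forward-progress flag */
	static pthread_mutex_t slow_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Lock-free attempt: claim the lowest clear bit, -1 on a lost race. */
	static int try_alloc(void)
	{
		unsigned long m = atomic_load(&id_mask);

		for (int id = 0; id < NR_IDS; id++) {
			if (m & (1UL << id))
				continue;
			if (atomic_fetch_or(&id_mask, 1UL << id) & (1UL << id))
				return -1;         /* someone beat us to it */
			return id;
		}
		return -1;                         /* mask temporarily full */
	}

	static int alloc_id(void)
	{
		int id;

		if (!atomic_load(&use_slow_lock)) {
			id = try_alloc();
			if (id >= 0)
				return id;         /* common, uncontended case */
		}
		pthread_mutex_lock(&slow_lock);
		/* Route all newcomers through the lock until we succeed. */
		atomic_store(&use_slow_lock, 1);
		do {
			id = try_alloc();
		} while (id < 0);
		atomic_store(&use_slow_lock, 0);
		pthread_mutex_unlock(&slow_lock);
		return id;
	}

	int main(void)
	{
		return alloc_id() == 0 ? 0 : 1;    /* first id handed out is 0 */
	}
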
+diff --git a/b/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 0000000..f29b8f3
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,110 @@
++#define ALT_SCHED_NAME "BMQ"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MAX_RT_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return task_sched_prio(p);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++	}
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
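
For a concrete feel of BMQ's task_sched_prio() mapping above, here is a standalone worked example, assuming the usual kernel constants (MAX_RT_PRIO = 100, nice 0 means prio 120); illustration only, not part of the patch:

	#include <stdio.h>

	#define MAX_RT_PRIO 100    /* assumed mainline value */

	static int task_sched_prio(int prio, int boost_prio)
	{
		return (prio < MAX_RT_PRIO) ? prio
					    : MAX_RT_PRIO / 2 + (prio + boost_prio) / 2;
	}

	int main(void)
	{
		printf("%d\n", task_sched_prio(120, 0));   /* nice 0, no boost -> 110 */
		printf("%d\n", task_sched_prio(120, -2));  /* boosted twice    -> 109 */
		printf("%d\n", task_sched_prio(50, 0));    /* RT prio 50       -> 50  */
		return 0;
	}
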
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab..71a2554 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
+-#include "deadline.c"
+ 
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 99bdd96..bc17d5a 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -34,7 +34,6 @@
+ #include <linux/nospec.h>
+ #include <linux/proc_fs.h>
+ #include <linux/psi.h>
+-#include <linux/psi.h>
+ #include <linux/ptrace_api.h>
+ #include <linux/sched_clock.h>
+ #include <linux/security.h>
+@@ -85,7 +84,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 4492608..3522bbf 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -155,12 +155,18 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ 
+ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ {
+-	unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+ 
++#ifndef CONFIG_SCHED_ALT
++	unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
+ 	sg_cpu->bw_dl = cpu_bw_dl(rq);
+ 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
+ 					  FREQUENCY_UTIL, NULL);
++#else
++	unsigned long max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = rq_load_util(rq, max_cap);
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -306,8 +312,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -610,6 +618,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -842,7 +851,9 @@ cpufreq_governor_init(schedutil_gov);
+ #ifdef CONFIG_ENERGY_MODEL
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	rebuild_sched_domains_energy();
++#endif /* CONFIG_SCHED_ALT */
+ }
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
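
The sugov_get_util() hunk above feeds schedutil a runqueue-load-based utilization under CONFIG_SCHED_ALT instead of the CFS/deadline breakdown. Downstream, the frequency pick is the same either way; a sketch of that mapping, assuming mainline get_next_freq()'s roughly 25% headroom (standalone illustration, not patch code):

	#include <stdio.h>

	/* Assumed shape: next_f = (max_f + max_f/4) * util / cap, i.e. run
	 * ~25% above measured utilization for headroom before the next update. */
	static unsigned int next_freq(unsigned int max_freq_khz,
				      unsigned long util, unsigned long cap)
	{
		return (max_freq_khz + (max_freq_khz >> 2)) * util / cap;
	}

	int main(void)
	{
		/* util 512/1024 on a 2 GHz CPU -> 1.25 GHz */
		printf("%u kHz\n", next_freq(2000000, 512, 1024));
		return 0;
	}
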
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index af7952f..6461cbb 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -126,7 +126,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -150,7 +150,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -288,7 +288,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -298,7 +298,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -630,7 +630,7 @@ out:
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
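
The cputime hunks above swap the plain task_nice(p) > 0 test for the scheduler-specific task_running_nice() predicate, so under BMQ the dynamic boost offset counts toward "nice" accounting. A side-by-side sketch, with MAX_PRIORITY_ADJ as an assumed value (its real definition lives elsewhere in the alt-sched patch):

	#define DEFAULT_PRIO     120
	#define MAX_PRIORITY_ADJ 4     /* assumption for illustration only */

	/* mainline: nice accounting keys purely off the nice value */
	int running_nice_cfs(int nice) { return nice > 0; }

	/* BMQ (see bmq.h above): the boost offset participates too */
	int running_nice_bmq(int prio, int boost_prio)
	{
		return prio + boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ;
	}
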
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 066ff1c..7bdb806 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ #ifdef CONFIG_SMP
+@@ -332,6 +335,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
+@@ -341,12 +345,15 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
+@@ -376,11 +383,13 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1114,6 +1123,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 342f58a..ab493e7 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -379,6 +379,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -500,3 +501,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/b/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 0000000..15cc488
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,152 @@
++#define ALT_SCHED_NAME "PDS"
++
++#define MIN_SCHED_NORMAL_PRIO	(32)
++static const u64 RT_MASK = ((1ULL << MIN_SCHED_NORMAL_PRIO) - 1);
++
++#define SCHED_NORMAL_PRIO_NUM	(32)
++#define SCHED_EDGE_DELTA	(SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
++
++/* PDS assume NORMAL_PRIO_NUM is power of 2 */
++#define SCHED_NORMAL_PRIO_MOD(x)	((x) & (SCHED_NORMAL_PRIO_NUM - 1))
++
++/* default time slice 4ms -> shift 22, 2 time slice slots -> shift 23 */
++static __read_mostly int sched_timeslice_shift = 23;
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms)
++{
++	if (2 == timeslice_ms)
++		sched_timeslice_shift = 22;
++}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	s64 delta = p->deadline - rq->time_edge + SCHED_EDGE_DELTA;
++
++#ifdef ALT_SCHED_DEBUG
++	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", delta))
++		return SCHED_NORMAL_PRIO_NUM - 1;
++#endif
++
++	return max(0LL, delta);
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
++		MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	u64 idx;
++
++	if (p->prio < MIN_NORMAL_PRIO)
++		return p->prio >> 2;
++
++	idx = max(p->deadline + SCHED_EDGE_DELTA, rq->time_edge);
++	/*printk(KERN_INFO "sched: task_sched_prio_idx edge:%llu, deadline=%llu idx=%llu\n", rq->time_edge, p->deadline, idx);*/
++	return MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(idx);
++}
++
++static inline int sched_prio2idx(int sched_prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
++		sched_prio :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
++}
++
++static inline int sched_idx2prio(int sched_idx, struct rq *rq)
++{
++	return (sched_idx < MIN_SCHED_NORMAL_PRIO) ?
++		sched_idx :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
++}
++
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MIN_NORMAL_PRIO)
++		p->deadline = rq->time_edge + (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
++static inline void update_rq_time_edge(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++	DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
++
++	if (now == old)
++		return;
++
++	rq->time_edge = now;
++	delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	/*printk(KERN_INFO "sched: update_rq_time_edge 0x%016lx %llu\n", rq->queue.bitmap[0], delta);*/
++	prio = MIN_SCHED_NORMAL_PRIO;
++	for_each_set_bit_from(prio, rq->queue.bitmap, MIN_SCHED_NORMAL_PRIO + delta)
++		list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
++				      SCHED_NORMAL_PRIO_MOD(prio + old), &head);
++
++	bitmap_shift_right(normal, rq->queue.bitmap, delta, SCHED_QUEUE_BITS);
++	if (!list_empty(&head)) {
++		struct task_struct *p;
++		u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
++
++		list_for_each_entry(p, &head, sq_node)
++			p->sq_idx = idx;
++
++		list_splice(&head, rq->queue.heads + idx);
++		set_bit(MIN_SCHED_NORMAL_PRIO, normal);
++	}
++	bitmap_replace(rq->queue.bitmap, normal, rq->queue.bitmap,
++		       (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
++
++	if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
++		return;
++
++	rq->prio = (rq->prio < MIN_SCHED_NORMAL_PRIO + delta) ?
++		MIN_SCHED_NORMAL_PRIO : rq->prio - delta;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++	sched_renew_deadline(p, rq);
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + NICE_WIDTH / 2 - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_renew_deadline(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
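
PDS's update_rq_time_edge() above rotates a ring of 32 normal-priority queues as rq->clock crosses time-slice boundaries; sched_prio2idx()/sched_idx2prio() are the forward and inverse mappings between a stable priority and its current ring slot. A tiny standalone sketch of that modular mapping (illustrative values, not part of the patch):

	#include <stdio.h>

	#define NPRIO 32                   /* must be a power of two */
	#define MOD(x) ((x) & (NPRIO - 1))

	int main(void)
	{
		unsigned long long time_edge = 5;  /* advances as the clock ticks */
		unsigned int prio = 7;             /* stable normal-level priority */

		unsigned int idx  = MOD(prio + time_edge);  /* ring slot: 12 */
		unsigned int back = MOD(idx - time_edge);   /* inverse:    7 */

		printf("slot=%u prio=%u\n", idx, back);
		return 0;
	}
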
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 0f31076..bd38bf7 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 3a0e0dc..e8a7d84 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index e93e006..9bab981 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3245,6 +3249,11 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+ 	cgroup_account_cputime(curr, delta_exec);
+ }
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++
+ #ifdef CONFIG_SCHED_MM_CID
+ 
+ #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
+@@ -3480,4 +3489,5 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
+ static inline void init_sched_mm_cid(struct task_struct *t) { }
+ #endif
+ 
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837..5486c63 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 38f3698..b9d5973 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index d3a3b26..fed43c1 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/bsearch.h>
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1420,8 +1421,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1654,6 +1657,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1690,6 +1694,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2745,3 +2750,20 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++
++int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
++{
++	return cpumask_nth(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 354a2d2..73080f0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -92,6 +92,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1912,6 +1916,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
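
With CONFIG_SCHED_ALT, the sysctl registered above surfaces as /proc/sys/kernel/yield_type, clamped to 0..2 by the min/max handler. A minimal user-space sketch of setting it; the meaning of each value comes from the alt-sched code and is an assumption here:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/yield_type", "w");

		if (!f)
			return 1;      /* kernel likely built without SCHED_ALT */
		fputs("0\n", f);       /* assumption: 0 makes sched_yield() a no-op */
		return fclose(f) != 0;
	}
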
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 238262e..962a26f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2091,8 +2091,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index e9c6f9d..43ee0a9 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -867,6 +867,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -874,6 +875,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -901,8 +903,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -916,7 +920,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1152,8 +1156,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
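
In the RLIMIT_RTTIME hunk further up, the RT timeout is kept in jiffies and converted to microseconds with timeout * (USEC_PER_SEC / HZ). A worked conversion, assuming HZ = 250 for illustration:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long hz = 250;              /* assumed config */
		const unsigned long usec_per_sec = 1000000UL;
		unsigned long timeout_jiffies = 500;

		/* 1000000 / 250 = 4000 usec per jiffy; 500 jiffies = 2 s */
		printf("%lu usec\n", timeout_jiffies * (usec_per_sec / hz));
		return 0;
	}
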
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 5295904..d04bb99 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1155,10 +1155,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 800b420..998a572 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1075,7 +1075,7 @@ void wq_worker_running(struct task_struct *task)
+ 	 * CPU intensive auto-detection cares about how long a work item hogged
+ 	 * CPU without sleeping. Reset the starting timestamp on wakeup.
+ 	 */
+-	worker->current_at = worker->task->se.sum_exec_runtime;
++	worker->current_at = tsk_seruntime(worker->task);
+ 
+ 	WRITE_ONCE(worker->sleeping, 0);
+ }
+@@ -1161,7 +1161,7 @@ void wq_worker_tick(struct task_struct *task)
+ 	 * We probably want to make this prettier in the future.
+ 	 */
+ 	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
+-	    worker->task->se.sum_exec_runtime - worker->current_at <
++	    tsk_seruntime(worker->task) - worker->current_at <
+ 	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
+ 		return;
+ 
+@@ -2530,7 +2530,7 @@ __acquires(&pool->lock)
+ 	worker->current_work = work;
+ 	worker->current_func = work->func;
+ 	worker->current_pwq = pwq;
+-	worker->current_at = worker->task->se.sum_exec_runtime;
++	worker->current_at = tsk_seruntime(worker->task);
+ 	work_data = *work_data_bits(work);
+ 	worker->current_color = get_work_color(work_data);
+ 


* [gentoo-commits] proj/linux-patches:6.5 commit in: /
@ 2023-09-02  9:54 Mike Pagano
  0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2023-09-02  9:54 UTC (permalink / raw)
  To: gentoo-commits

commit:     85da7abb9dc4fd9a0f16d42dd530a7134eba7f67
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep  2 09:53:56 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep  2 09:53:56 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=85da7abb

Linux patch 6.5.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1000_linux-6.5.1.patch | 489 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 493 insertions(+)

diff --git a/0000_README b/0000_README
index 1efa027c..f7da0ce2 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.5.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.5.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-6.5.1.patch b/1000_linux-6.5.1.patch
new file mode 100644
index 00000000..0323b844
--- /dev/null
+++ b/1000_linux-6.5.1.patch
@@ -0,0 +1,489 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 722b6eca2e938..23ebe34ff901e 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6275,10 +6275,6 @@
+ 			-1: disable all critical trip points in all thermal zones
+ 			<degrees C>: override all critical trip points
+ 
+-	thermal.nocrt=	[HW,ACPI]
+-			Set to disable actions on ACPI thermal zone
+-			critical and hot trip points.
+-
+ 	thermal.off=	[HW,ACPI]
+ 			1: disable ACPI thermal control
+ 
+diff --git a/Makefile b/Makefile
+index 2fdd8b40b7e04..062b9694e0547 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
+index f5a43fd8c1639..da2ee8d6ef1a7 100644
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -251,7 +251,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		/* sort by type and symbol index */
+ 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+ 
+-		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
++		if (!module_init_layout_section(secstrings + dstsec->sh_name))
+ 			core_plts += count_plts(syms, dstsec->sh_addr, rels,
+ 						numrels, s->sh_info);
+ 		else
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index ad02058756b50..bd69a4e7cd605 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -339,7 +339,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		if (nents)
+ 			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
+ 
+-		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
++		if (!module_init_layout_section(secstrings + dstsec->sh_name))
+ 			core_plts += count_plts(syms, rels, numrels,
+ 						sechdrs[i].sh_info, dstsec);
+ 		else
+diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
+index a6e8373a5170f..3fa87e5e11aba 100644
+--- a/arch/x86/include/asm/sections.h
++++ b/arch/x86/include/asm/sections.h
+@@ -2,8 +2,6 @@
+ #ifndef _ASM_X86_SECTIONS_H
+ #define _ASM_X86_SECTIONS_H
+ 
+-#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+-
+ #include <asm-generic/sections.h>
+ #include <asm/extable.h>
+ 
+@@ -18,20 +16,4 @@ extern char __end_of_kernel_reserve[];
+ 
+ extern unsigned long _brk_start, _brk_end;
+ 
+-static inline bool arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+-	/*
+-	 * If _brk_start has not been cleared, brk allocation is incomplete,
+-	 * and we can not make assumptions about its use.
+-	 */
+-	if (_brk_start)
+-		return 0;
+-
+-	/*
+-	 * After brk allocation is complete, space between _brk_end and _end
+-	 * is available for allocation.
+-	 */
+-	return addr >= _brk_end && addr < (unsigned long)&_end;
+-}
+-
+ #endif	/* _ASM_X86_SECTIONS_H */
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index f9f6ebb08fdb7..3163a40f02e30 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -82,10 +82,6 @@ static int tzp;
+ module_param(tzp, int, 0444);
+ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
+ 
+-static int nocrt;
+-module_param(nocrt, int, 0);
+-MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points.");
+-
+ static int off;
+ module_param(off, int, 0);
+ MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
+@@ -1094,7 +1090,7 @@ static int thermal_act(const struct dmi_system_id *d) {
+ static int thermal_nocrt(const struct dmi_system_id *d) {
+ 	pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
+ 		  d->ident);
+-	nocrt = 1;
++	crt = -1;
+ 	return 0;
+ }
+ static int thermal_tzp(const struct dmi_system_id *d) {
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 839247a4f48ea..660012997f54c 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -199,14 +199,7 @@ struct inet6_cork {
+ 	u8 tclass;
+ };
+ 
+-/**
+- * struct ipv6_pinfo - ipv6 private area
+- *
+- * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc)
+- * this _must_ be the last member, so that inet6_sk_generic
+- * is able to calculate its offset from the base struct sock
+- * by using the struct proto->slab_obj_size member. -acme
+- */
++/* struct ipv6_pinfo - ipv6 private area */
+ struct ipv6_pinfo {
+ 	struct in6_addr 	saddr;
+ 	struct in6_pktinfo	sticky_pktinfo;
+@@ -306,19 +299,19 @@ struct raw6_sock {
+ 	__u32			offset;		/* checksum offset  */
+ 	struct icmp6_filter	filter;
+ 	__u32			ip6mr_table;
+-	/* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
++
+ 	struct ipv6_pinfo	inet6;
+ };
+ 
+ struct udp6_sock {
+ 	struct udp_sock	  udp;
+-	/* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
++
+ 	struct ipv6_pinfo inet6;
+ };
+ 
+ struct tcp6_sock {
+ 	struct tcp_sock	  tcp;
+-	/* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
++
+ 	struct ipv6_pinfo inet6;
+ };
+ 
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 03be088fb4396..001b2ce83832e 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -42,6 +42,11 @@ bool module_init_section(const char *name);
+  */
+ bool module_exit_section(const char *name);
+ 
++/* Describes whether within_module_init() will consider this an init section
++ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
++ */
++bool module_init_layout_section(const char *sname);
++
+ /*
+  * Apply the given relocation to the (simplified) ELF.  Return -error
+  * or 0.
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 690e22139543f..e8927f2d47a3c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1340,6 +1340,7 @@ struct proto {
+ 
+ 	struct kmem_cache	*slab;
+ 	unsigned int		obj_size;
++	unsigned int		ipv6_pinfo_offset;
+ 	slab_flags_t		slab_flags;
+ 	unsigned int		useroffset;	/* Usercopy region offset */
+ 	unsigned int		usersize;	/* Usercopy region size */
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 016d997131d43..e12d26c10dbab 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -188,16 +188,13 @@ static bool cleanup_symbol_name(char *s)
+ 
+ static int compare_symbol_name(const char *name, char *namebuf)
+ {
+-	int ret;
+-
+-	ret = strcmp(name, namebuf);
+-	if (!ret)
+-		return ret;
+-
+-	if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
+-		return 0;
+-
+-	return ret;
++	/* The kallsyms_seqs_of_names is sorted based on names after
++	 * cleanup_symbol_name() (see scripts/kallsyms.c) if clang lto is enabled.
++	 * To ensure correct bisection in kallsyms_lookup_names(), do
++	 * cleanup_symbol_name(namebuf) before comparing name and namebuf.
++	 */
++	cleanup_symbol_name(namebuf);
++	return strcmp(name, namebuf);
+ }
+ 
+ static unsigned int get_symbol_seq(int index)
+diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
+index a2e3745d15c47..e05ddc33a7529 100644
+--- a/kernel/kallsyms_selftest.c
++++ b/kernel/kallsyms_selftest.c
+@@ -196,7 +196,7 @@ static bool match_cleanup_name(const char *s, const char *name)
+ 	if (!IS_ENABLED(CONFIG_LTO_CLANG))
+ 		return false;
+ 
+-	p = strchr(s, '.');
++	p = strstr(s, ".llvm.");
+ 	if (!p)
+ 		return false;
+ 
+@@ -344,27 +344,6 @@ static int test_kallsyms_basic_function(void)
+ 			goto failed;
+ 		}
+ 
+-		/*
+-		 * The first '.' may be the initial letter, in which case the
+-		 * entire symbol name will be truncated to an empty string in
+-		 * cleanup_symbol_name(). Do not test these symbols.
+-		 *
+-		 * For example:
+-		 * cat /proc/kallsyms | awk '{print $3}' | grep -E "^\." | head
+-		 * .E_read_words
+-		 * .E_leading_bytes
+-		 * .E_trailing_bytes
+-		 * .E_write_words
+-		 * .E_copy
+-		 * .str.292.llvm.12122243386960820698
+-		 * .str.24.llvm.12122243386960820698
+-		 * .str.29.llvm.12122243386960820698
+-		 * .str.75.llvm.12122243386960820698
+-		 * .str.99.llvm.12122243386960820698
+-		 */
+-		if (IS_ENABLED(CONFIG_LTO_CLANG) && !namebuf[0])
+-			continue;
+-
+ 		lookup_addr = kallsyms_lookup_name(namebuf);
+ 
+ 		memset(stat, 0, sizeof(*stat));
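
The two kallsyms hunks above align suffix handling with clang LTO: lookup now runs cleanup_symbol_name() before the bisection compare, and the selftest matches the explicit ".llvm." marker instead of the first '.'. A small standalone check of that matcher:

	#include <stdio.h>
	#include <string.h>

	static int has_llvm_suffix(const char *s)
	{
		return strstr(s, ".llvm.") != NULL;    /* not just any '.' */
	}

	int main(void)
	{
		printf("%d\n", has_llvm_suffix(".str.292.llvm.12122243"));  /* 1 */
		printf("%d\n", has_llvm_suffix("do_init.cold"));            /* 0 */
		return 0;
	}
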
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 111607d91489c..e85b5ad3e2069 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -819,34 +819,26 @@ static int very_verbose(struct lock_class *class)
+  * Is this the address of a static object:
+  */
+ #ifdef __KERNEL__
+-/*
+- * Check if an address is part of freed initmem. After initmem is freed,
+- * memory can be allocated from it, and such allocations would then have
+- * addresses within the range [_stext, _end].
+- */
+-#ifndef arch_is_kernel_initmem_freed
+-static int arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+-	if (system_state < SYSTEM_FREEING_INITMEM)
+-		return 0;
+-
+-	return init_section_contains((void *)addr, 1);
+-}
+-#endif
+-
+ static int static_obj(const void *obj)
+ {
+-	unsigned long start = (unsigned long) &_stext,
+-		      end   = (unsigned long) &_end,
+-		      addr  = (unsigned long) obj;
++	unsigned long addr = (unsigned long) obj;
+ 
+-	if (arch_is_kernel_initmem_freed(addr))
+-		return 0;
++	if (is_kernel_core_data(addr))
++		return 1;
++
++	/*
++	 * keys are allowed in the __ro_after_init section.
++	 */
++	if (is_kernel_rodata(addr))
++		return 1;
+ 
+ 	/*
+-	 * static variable?
++	 * in initdata section and used during bootup only?
++	 * NOTE: On some platforms the initdata section is
++	 * outside of the _stext ... _end range.
+ 	 */
+-	if ((addr >= start) && (addr < end))
++	if (system_state < SYSTEM_FREEING_INITMEM &&
++		init_section_contains((void *)addr, 1))
+ 		return 1;
+ 
+ 	/*
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 8a5d6d63b06cb..87440f714c0ca 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ 	}
+ 
+ 	wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+-	wksp = kmalloc(wksp_size, GFP_KERNEL);
++	wksp = vmalloc(wksp_size);
+ 	if (!wksp) {
+ 		retval = -ENOMEM;
+ 		goto out;
+@@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ 	retval = new_size;
+ 
+  out:
+-	kfree(wksp);
++	vfree(wksp);
+ 	return retval;
+ }
+ #else
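
The decompress fix above moves the zstd workspace off kmalloc(), whose physically contiguous allocations can fail for large window sizes, onto vmalloc(). A related idiom, not what this patch chose, is kvmalloc(), which tries the contiguous path first and falls back automatically; a sketch in kernel style:

	#include <linux/slab.h>

	/* Sketch only; the patch itself uses plain vmalloc()/vfree(). */
	static void *zstd_wksp_alloc(size_t size)
	{
		/* small sizes come from the slab, large ones from vmalloc space */
		return kvmalloc(size, GFP_KERNEL);
	}

	static void zstd_wksp_free(void *wksp)
	{
		kvfree(wksp);          /* handles either origin */
	}
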
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 59b1d067e5289..ff7cc4e292990 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -1484,7 +1484,7 @@ long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
+ 	return offset | mask;
+ }
+ 
+-static bool module_init_layout_section(const char *sname)
++bool module_init_layout_section(const char *sname)
+ {
+ #ifndef CONFIG_MODULE_UNLOAD
+ 	if (module_exit_section(sname))
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index d29d1163203d9..686090bc59451 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -1056,6 +1056,7 @@ static struct proto dccp_v6_prot = {
+ 	.orphan_count	   = &dccp_orphan_count,
+ 	.max_header	   = MAX_DCCP_HEADER,
+ 	.obj_size	   = sizeof(struct dccp6_sock),
++	.ipv6_pinfo_offset = offsetof(struct dccp6_sock, inet6),
+ 	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
+ 	.rsk_prot	   = &dccp6_request_sock_ops,
+ 	.twsk_prot	   = &dccp6_timewait_sock_ops,
+diff --git a/net/dccp/ipv6.h b/net/dccp/ipv6.h
+index 7e4c2a3b322b5..c5d14c48def17 100644
+--- a/net/dccp/ipv6.h
++++ b/net/dccp/ipv6.h
+@@ -13,10 +13,6 @@
+ 
+ struct dccp6_sock {
+ 	struct dccp_sock  dccp;
+-	/*
+-	 * ipv6_pinfo has to be the last member of dccp6_sock,
+-	 * see inet6_sk_generic.
+-	 */
+ 	struct ipv6_pinfo inet6;
+ };
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 5d593ddc0347e..9f9c4b838664a 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -102,9 +102,9 @@ bool ipv6_mod_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(ipv6_mod_enabled);
+ 
+-static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
++static struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
+ {
+-	const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
++	const int offset = sk->sk_prot->ipv6_pinfo_offset;
+ 
+ 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+ }
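
The change above replaces "ipv6_pinfo must be the last member" pointer arithmetic with an explicit offsetof() recorded per protocol. A standalone sketch of the same idea, using hypothetical struct names:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical types standing in for sock / ipv6_pinfo / tcp6_sock. */
	struct base  { int refcnt; };
	struct pinfo { int flow_label; };
	struct derived { struct base b; long other; struct pinfo inet6; };

	static struct pinfo *get_pinfo(struct base *b, size_t pinfo_offset)
	{
		return (struct pinfo *)((char *)b + pinfo_offset);
	}

	int main(void)
	{
		struct derived d = { .inet6.flow_label = 42 };
		struct pinfo *p = get_pinfo(&d.b, offsetof(struct derived, inet6));

		printf("%d\n", p->flow_label);     /* prints 42 */
		return 0;
	}
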
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index c2c291827a2ce..1b27728349725 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -215,6 +215,7 @@ struct proto pingv6_prot = {
+ 	.get_port =	ping_get_port,
+ 	.put_port =	ping_unhash,
+ 	.obj_size =	sizeof(struct raw6_sock),
++	.ipv6_pinfo_offset = offsetof(struct raw6_sock, inet6),
+ };
+ EXPORT_SYMBOL_GPL(pingv6_prot);
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 49381f35b623c..ea16734f5e1f7 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1216,6 +1216,7 @@ struct proto rawv6_prot = {
+ 	.hash		   = raw_hash_sk,
+ 	.unhash		   = raw_unhash_sk,
+ 	.obj_size	   = sizeof(struct raw6_sock),
++	.ipv6_pinfo_offset = offsetof(struct raw6_sock, inet6),
+ 	.useroffset	   = offsetof(struct raw6_sock, filter),
+ 	.usersize	   = sizeof_field(struct raw6_sock, filter),
+ 	.h.raw_hash	   = &raw_v6_hashinfo,
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 6e86721e1cdbb..3a88545a265d6 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2176,6 +2176,7 @@ struct proto tcpv6_prot = {
+ 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
+ 	.max_header		= MAX_TCP_HEADER,
+ 	.obj_size		= sizeof(struct tcp6_sock),
++	.ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6),
+ 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
+ 	.twsk_prot		= &tcp6_timewait_sock_ops,
+ 	.rsk_prot		= &tcp6_request_sock_ops,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index f787e6b8424c7..486d893b8e3ca 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1802,6 +1802,7 @@ struct proto udpv6_prot = {
+ 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+ 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ 	.obj_size		= sizeof(struct udp6_sock),
++	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
+ 	.h.udp_table		= NULL,
+ 	.diag_destroy		= udp_abort,
+ };
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 8e010d07917a7..267d491e97075 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -67,6 +67,7 @@ struct proto udplitev6_prot = {
+ 	.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+ 	.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ 	.obj_size	   = sizeof(struct udp6_sock),
++	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
+ 	.h.udp_table	   = &udplite_table,
+ };
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index ff78217f0cb12..ed8ebb6f59097 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -36,9 +36,6 @@ struct l2tp_ip6_sock {
+ 	u32			conn_id;
+ 	u32			peer_conn_id;
+ 
+-	/* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
+-	 * inet6_sk_generic
+-	 */
+ 	struct ipv6_pinfo	inet6;
+ };
+ 
+@@ -730,6 +727,7 @@ static struct proto l2tp_ip6_prot = {
+ 	.hash		   = l2tp_ip6_hash,
+ 	.unhash		   = l2tp_ip6_unhash,
+ 	.obj_size	   = sizeof(struct l2tp_ip6_sock),
++	.ipv6_pinfo_offset = offsetof(struct l2tp_ip6_sock, inet6),
+ };
+ 
+ static const struct proto_ops l2tp_ip6_ops = {
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index d80658547836f..0efc52c640b59 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3987,6 +3987,7 @@ int __init mptcp_proto_v6_init(void)
+ 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
+ 	mptcp_v6_prot.slab = NULL;
+ 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
++	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
+ 
+ 	err = proto_register(&mptcp_v6_prot, 1);
+ 	if (err)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 76f1bce49a8e7..423dc400992ba 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -9732,6 +9732,7 @@ struct proto sctpv6_prot = {
+ 	.unhash		= sctp_unhash,
+ 	.no_autobind	= true,
+ 	.obj_size	= sizeof(struct sctp6_sock),
++	.ipv6_pinfo_offset = offsetof(struct sctp6_sock, inet6),
+ 	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
+ 	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
+ 				offsetof(struct sctp6_sock, sctp.subscribe) +

