From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.6 commit in: /
Date: Sun,  8 Sep 2024 11:06:03 +0000 (UTC)
Message-ID: <1725793553.f28a54b9dfd8b0b7f332e8b3258958fa948f92d6.mpagano@gentoo>

commit:     f28a54b9dfd8b0b7f332e8b3258958fa948f92d6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep  8 11:05:53 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep  8 11:05:53 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f28a54b9

Linux patch 6.6.50

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1049_linux-6.6.50.patch | 5960 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5964 insertions(+)

diff --git a/0000_README b/0000_README
index 47c3c149..a470a96a 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,10 @@ Patch:  1048_linux-6.6.49.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.6.49
 
+Patch:  1049_linux-6.6.50.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.6.50
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1049_linux-6.6.50.patch b/1049_linux-6.6.50.patch
new file mode 100644
index 00000000..16d4e5c5
--- /dev/null
+++ b/1049_linux-6.6.50.patch
@@ -0,0 +1,5960 @@
+diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
+index 6f03713b70039..2ffaa3cbd63f1 100644
+--- a/Documentation/locking/hwspinlock.rst
++++ b/Documentation/locking/hwspinlock.rst
+@@ -85,6 +85,17 @@ is already free).
+ 
+ Should be called from a process context (might sleep).
+ 
++::
++
++  int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
++
++After verifying the owner of the hwspinlock, release a previously acquired
++hwspinlock; returns 0 on success, or an appropriate error code on failure
++(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
++hwspinlock).
++
++Should be called from a process context (might sleep).
++
+ ::
+ 
+   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
+diff --git a/Makefile b/Makefile
+index 008fa9137c732..f7efbb59c9865 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index d4e9b4556d14b..5276c556a9df9 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -396,8 +396,6 @@ void blk_integrity_unregister(struct gendisk *disk)
+ 	if (!bi->profile)
+ 		return;
+ 
+-	/* ensure all bios are off the integrity workqueue */
+-	blk_flush_integrity();
+ 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+ 	memset(bi, 0, sizeof(*bi));
+ }
+diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
+index 37ab23a9d0345..7f14c5ed1e229 100644
+--- a/drivers/base/regmap/regmap-spi.c
++++ b/drivers/base/regmap/regmap-spi.c
+@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ 			return ERR_PTR(-ENOMEM);
+ 
+ 		max_msg_size = spi_max_message_size(spi);
+-		reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+-				 + config->pad_bits / BITS_PER_BYTE;
++		reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 		if (max_size + reg_reserve_size > max_msg_size)
+ 			max_size -= reg_reserve_size;
+ 
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 028df8a5f537a..079940c69ee0b 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -62,9 +62,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ 					     unsigned int target_freq)
+ {
+ 	struct scmi_data *priv = policy->driver_data;
++	unsigned long freq = target_freq;
+ 
+-	if (!perf_ops->freq_set(ph, priv->domain_id,
+-				target_freq * 1000, true))
++	if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
+ 		return target_freq;
+ 
+ 	return 0;
+diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
+index f095f0065428a..2f1b82cf10b1c 100644
+--- a/drivers/crypto/stm32/stm32-cryp.c
++++ b/drivers/crypto/stm32/stm32-cryp.c
+@@ -11,6 +11,7 @@
+ #include <crypto/internal/des.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/scatterwalk.h>
++#include <linux/bottom_half.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+@@ -1665,8 +1666,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+ 		it_mask &= ~IMSCR_OUT;
+ 	stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
+ 
+-	if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
++	if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
++		local_bh_disable();
+ 		stm32_cryp_finish_req(cryp, 0);
++		local_bh_enable();
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
+index 4153c2edb0490..711e3756a39a5 100644
+--- a/drivers/dma/altera-msgdma.c
++++ b/drivers/dma/altera-msgdma.c
+@@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev,
+ 	struct msgdma_sw_desc *child, *next;
+ 
+ 	mdev->desc_free_cnt++;
+-	list_add_tail(&desc->node, &mdev->free_list);
++	list_move_tail(&desc->node, &mdev->free_list);
+ 	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
+ 		mdev->desc_free_cnt++;
+ 		list_move_tail(&child->node, &mdev->free_list);
+@@ -583,17 +583,16 @@ static void msgdma_issue_pending(struct dma_chan *chan)
+ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
+ {
+ 	struct msgdma_sw_desc *desc, *next;
++	unsigned long irqflags;
+ 
+ 	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
+ 		struct dmaengine_desc_callback cb;
+ 
+-		list_del(&desc->node);
+-
+ 		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ 		if (dmaengine_desc_callback_valid(&cb)) {
+-			spin_unlock(&mdev->lock);
++			spin_unlock_irqrestore(&mdev->lock, irqflags);
+ 			dmaengine_desc_callback_invoke(&cb, NULL);
+-			spin_lock(&mdev->lock);
++			spin_lock_irqsave(&mdev->lock, irqflags);
+ 		}
+ 
+ 		/* Run any dependencies, then free the descriptor */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+index a4d65973bf7cf..80771b1480fff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
++	res.clock = clock;
+ 
+ 	return res;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 9d72bb0a0eaec..a1f35510d5395 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+ 		 "Called with userptr BO"))
+ 		return -EINVAL;
+ 
++	/* bo has been pinned, not need validate it */
++	if (bo->tbo.pin_count)
++		return 0;
++
+ 	amdgpu_bo_placement_from_domain(bo, domain);
+ 
+ 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+@@ -2631,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
+ 
+ 		/* keep mem without hmm range at userptr_inval_list */
+ 		if (!mem->range)
+-			 continue;
++			continue;
+ 
+ 		/* Only check mem with hmm range associated */
+ 		valid = amdgpu_ttm_tt_get_user_pages_done(
+@@ -2848,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+ 			if (!attachment->is_mapped)
+ 				continue;
+ 
+-			if (attachment->bo_va->base.bo->tbo.pin_count)
+-				continue;
+-
+ 			kfd_mem_dmaunmap_attachment(mem, attachment);
+ 			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+ 			if (ret) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index dce9e7d5e4ec6..a14a54a734c12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+ 										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+ 									j++;
+ 								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
++									if (i == 0)
++										continue;
+ 									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+ 										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+ 								}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index b8280be6225d9..c3d89088123db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 		struct amdgpu_firmware_info *ucode;
+ 
+ 		id = fw_type_convert(cgs_device, type);
++		if (id >= AMDGPU_UCODE_ID_MAXIMUM)
++			return -EINVAL;
++
+ 		ucode = &adev->firmware.ucode[id];
+ 		if (ucode->fw == NULL)
+ 			return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index eb663eb811563..9c99d69b4b083 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4480,7 +4480,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ 		shadow = vmbo->shadow;
+ 
+ 		/* No need to recover an evicted BO */
+-		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
++		if (!shadow->tbo.resource ||
++		    shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
+ 			continue;
+@@ -5235,7 +5236,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 * to put adev in the 1st position.
+ 	 */
+ 	INIT_LIST_HEAD(&device_list);
+-	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
++	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ 			list_add_tail(&tmp_adev->reset_list, &device_list);
+ 			if (gpu_reset_for_dev_remove && adev->shutdown)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index cf2faeae1d0db..b04d789bfd100 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -1550,7 +1550,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+ 		break;
+ 	case 2:
+ 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+-		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
++		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
+ 		break;
+ 	default:
+ 		dev_err(adev->dev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+index e71768661ca8d..09a34c7258e22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+  * Returns the number of bytes read/written; -errno on error.
+  */
+ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+-			      u8 *eeprom_buf, u16 buf_size, bool read)
++			      u8 *eeprom_buf, u32 buf_size, bool read)
+ {
+ 	const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
+ 	u16 limit;
+@@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+ 
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 		       u32 eeprom_addr, u8 *eeprom_buf,
+-		       u16 bytes)
++		       u32 bytes)
+ {
+ 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ 				  true);
+@@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ 			u32 eeprom_addr, u8 *eeprom_buf,
+-			u16 bytes)
++			u32 bytes)
+ {
+ 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ 				  false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+index 6935adb2be1f1..8083b8253ef43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+@@ -28,10 +28,10 @@
+ 
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 		       u32 eeprom_addr, u8 *eeprom_buf,
+-		       u16 bytes);
++		       u32 bytes);
+ 
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ 			u32 eeprom_addr, u8 *eeprom_buf,
+-			u16 bytes);
++			u32 bytes);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index 73b8cca35bab8..eace2c9d0c362 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -34,6 +34,7 @@
+ #include <asm/set_memory.h>
+ #endif
+ #include "amdgpu.h"
++#include "amdgpu_reset.h"
+ #include <drm/drm_drv.h>
+ #include <drm/ttm/ttm_tt.h>
+ 
+@@ -400,7 +401,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+ 		return;
+ 
+ 	mb();
+-	amdgpu_device_flush_hdp(adev, NULL);
++	if (down_read_trylock(&adev->reset_domain->sem)) {
++		amdgpu_device_flush_hdp(adev, NULL);
++		up_read(&adev->reset_domain->sem);
++	}
+ 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
+ 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 429ef212c1f25..a4f9015345ccb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+ 	uint8_t dst_num_links = node_info.num_links;
+ 
+ 	hive = amdgpu_get_xgmi_hive(psp->adev);
++	if (WARN_ON(!hive))
++		return;
++
+ 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+ 		struct psp_xgmi_topology_info *mirror_top_info;
+ 		int j;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index dbde3b41c0883..f44b303ae287a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ 	ring->max_dw = max_dw;
+ 	ring->hw_prio = hw_prio;
+ 
+-	if (!ring->no_scheduler) {
++	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
+ 		hw_ip = ring->funcs->type;
+ 		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ 		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+@@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ 					size_t size, loff_t *pos)
+ {
+ 	struct amdgpu_ring *ring = file_inode(f)->i_private;
+-	int r, i;
+ 	uint32_t value, result, early[3];
++	loff_t i;
++	int r;
+ 
+ 	if (*pos & 3 || size & 3)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+index 8ed0e073656f8..41ebe690eeffa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+@@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
+ 		mutex_unlock(&psp->securedisplay_context.mutex);
+ 		break;
+ 	case 2:
++		if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
++			dev_err(adev->dev, "Invalid input: %s\n", str);
++			return -EINVAL;
++		}
+ 		mutex_lock(&psp->securedisplay_context.mutex);
+ 		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ 			TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index ff4f52e07cc0d..d9dc675b46aed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -615,7 +615,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+ 	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ 	vf2pf_info->checksum =
+ 		amd_sriov_msg_checksum(
+-		vf2pf_info, vf2pf_info->header.size, 0, 0);
++		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
+ 
+ 	return 0;
+ }
+@@ -998,6 +998,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
+ 		return 0;
+ 	}
+ 
++	if (amdgpu_device_skip_hw_access(adev))
++		return 0;
++
+ 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
+ 	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+ 	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+@@ -1073,6 +1076,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ {
+ 	u32 rlcg_flag;
+ 
++	if (amdgpu_device_skip_hw_access(adev))
++		return;
++
+ 	if (!amdgpu_sriov_runtime(adev) &&
+ 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+ 		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
+@@ -1090,6 +1096,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ {
+ 	u32 rlcg_flag;
+ 
++	if (amdgpu_device_skip_hw_access(adev))
++		return 0;
++
+ 	if (!amdgpu_sriov_runtime(adev) &&
+ 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+ 		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
+diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+index 0284c9198a04a..6c6f9d9b5d897 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
++++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+@@ -500,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ 
+ 	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
+ 		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
++		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
++			dev_err(adev->dev,
++				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
++				adev->gmc.num_mem_partitions);
++			return -EINVAL;
++		}
+ 	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+ 		dev_err(adev->dev,
+ 			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+index 5dfab80ffff21..cd298556f7a60 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+ 	int fb_channel_number;
+ 
+ 	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
++	if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
++		fb_channel_number = 0;
+ 
+ 	return df_v1_7_channel_number[fb_channel_number];
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 685abf57ffddc..977b956bf930a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
+ 		else
+ 			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ 
+-		if (!ras->disable_ras_err_cnt_harvest) {
++		if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
+ 			/*
+ 			 * clear error status after ras_controller_intr
+ 			 * according to hw team and count ue number
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 74c2d7a0d6285..2f54ee08f2696 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -42,8 +42,6 @@
+ #define CRAT_OEMTABLEID_LENGTH	8
+ #define CRAT_RESERVED_LENGTH	6
+ 
+-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+-
+ /* Compute Unit flags */
+ #define COMPUTE_UNIT_CPU	(1 << 0)  /* Create Virtual CRAT for CPU */
+ #define COMPUTE_UNIT_GPU	(1 << 1)  /* Create Virtual CRAT for GPU */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+index 9ec750666382f..94aaf2fc556ca 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work)
+ 			struct kfd_process,
+ 			debug_event_workarea);
+ 
+-	kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
++	if (process->debug_trap_enabled && process->dbg_ev_file)
++		kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+ }
+ 
+ /* update process/device/queue exception status, write to descriptor
+@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target)
+ 	else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ 		target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+ 
++	cancel_work_sync(&target->debug_event_workarea);
+ 	fput(target->dbg_ev_file);
+ 	target->dbg_ev_file = NULL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 43eff221eae58..8aca92624a77e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -28,6 +28,7 @@
+ #include "kfd_priv.h"
+ #include "kfd_kernel_queue.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_reset.h"
+ 
+ static inline struct process_queue_node *get_queue_by_qid(
+ 			struct process_queue_manager *pqm, unsigned int qid)
+@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+ 		return;
+ 
+ 	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+-	if (dev->kfd->shared_resources.enable_mes)
+-		amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
++	if (dev->kfd->shared_resources.enable_mes &&
++	    down_read_trylock(&dev->adev->reset_domain->sem)) {
++		amdgpu_mes_flush_shader_debugger(dev->adev,
++						 pdd->proc_ctx_gpu_addr);
++		up_read(&dev->adev->reset_domain->sem);
++	}
+ 	pdd->already_dequeued = true;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 61157fddc15c7..8362a71ab7075 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void)
+ 	dev = list_last_entry(&topology_device_list,
+ 			struct kfd_topology_device, list);
+ 	if (dev) {
+-		sys_props.platform_id =
+-			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
++		sys_props.platform_id = dev->oem_id64;
+ 		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ 		sys_props.platform_rev = dev->oem_revision;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 27386ce9a021d..2d1c9d771bef2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -154,7 +154,10 @@ struct kfd_topology_device {
+ 	struct attribute		attr_gpuid;
+ 	struct attribute		attr_name;
+ 	struct attribute		attr_props;
+-	uint8_t				oem_id[CRAT_OEMID_LENGTH];
++	union {
++		uint8_t				oem_id[CRAT_OEMID_LENGTH];
++		uint64_t			oem_id64;
++	};
+ 	uint8_t				oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ 	uint32_t			oem_revision;
+ };
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 94059aef762be..44c1556838240 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4357,7 +4357,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 
+ 	/* There is one primary plane per CRTC */
+ 	primary_planes = dm->dc->caps.max_streams;
+-	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
++	if (primary_planes > AMDGPU_MAX_PLANES) {
++		DRM_ERROR("DM: Plane nums out of 6 planes\n");
++		return -EINVAL;
++	}
+ 
+ 	/*
+ 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
+@@ -8283,15 +8286,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				bundle->stream_update.vrr_infopacket =
+ 					&acrtc_state->stream->vrr_infopacket;
+ 		}
+-	} else if (cursor_update && acrtc_state->active_planes > 0 &&
+-		   acrtc_attach->base.state->event) {
+-		drm_crtc_vblank_get(pcrtc);
+-
++	} else if (cursor_update && acrtc_state->active_planes > 0) {
+ 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+-
+-		acrtc_attach->event = acrtc_attach->base.state->event;
+-		acrtc_attach->base.state->event = NULL;
+-
++		if (acrtc_attach->base.state->event) {
++			drm_crtc_vblank_get(pcrtc);
++			acrtc_attach->event = acrtc_attach->base.state->event;
++			acrtc_attach->base.state->event = NULL;
++		}
+ 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 9e4cc5eeda767..88606b805330d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -49,7 +49,7 @@
+ 
+ #define AMDGPU_DM_MAX_NUM_EDP 2
+ 
+-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
++#define AMDGPU_DMUB_NOTIFICATION_MAX 6
+ 
+ #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
+ #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index 6b31904475815..684b005f564c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -667,6 +667,9 @@ static enum bp_result get_ss_info_v3_1(
+ 	ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ 				DATA_TABLES(ASIC_InternalSS_Info),
+ 				struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
++	if (!ss_table_header_include)
++		return BP_RESULT_UNSUPPORTED;
++
+ 	table_size =
+ 		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+ 				- sizeof(ATOM_COMMON_TABLE_HEADER))
+@@ -1036,6 +1039,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ 				&bp->base,
+ 				DATA_TABLES(ASIC_InternalSS_Info),
+ 				struct_size(header, asSpreadSpectrum, 1)));
++	if (!header)
++		return result;
+ 
+ 	memset(info, 0, sizeof(struct spread_spectrum_info));
+ 
+@@ -1109,6 +1114,8 @@ static enum bp_result get_ss_info_from_ss_info_table(
+ 	get_atom_data_table_revision(header, &revision);
+ 
+ 	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
++	if (!tbl)
++		return result;
+ 
+ 	if (1 != revision.major || 2 > revision.minor)
+ 		return result;
+@@ -1636,6 +1643,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ 
+ 	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+ 			DATA_TABLES(SS_Info));
++	if (!tbl)
++		return number;
+ 
+ 	if (1 != revision.major || 2 > revision.minor)
+ 		return number;
+@@ -1718,6 +1727,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ 				&bp->base,
+ 				DATA_TABLES(ASIC_InternalSS_Info),
+ 				struct_size(header_include, asSpreadSpectrum, 1)));
++	if (!header_include)
++		return 0;
+ 
+ 	size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+ 			- sizeof(ATOM_COMMON_TABLE_HEADER))
+@@ -1756,6 +1767,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ 	header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ 				DATA_TABLES(ASIC_InternalSS_Info),
+ 				struct_size(header_include, asSpreadSpectrum, 1)));
++	if (!header_include)
++		return number;
++
+ 	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+ 			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ 					sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+@@ -2552,8 +2566,8 @@ static enum bp_result construct_integrated_info(
+ 
+ 	/* Sort voltage table from low to high*/
+ 	if (result == BP_RESULT_OK) {
+-		uint32_t i;
+-		uint32_t j;
++		int32_t i;
++		int32_t j;
+ 
+ 		for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ 			for (j = i; j > 0; --j) {
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 93720cf069d7c..384ddb28e6f6d 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -2935,8 +2935,11 @@ static enum bp_result construct_integrated_info(
+ 	struct atom_common_table_header *header;
+ 	struct atom_data_revision revision;
+ 
+-	uint32_t i;
+-	uint32_t j;
++	int32_t i;
++	int32_t j;
++
++	if (!info)
++		return result;
+ 
+ 	if (info && DATA_TABLES(integratedsysteminfo)) {
+ 		header = GET_IMAGE(struct atom_common_table_header,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 0c6a4ab72b1d2..97cdc24cef9a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
+ 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 
+ 			/* Modify previous watermark range to cover up to max */
+-			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
++			if (num_valid_sets > 0)
++				ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 		}
+ 		num_valid_sets++;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 72db370e2f21f..50e643bfdfbad 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1298,6 +1298,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+ 		return NULL;
+ 
+ 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
++		dc->caps.linear_pitch_alignment = 64;
+ 		if (!dc_construct_ctx(dc, init_params))
+ 			goto destruct_dc;
+ 	} else {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 84923c5400d32..733e445331ea5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -3927,6 +3927,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ 
+ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+ {
++	if (dc == NULL || stream == NULL)
++		return DC_ERROR_UNEXPECTED;
++
+ 	struct dc_link *link = stream->link;
+ 	struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ 	enum dc_status res = DC_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+index 28149e53c2a68..eeb5b8247c965 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+@@ -102,7 +102,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
+ 					break;
+ 			}
+ 
+-			fsleep(500);
++			/* must *not* be fsleep - this can be called from high irq levels */
++			udelay(500);
+ 		}
+ 
+ 		/* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+index 994fb732a7cb7..a0d437f0ce2ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
+ 	int pair;
+ 	uint16_t odd_coef, even_coef;
+ 
++	if (!filter)
++		return;
++
+ 	for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ 		for (pair = 0; pair < tap_pairs; pair++) {
+ 			even_coef = filter[phase * taps + 2 * pair];
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+index 50b0434354f8f..c08169de3660c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+@@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks(
+ 	ASSERT(fclks->num_levels);
+ 
+ 	vmin0p65_idx = 0;
+-	vmid0p72_idx = fclks->num_levels -
+-		(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+-	vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+-	vmax0p9_idx = fclks->num_levels - 1;
++	vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0;
++	vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0;
++	vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0;
+ 
+ 	dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 		32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+index e2bcd205aa936..8da97a96b1ceb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+@@ -304,6 +304,16 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
+ 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ 		}
+ 
++		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++		 * MAX_NUM_DPM_LVL is 8.
++		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
++		 * DC__VOLTAGE_STATES is 40.
++		 */
++		if (num_states > MAX_NUM_DPM_LVL) {
++			ASSERT(0);
++			return;
++		}
++
+ 		dcn3_02_soc.num_states = num_states;
+ 		for (i = 0; i < dcn3_02_soc.num_states; i++) {
+ 			dcn3_02_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+index 3eb3a021ab7d7..c283780ad0621 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+@@ -299,6 +299,16 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
+ 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ 		}
+ 
++		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++		 * MAX_NUM_DPM_LVL is 8.
++		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
++		 * DC__VOLTAGE_STATES is 40.
++		 */
++		if (num_states > MAX_NUM_DPM_LVL) {
++			ASSERT(0);
++			return;
++		}
++
+ 		dcn3_03_soc.num_states = num_states;
+ 		for (i = 0; i < dcn3_03_soc.num_states; i++) {
+ 			dcn3_03_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index cf3b400c8619b..3d82cbef12740 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -2885,6 +2885,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
+ 				dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ 			}
+ 
++			/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++			 * MAX_NUM_DPM_LVL is 8.
++			 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
++			 * DC__VOLTAGE_STATES is 40.
++			 */
++			if (num_states > MAX_NUM_DPM_LVL) {
++				ASSERT(0);
++				return;
++			}
++
+ 			dcn3_2_soc.num_states = num_states;
+ 			for (i = 0; i < dcn3_2_soc.num_states; i++) {
+ 				dcn3_2_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index b26fcf86014c7..ae2196c36f218 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -789,6 +789,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
+ 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ 		}
+ 
++		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++		 * MAX_NUM_DPM_LVL is 8.
++		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
++		 * DC__VOLTAGE_STATES is 40.
++		 */
++		if (num_states > MAX_NUM_DPM_LVL) {
++			ASSERT(0);
++			return;
++		}
++
+ 		dcn3_21_soc.num_states = num_states;
+ 		for (i = 0; i < dcn3_21_soc.num_states; i++) {
+ 			dcn3_21_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+index 9a3ded3111952..85453bbb4f9b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+@@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+ 
+ 	// Total Available Pipes Support Check
+ 	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+-		total_pipes += mode_lib->vba.DPPPerPlane[k];
+ 		pipe_idx = get_pipe_idx(mode_lib, k);
++		if (pipe_idx == -1) {
++			ASSERT(0);
++			continue; // skip inactive planes
++		}
++		total_pipes += mode_lib->vba.DPPPerPlane[k];
++
+ 		if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
+ 			mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
+ 		else
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index 3ede6e02c3a78..f2037d78f71ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	struct dc_context *ctx)
+ {
+ 	struct gpio_service *service;
+-	uint32_t index_of_id;
++	int32_t index_of_id;
+ 
+ 	service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+ 
+@@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	return service;
+ 
+ failure_2:
+-	while (index_of_id) {
++	while (index_of_id > 0) {
+ 		--index_of_id;
+ 		kfree(service->busyness[index_of_id]);
+ 	}
+@@ -239,6 +239,9 @@ static bool is_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return false;
++
+ 	return service->busyness[id][en];
+ }
+ 
+@@ -247,6 +250,9 @@ static void set_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = true;
+ }
+ 
+@@ -255,6 +261,9 @@ static void set_pin_free(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = false;
+ }
+ 
+@@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+@@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 25ffc052d53be..df2cb5279ce51 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
+ 	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ 	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ 	struct i2c_command i2c_command;
+-	uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
++	uint8_t offset;
+ 	struct i2c_payload i2c_payloads[] = {
+-		{ true, 0, 1, &offset },
++		{ true, 0, 1, 0 },
+ 		/* actual hdcp payload, will be filled later, zeroed for now*/
+ 		{ 0 }
+ 	};
+ 
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
++	offset = hdcp_i2c_offsets[message_info->msg_id];
++	i2c_payloads[0].data = &offset;
++
+ 	switch (message_info->link) {
+ 	case HDCP_LINK_SECONDARY:
+ 		i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+@@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
+ 	struct dc_link *link,
+ 	struct hdcp_protection_message *message_info)
+ {
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
+ 	return dpcd_access_helper(
+ 		link,
+ 		message_info->length,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 9a0beaf601f87..3d589072fe307 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -528,7 +528,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
+ 		struct dc_link_settings *cur,
+ 		enum link_training_result training_result)
+ {
+-	uint8_t cur_idx = 0, next_idx;
++	uint32_t cur_idx = 0, next_idx;
+ 	bool found = false;
+ 
+ 	if (training_result == LINK_TRAINING_ABORT)
+@@ -908,21 +908,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
+ 
+ 	memset(link_setting, 0, sizeof(*link_setting));
+ 
+-	/* if preferred is specified through AMDDP, use it, if it's enough
+-	 * to drive the mode
+-	 */
+-	if (link->preferred_link_setting.lane_count !=
+-			LANE_COUNT_UNKNOWN &&
+-			link->preferred_link_setting.link_rate !=
+-					LINK_RATE_UNKNOWN) {
++	if (dc_is_dp_signal(stream->signal)  &&
++			link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
++			link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
++		/* if preferred is specified through AMDDP, use it, if it's enough
++		 * to drive the mode
++		 */
+ 		*link_setting = link->preferred_link_setting;
+-		return true;
+-	}
+-
+-	/* MST doesn't perform link training for now
+-	 * TODO: add MST specific link training routine
+-	 */
+-	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++	} else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++		/* MST doesn't perform link training for now
++		 * TODO: add MST specific link training routine
++		 */
+ 		decide_mst_link_settings(link, link_setting);
+ 	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ 		/* enable edp link optimization for DSC eDP case */
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+index 16a62e0187122..9d1adfc09fb2a 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+@@ -914,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent(
+ 			/* Driver does not need to train the first hop. Skip DPCD read and clear
+ 			 * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ 			 */
+-			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT)
+ 				link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+ 
+-			for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
++			for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) {
+ 				aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ 						((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ 				core_link_read_dpcd(
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index 8e9caae7c9559..1b2df97226a3f 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,11 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+@@ -175,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		success = hdcp->config.ddc.funcs.read_i2c(
+ 				hdcp->config.ddc.handle,
+ 				HDCP_I2C_ADDR,
+@@ -219,11 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.write_dpcd(
+@@ -239,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ 		memmove(&hdcp->buf[1], buf, buf_len);
+ 		success = hdcp->config.ddc.funcs.write_i2c(
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index 7bf46e4974f88..86f95a291d65f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	struct amdgpu_dpm_thermal *range =
+ 				&adev->pm.dpm.thermal;
+-	uint32_t gpu_temperature, size;
++	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
+ 	int ret;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index f4bd8e9357e22..18f00038d8441 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ {
+ 	int result;
+ 	unsigned int i;
+-	unsigned int table_entries;
+ 	struct pp_power_state *state;
+-	int size;
++	int size, table_entries;
+ 
+ 	if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ 		return 0;
+@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ 	if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ 		return 0;
+ 
+-	hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
++	table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+ 
+-	hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
++	size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ 					  sizeof(struct pp_power_state);
+ 
+-	if (table_entries == 0 || size == 0) {
++	if (table_entries <= 0 || size == 0) {
+ 		pr_warn("Please check whether power state management is supported on this asic\n");
++		hwmgr->num_ps = 0;
++		hwmgr->ps_size = 0;
+ 		return 0;
+ 	}
++	hwmgr->num_ps = table_entries;
++	hwmgr->ps_size = size;
+ 
+ 	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
+ 	if (hwmgr->ps == NULL)
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index f503e61faa600..cc3b62f733941 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
+ 					j++;
+ 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
+ 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+-					table->mc_reg_table_entry[num_ranges].mc_data[i] =
+-						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
++					if (i)
++						table->mc_reg_table_entry[num_ranges].mc_data[i] =
++							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
+ 				}
+ 			}
+ 			num_ranges++;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index 02ba68d7c6546..f62381b189ade 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 
+ 	switch (type) {
+ 	case PP_SCLK:
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++		if (ret)
++			return ret;
+ 
+ 	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+ 		if (now == data->gfx_max_freq_limit/100)
+@@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 					i == 2 ? "*" : "");
+ 		break;
+ 	case PP_MCLK:
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++		if (ret)
++			return ret;
+ 
+ 		for (i = 0; i < mclk_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 		}
+ 
+ 		if (input[0] == 0) {
+-			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++			ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++			if (ret)
++				return ret;
++
+ 			if (input[1] < min_freq) {
+ 				pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ 					input[1], min_freq);
+@@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 			}
+ 			smu10_data->gfx_actual_soft_min_freq = input[1];
+ 		} else if (input[0] == 1) {
+-			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++			ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++			if (ret)
++				return ret;
++
+ 			if (input[1] > max_freq) {
+ 				pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ 					input[1], max_freq);
+@@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 			pr_err("Input parameter number not correct\n");
+ 			return -EINVAL;
+ 		}
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+-
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++		if (ret)
++			return ret;
+ 		smu10_data->gfx_actual_soft_min_freq = min_freq;
++
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++		if (ret)
++			return ret;
++
+ 		smu10_data->gfx_actual_soft_max_freq = max_freq;
+ 	} else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ 		if (size != 0) {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 163864bd51c34..53849fd3615f6 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5641,7 +5641,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
+ 	mode = input[size];
+ 	switch (mode) {
+ 	case PP_SMC_POWER_PROFILE_CUSTOM:
+-		if (size < 8 && size != 0)
++		if (size != 8 && size != 0)
+ 			return -EINVAL;
+ 		/* If only CUSTOM is passed in, use the saved values. Check
+ 		 * that we actually have a CUSTOM profile by ensuring that
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index eb744401e0567..7e11974208732 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 	data->uvd_dpm.soft_min_clk = 0;
+ 	data->uvd_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].vclk;
+@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 	data->vce_dpm.soft_min_clk = 0;
+ 	data->vce_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].ecclk;
+@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 	data->acp_dpm.soft_min_clk = 0;
+ 	data->acp_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].acpclk;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index d43a530aba0e3..6c87b3d4ab362 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+-	int i;
+ 	uint32_t sub_vendor_id, hw_revision;
+ 	uint32_t top32, bottom32;
+ 	struct amdgpu_device *adev = hwmgr->adev;
++	int ret, i;
+ 
+ 	vega10_initialize_power_tune_defaults(hwmgr);
+ 
+@@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	if (data->registry_data.vr0hot_enabled)
+ 		data->smu_features[GNLD_VR0HOT].supported = true;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetSmuVersion,
+ 			&hwmgr->smu_version);
++	if (ret)
++		return ret;
++
+ 		/* ACG firmware has major version 5 */
+ 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
+ 		data->smu_features[GNLD_ACG].supported = true;
+@@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++	return 0;
+ }
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+@@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega10_set_features_platform_caps(hwmgr);
+ 
+-	vega10_init_dpm_defaults(hwmgr);
++	result = vega10_init_dpm_defaults(hwmgr);
++	if (result)
++		return result;
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+ 	/* Get leakage voltage based on leakage ID. */
+@@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+ 	uint32_t agc_btc_response;
++	int ret;
+ 
+ 	if (data->smu_features[GNLD_ACG].supported) {
+ 		if (0 == vega10_enable_smc_features(hwmgr, true,
+ 					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+ 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		if (ret)
++			return ret;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		if (ret)
++			agc_btc_response = 0;
+ 
+ 		if (1 == agc_btc_response) {
+ 			if (1 == data->acg_loop_state)
+@@ -2571,8 +2587,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ 		}
+ 	}
+ 
+-	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
++	result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+ 			VOLTAGE_OBJ_SVID2,  &voltage_table);
++	PP_ASSERT_WITH_CODE(!result,
++			"Failed to get voltage table!",
++			return result);
+ 	pp_table->MaxVidStep = voltage_table.max_vid_step;
+ 
+ 	pp_table->GfxDpmVoltageMode =
+@@ -3910,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
+ 		uint32_t *query)
+ {
+ 	uint32_t value;
++	int ret;
+ 
+ 	if (!query)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	if (ret)
++		return ret;
+ 
+ 	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
+ 	*query = value << 8;
+@@ -4810,14 +4832,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ 	PPTable_t *pptable = &(data->smc_state_table.pp_table);
+ 
+-	int i, now, size = 0, count = 0;
++	int i, ret, now, size = 0, count = 0;
+ 
+ 	switch (type) {
+ 	case PP_SCLK:
+ 		if (data->registry_data.sclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		if (hwmgr->pp_one_vf &&
+ 		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+@@ -4833,7 +4857,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.mclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < mclk_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4844,7 +4870,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.socclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < soc_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4855,8 +4883,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.dcefclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc_with_parameter(hwmgr,
++		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ 				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < dcef_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index 460067933de2e..069c0f5205e00 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	uint32_t top32, bottom32;
+-	int i;
++	int i, ret;
+ 
+ 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ 			FEATURE_DPM_PREFETCHER_BIT;
+@@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	}
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++	return 0;
+ }
+ 
+ static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -410,7 +416,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega12_set_features_platform_caps(hwmgr);
+ 
+-	vega12_init_dpm_defaults(hwmgr);
++	result = vega12_init_dpm_defaults(hwmgr);
++	if (result) {
++		pr_err("%s failed\n", __func__);
++		return result;
++	}
+ 
+ 	/* Parse pptable data read from VBIOS */
+ 	vega12_set_private_data_based_on_pptable(hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 3b33af30eb0fb..9fdb9990d1882 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	uint32_t top32, bottom32;
+-	int i;
++	int i, ret;
+ 
+ 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ 			FEATURE_DPM_PREFETCHER_BIT;
+@@ -404,10 +404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	}
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++	return 0;
+ }
+ 
+ static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega20_hwmgr *data;
+ 	struct amdgpu_device *adev = hwmgr->adev;
++	int result;
+ 
+ 	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
+ 	if (data == NULL)
+@@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega20_set_features_platform_caps(hwmgr);
+ 
+-	vega20_init_dpm_defaults(hwmgr);
+-
++	result = vega20_init_dpm_defaults(hwmgr);
++	if (result) {
++		pr_err("%s failed\n", __func__);
++		return result;
++	}
+ 	/* Parse pptable data read from VBIOS */
+ 	vega20_set_private_data_based_on_pptable(hwmgr);
+ 
+@@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ 		struct vega20_hwmgr *data =
+ 			(struct vega20_hwmgr *)(hwmgr->backend);
+-		if (size == 0 && !data->is_custom_profile_set)
++
++		if (size != 10 && size != 0)
+ 			return -EINVAL;
+-		if (size < 10 && size != 0)
++
++		if (size == 0 && !data->is_custom_profile_set)
+ 			return -EINVAL;
+ 
+ 		result = vega20_get_activity_monitor_coeff(hwmgr,
+@@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ 			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ 			break;
++		default:
++			return -EINVAL;
+ 		}
+ 
+ 		result = vega20_set_activity_monitor_coeff(hwmgr,
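
The custom power-profile path above now rejects anything that is not exactly a 10-entry table (or an empty write reusing a previously stored profile), and an unknown clock id in the switch fails instead of falling through with a half-written coefficient set. A compact sketch of the same validate-before-apply shape; the field names and the 10-entry layout (1 selector + 9 coefficients) are illustrative, not the real ActivityMonitor_t:

#include <stddef.h>
#include <errno.h>

#define PROFILE_LEN 10	/* illustrative: 1 clock selector + 9 coefficients */

struct monitor {
	long gfx[PROFILE_LEN - 1];
	long soc[PROFILE_LEN - 1];
};

static int set_custom_profile(struct monitor *m, const long *input,
			      size_t size, int have_previous)
{
	size_t i;

	/* Exactly PROFILE_LEN values, or 0 to reuse the last profile. */
	if (size != PROFILE_LEN && size != 0)
		return -EINVAL;
	if (size == 0 && !have_previous)
		return -EINVAL;
	if (size == 0)
		return 0;	/* keep the previously stored coefficients */

	switch (input[0]) {	/* input[0] selects the clock domain */
	case 0:
		for (i = 0; i < PROFILE_LEN - 1; i++)
			m->gfx[i] = input[i + 1];
		break;
	case 1:
		for (i = 0; i < PROFILE_LEN - 1; i++)
			m->soc[i] = input[i + 1];
		break;
	default:
		return -EINVAL;	/* unknown domain: reject, don't fall through */
	}
	return 0;
}

int main(void)
{
	struct monitor m = { { 0 }, { 0 } };
	const long input[PROFILE_LEN] = { 1, 10, 20, 30, 40, 50, 60, 70, 80, 90 };

	return set_custom_profile(&m, input, PROFILE_LEN, 0);
}
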
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+index a70d738966490..f9c0f117725dd 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+@@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ 			    uint64_t *features_enabled)
+ {
+ 	uint32_t enabled_features;
++	int ret;
+ 
+ 	if (features_enabled == NULL)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetEnabledSmuFeatures,
+ 			&enabled_features);
++	if (ret)
++		return ret;
++
+ 	*features_enabled = enabled_features;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index c564f6e191f84..b1b23233635a6 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -1222,19 +1222,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
+ 					   value);
+ }
+ 
+-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
++static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+ {
+ 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+ 	DpmDescriptor_t *dpm_desc = NULL;
+-	uint32_t clk_index = 0;
++	int clk_index = 0;
+ 
+ 	clk_index = smu_cmn_to_asic_specific_index(smu,
+ 						   CMN2ASIC_MAPPING_CLK,
+ 						   clk_type);
++	if (clk_index < 0)
++		return clk_index;
++
+ 	dpm_desc = &pptable->DpmDescriptor[clk_index];
+ 
+ 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
+-	return dpm_desc->SnapToDiscrete == 0;
++	return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
+ }
+ 
+ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
+@@ -1290,7 +1293,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			return ret;
+ 
+-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (!ret) {
+ 			for (i = 0; i < count; i++) {
+ 				ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ 								      clk_type, i, &value);
+@@ -1499,7 +1506,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			return size;
+ 
+-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (!ret) {
+ 			for (i = 0; i < count; i++) {
+ 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ 				if (ret)
+@@ -1668,7 +1679,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
+ 	case SMU_UCLK:
+ 	case SMU_FCLK:
+ 		/* There is only 2 levels for fine grained DPM */
+-		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (ret) {
+ 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ 		}
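
navi10_is_support_fine_grained_dpm() previously returned bool, so a negative error from smu_cmn_to_asic_specific_index() was both used as an array index and collapsed into true/false. Returning int lets every caller treat <0 as an error and 0/1 as the answer. The tri-state convention in a minimal self-contained sketch; lookup() is a hypothetical stand-in for the clock-index mapping:

#include <stdio.h>
#include <errno.h>

static int lookup(int clk)	/* may fail with a negative errno */
{
	return clk >= 0 ? clk : -EINVAL;
}

/* Returns 1 for fine-grained DPM, 0 for discrete, negative errno on error. */
static int is_fine_grained(int clk, const int *snap_to_discrete)
{
	int idx = lookup(clk);

	if (idx < 0)
		return idx;	/* propagate instead of indexing with it */
	return snap_to_discrete[idx] == 0 ? 1 : 0;
}

int main(void)
{
	const int snap[] = { 0, 1 };
	int ret = is_fine_grained(1, snap);

	if (ret < 0)
		fprintf(stderr, "lookup failed: %d\n", ret);
	else
		printf("%s DPM\n", ret ? "fine-grained" : "discrete");
	return 0;
}
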
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 201cec5998428..f46cda8894831 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -1009,6 +1009,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ 		}
+ 	}
+ 	if (min) {
++		ret = vangogh_get_profiling_clk_mask(smu,
++						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
++						     NULL,
++						     NULL,
++						     &mclk_mask,
++						     &fclk_mask,
++						     &soc_mask);
++		if (ret)
++			goto failed;
++
++		vclk_mask = dclk_mask = 0;
++
+ 		switch (clk_type) {
+ 		case SMU_UCLK:
+ 		case SMU_MCLK:
+@@ -2481,6 +2493,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ 					      start, &residency);
++	if (ret)
++		return ret;
+ 
+ 	if (!start)
+ 		adev->gfx.gfx_off_residency = residency;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index 5afd03e42bbfc..ded8952d98490 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -1931,7 +1931,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
+ 
+ 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ 						SMU_MSG_GfxDeviceDriverReset);
+-
++	if (index < 0)
++		return -EINVAL;
+ 	mutex_lock(&smu->message_lock);
+ 	if (smu_version >= 0x00441400) {
+ 		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index be4b7b64f8785..44c5f8585f1ee 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -2058,6 +2058,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+ 
+ 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ 					       SMU_MSG_GfxDeviceDriverReset);
++	if (index < 0)
++		return index;
+ 
+ 	mutex_lock(&smu->message_lock);
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index d941c3a0e6113..7fd4a5fe03edf 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -2034,7 +2034,7 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
+ 		dev_err(tc->dev, "syserr %x\n", stat);
+ 	}
+ 
+-	if (tc->hpd_pin >= 0 && tc->bridge.dev) {
++	if (tc->hpd_pin >= 0 && tc->bridge.dev && tc->aux.drm_dev) {
+ 		/*
+ 		 * H is triggered when the GPIO goes high.
+ 		 *
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 117237d3528bd..618b045230336 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -631,6 +631,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
+ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
+ 				 u32 width, u32 height)
+ {
++	/*
++	 * This function may be invoked by panic() to flush the frame
++	 * buffer, where all CPUs except the panic CPU are stopped.
++	 * During the following schedule_work(), the panic CPU needs
++	 * the worker_pool lock, which might be held by a stopped CPU,
++	 * causing schedule_work() and panic() to block. Return early on
++	 * oops_in_progress to prevent this blocking.
++	 */
++	if (oops_in_progress)
++		return;
++
+ 	drm_fb_helper_add_damage_clip(helper, x, y, width, height);
+ 
+ 	schedule_work(&helper->damage_work);
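
drm_fb_helper_damage() now refuses to queue work once a panic is underway, because the panic CPU could block on a worker_pool lock held by one of the CPUs that were just stopped. The shape of that guard, modeled in plain C with a flag standing in for the kernel's oops_in_progress:

#include <stdio.h>

static int oops_in_progress;	/* set by the panic path */

static void queue_damage_worker(void)
{
	printf("queued\n");	/* stand-in for schedule_work() */
}

static void damage(int x, int y, int w, int h)
{
	/* A stopped CPU may hold the workqueue pool lock; queuing work
	 * from the panic CPU could deadlock, so bail out early. */
	if (oops_in_progress)
		return;
	(void)x; (void)y; (void)w; (void)h;
	queue_damage_worker();
}

int main(void)
{
	damage(0, 0, 10, 10);
	oops_in_progress = 1;
	damage(0, 0, 10, 10);	/* silently skipped */
	return 0;
}
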
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 5db52d6c5c35c..039da0d1a613b 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_leftside_up,
++	}, {	/* OrangePi Neo */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Samsung GalaxyBook 10.6 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index 815dfe30492b6..b43ac61201f31 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -534,6 +534,7 @@ int meson_plane_create(struct meson_drm *priv)
+ 	struct meson_plane *meson_plane;
+ 	struct drm_plane *plane;
+ 	const uint64_t *format_modifiers = format_modifiers_default;
++	int ret;
+ 
+ 	meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ 				   GFP_KERNEL);
+@@ -548,12 +549,16 @@ int meson_plane_create(struct meson_drm *priv)
+ 	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ 		format_modifiers = format_modifiers_afbc_g12a;
+ 
+-	drm_universal_plane_init(priv->drm, plane, 0xFF,
+-				 &meson_plane_funcs,
+-				 supported_drm_formats,
+-				 ARRAY_SIZE(supported_drm_formats),
+-				 format_modifiers,
+-				 DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
++					&meson_plane_funcs,
++					supported_drm_formats,
++					ARRAY_SIZE(supported_drm_formats),
++					format_modifiers,
++					DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	if (ret) {
++		devm_kfree(priv->drm->dev, meson_plane);
++		return ret;
++	}
+ 
+ 	drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index bae0becfa24be..ae0f454c305d6 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -153,8 +153,9 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+ 
+ static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
+ {
+-	amd_smn_read(amd_pci_dev_to_node_id(pdev),
+-		     ZEN_REPORTED_TEMP_CTRL_BASE, regval);
++	if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++			 ZEN_REPORTED_TEMP_CTRL_BASE, regval))
++		*regval = 0;
+ }
+ 
+ static long get_raw_temp(struct k10temp_data *data)
+@@ -205,6 +206,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 			     long *val)
+ {
+ 	struct k10temp_data *data = dev_get_drvdata(dev);
++	int ret = -EOPNOTSUPP;
+ 	u32 regval;
+ 
+ 	switch (attr) {
+@@ -221,13 +223,17 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 				*val = 0;
+ 			break;
+ 		case 2 ... 13:		/* Tccd{1-12} */
+-			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+-				     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+-						  &regval);
++			ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
++					   ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
++					   &regval);
++
++			if (ret)
++				return ret;
++
+ 			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
+ 			break;
+ 		default:
+-			return -EOPNOTSUPP;
++			return ret;
+ 		}
+ 		break;
+ 	case hwmon_temp_max:
+@@ -243,7 +249,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 			- ((regval >> 24) & 0xf)) * 500 + 52000;
+ 		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		return ret;
+ 	}
+ 	return 0;
+ }
+@@ -381,8 +387,20 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ 	int i;
+ 
+ 	for (i = 0; i < limit; i++) {
+-		amd_smn_read(amd_pci_dev_to_node_id(pdev),
+-			     ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
++		/*
++		 * Ignore inaccessible CCDs.
++		 *
++		 * Some systems will return a register value of 0, and the TEMP_VALID
++		 * bit check below will naturally fail.
++		 *
++		 * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for
++		 * the register value. And this will incorrectly pass the TEMP_VALID
++		 * bit check.
++		 */
++		if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++				 ZEN_CCD_TEMP(data->ccd_offset, i), &regval))
++			continue;
++
+ 		if (regval & ZEN_CCD_TEMP_VALID)
+ 			data->show_temp |= BIT(TCCD_BIT(i));
+ 	}
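
Both k10temp hunks hinge on the failure mode spelled out in the new comment: a failed SMN read can hand back PCI_ERROR_RESPONSE (0xFFFFFFFF), in which every bit, including ZEN_CCD_TEMP_VALID, is set, so the error must be filtered out before any bit test. Modeled in plain C; the VALID bit position is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define CCD_TEMP_VALID	(1u << 11)	/* illustrative bit position */

/* Stand-in for amd_smn_read(): nonzero return means the read failed
 * and *regval may hold PCI_ERROR_RESPONSE (0xFFFFFFFF). */
static int smn_read(int ccd, uint32_t *regval)
{
	if (ccd == 2) {
		*regval = 0xFFFFFFFFu;	/* error response, all bits set */
		return -EIO;
	}
	*regval = (ccd == 0) ? CCD_TEMP_VALID : 0;
	return 0;
}

int main(void)
{
	uint32_t regval, show = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (smn_read(i, &regval))
			continue;	/* don't let 0xFFFFFFFF pass the VALID test */
		if (regval & CCD_TEMP_VALID)
			show |= 1u << i;
	}
	printf("show_temp mask: 0x%x\n", show);	/* 0x1: only CCD0 valid */
	return 0;
}
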
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index ada694ba9f958..f279dd010b73e 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ }
+ EXPORT_SYMBOL_GPL(__hwspin_unlock);
+ 
++/**
++ * hwspin_lock_bust() - bust a specific hwspinlock
++ * @hwlock: a previously-acquired hwspinlock which we want to bust
++ * @id: identifier of the remote lock holder, if applicable
++ *
++ * This function will bust a hwspinlock that was previously acquired as
++ * long as the current owner of the lock matches the id given by the caller.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or the
++ * bust operation fails, or -EOPNOTSUPP if the bust operation is not
++ * defined for the hwspinlock.
++ */
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	if (WARN_ON(!hwlock))
++		return -EINVAL;
++
++	if (!hwlock->bank->ops->bust) {
++		pr_err("bust operation not defined\n");
++		return -EOPNOTSUPP;
++	}
++
++	return hwlock->bank->ops->bust(hwlock, id);
++}
++EXPORT_SYMBOL_GPL(hwspin_lock_bust);
++
+ /**
+  * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+  * @bank: the hwspinlock device bank
+diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
+index 29892767bb7a0..f298fc0ee5adb 100644
+--- a/drivers/hwspinlock/hwspinlock_internal.h
++++ b/drivers/hwspinlock/hwspinlock_internal.h
+@@ -21,6 +21,8 @@ struct hwspinlock_device;
+  * @trylock: make a single attempt to take the lock. returns 0 on
+  *	     failure and true on success. may _not_ sleep.
+  * @unlock:  release the lock. always succeed. may _not_ sleep.
++ * @bust:    optional, platform-specific bust handler, called by hwspinlock
++ *	     core to bust a specific lock.
+  * @relax:   optional, platform-specific relax handler, called by hwspinlock
+  *	     core while spinning on a lock, between two successive
+  *	     invocations of @trylock. may _not_ sleep.
+@@ -28,6 +30,7 @@ struct hwspinlock_device;
+ struct hwspinlock_ops {
+ 	int (*trylock)(struct hwspinlock *lock);
+ 	void (*unlock)(struct hwspinlock *lock);
++	int (*bust)(struct hwspinlock *lock, unsigned int id);
+ 	void (*relax)(struct hwspinlock *lock);
+ };
+ 
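
The new op slots into the provider's ops table next to trylock/unlock. Modeled below in plain C with an in-memory word standing in for the hardware lock register; the "owner == id + 1" encoding is invented for illustration, real hardware defines its own owner semantics:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* 0 = free, owner_id + 1 = held by that owner (illustrative encoding). */
struct demo_lock { uint32_t word; };

static int demo_bust(struct demo_lock *l, unsigned int id)
{
	if (l->word == 0)
		return 0;		/* already free, nothing to bust */
	if (l->word != id + 1)
		return -EINVAL;		/* held by someone else: refuse */
	l->word = 0;			/* force-release on the holder's behalf */
	return 0;
}

int main(void)
{
	struct demo_lock l = { .word = 7 + 1 };	/* remote host 7 holds it */

	printf("bust as 3: %d\n", demo_bust(&l, 3));	/* -EINVAL */
	printf("bust as 7: %d\n", demo_bust(&l, 7));	/* 0, lock freed */
	return 0;
}
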
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 5e1a85ca12119..121bde49ccb7d 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -752,9 +752,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
+ 							INDIO_MAX_RAW_ELEMENTS,
+ 							vals, &val_len,
+ 							this_attr->address);
+-	else
++	else if (indio_dev->info->read_raw)
+ 		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
+ 				    &vals[0], &vals[1], this_attr->address);
++	else
++		return -EINVAL;
+ 
+ 	if (ret < 0)
+ 		return ret;
+@@ -836,6 +838,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
+ 	int length;
+ 	int type;
+ 
++	if (!indio_dev->info->read_avail)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ 					  &vals, &type, &length,
+ 					  this_attr->address);
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 19f7a91157ee4..f67e4afa5f94b 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -285,6 +285,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (!indio_dev->info->write_event_config)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->write_event_config(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr), val);
+@@ -300,6 +303,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
+ 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ 	int val;
+ 
++	if (!indio_dev->info->read_event_config)
++		return -EINVAL;
++
+ 	val = indio_dev->info->read_event_config(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr));
+@@ -318,6 +324,9 @@ static ssize_t iio_ev_value_show(struct device *dev,
+ 	int val, val2, val_arr[2];
+ 	int ret;
+ 
++	if (!indio_dev->info->read_event_value)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->read_event_value(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 7a1f6713318a3..b855565384757 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -562,6 +562,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
+ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ 			    enum iio_chan_info_enum info)
+ {
++	const struct iio_info *iio_info = chan->indio_dev->info;
+ 	int unused;
+ 	int vals[INDIO_MAX_RAW_ELEMENTS];
+ 	int ret;
+@@ -573,15 +574,18 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ 	if (!iio_channel_has_info(chan->channel, info))
+ 		return -EINVAL;
+ 
+-	if (chan->indio_dev->info->read_raw_multi) {
+-		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
+-					chan->channel, INDIO_MAX_RAW_ELEMENTS,
+-					vals, &val_len, info);
++	if (iio_info->read_raw_multi) {
++		ret = iio_info->read_raw_multi(chan->indio_dev,
++					       chan->channel,
++					       INDIO_MAX_RAW_ELEMENTS,
++					       vals, &val_len, info);
+ 		*val = vals[0];
+ 		*val2 = vals[1];
++	} else if (iio_info->read_raw) {
++		ret = iio_info->read_raw(chan->indio_dev,
++					 chan->channel, val, val2, info);
+ 	} else {
+-		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+-					chan->channel, val, val2, info);
++		return -EINVAL;
+ 	}
+ 
+ 	return ret;
+@@ -801,11 +805,15 @@ static int iio_channel_read_avail(struct iio_channel *chan,
+ 				  const int **vals, int *type, int *length,
+ 				  enum iio_chan_info_enum info)
+ {
++	const struct iio_info *iio_info = chan->indio_dev->info;
++
+ 	if (!iio_channel_has_available(chan->channel, info))
+ 		return -EINVAL;
+ 
+-	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+-						 vals, type, length, info);
++	if (iio_info->read_avail)
++		return iio_info->read_avail(chan->indio_dev, chan->channel,
++					    vals, type, length, info);
++	return -EINVAL;
+ }
+ 
+ int iio_read_avail_channel_attribute(struct iio_channel *chan,
+@@ -995,8 +1003,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type);
+ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
+ 			     enum iio_chan_info_enum info)
+ {
+-	return chan->indio_dev->info->write_raw(chan->indio_dev,
+-						chan->channel, val, val2, info);
++	const struct iio_info *iio_info = chan->indio_dev->info;
++
++	if (iio_info->write_raw)
++		return iio_info->write_raw(chan->indio_dev,
++					   chan->channel, val, val2, info);
++	return -EINVAL;
+ }
+ 
+ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
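
All four IIO hunks close the same hole: the core called an info callback without checking that the driver actually provided it, so a driver registering only a subset of read_raw/read_raw_multi/read_avail/write_raw could oops the core. The dispatch shape, reduced to a self-contained sketch:

#include <stdio.h>
#include <errno.h>

struct info {
	int (*read_raw_multi)(int *vals, int n);
	int (*read_raw)(int *val);
};

/* Prefer the multi-value hook, fall back to the single-value one,
 * and fail cleanly instead of dereferencing a NULL pointer. */
static int channel_read(const struct info *info, int *val, int *val2)
{
	int vals[2] = { 0, 0 };
	int ret;

	if (info->read_raw_multi) {
		ret = info->read_raw_multi(vals, 2);
		*val = vals[0];
		*val2 = vals[1];
		return ret;
	}
	if (info->read_raw)
		return info->read_raw(val);
	return -EINVAL;
}

static int my_read(int *val) { *val = 42; return 0; }

int main(void)
{
	struct info partial = { .read_raw = my_read };	/* no multi hook */
	int v = 0, v2 = 0;

	printf("ret=%d val=%d\n", channel_read(&partial, &v, &v2), v);
	return 0;
}
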
+diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
+index 16a24a05fc2a6..bafd210dd43e8 100644
+--- a/drivers/infiniband/hw/efa/efa_com.c
++++ b/drivers/infiniband/hw/efa/efa_com.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ /*
+- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
++ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+  */
+ 
+ #include "efa_com.h"
+@@ -406,8 +406,8 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
+ 	return comp_ctx;
+ }
+ 
+-static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+-						   struct efa_admin_acq_entry *cqe)
++static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
++						  struct efa_admin_acq_entry *cqe)
+ {
+ 	struct efa_comp_ctx *comp_ctx;
+ 	u16 cmd_id;
+@@ -416,11 +416,11 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+ 			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
+ 
+ 	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
+-	if (!comp_ctx) {
++	if (comp_ctx->status != EFA_CMD_SUBMITTED) {
+ 		ibdev_err(aq->efa_dev,
+-			  "comp_ctx is NULL. Changing the admin queue running state\n");
+-		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+-		return;
++			  "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
++			  cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
++		return -EINVAL;
+ 	}
+ 
+ 	comp_ctx->status = EFA_CMD_COMPLETED;
+@@ -428,14 +428,17 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+ 
+ 	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ 		complete(&comp_ctx->wait_event);
++
++	return 0;
+ }
+ 
+ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ {
+ 	struct efa_admin_acq_entry *cqe;
+ 	u16 queue_size_mask;
+-	u16 comp_num = 0;
++	u16 comp_cmds = 0;
+ 	u8 phase;
++	int err;
+ 	u16 ci;
+ 
+ 	queue_size_mask = aq->depth - 1;
+@@ -453,10 +456,12 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ 		 * phase bit was validated
+ 		 */
+ 		dma_rmb();
+-		efa_com_handle_single_admin_completion(aq, cqe);
++		err = efa_com_handle_single_admin_completion(aq, cqe);
++		if (!err)
++			comp_cmds++;
+ 
++		aq->cq.cc++;
+ 		ci++;
+-		comp_num++;
+ 		if (ci == aq->depth) {
+ 			ci = 0;
+ 			phase = !phase;
+@@ -465,10 +470,9 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ 		cqe = &aq->cq.entries[ci];
+ 	}
+ 
+-	aq->cq.cc += comp_num;
+ 	aq->cq.phase = phase;
+-	aq->sq.cc += comp_num;
+-	atomic64_add(comp_num, &aq->stats.completed_cmd);
++	aq->sq.cc += comp_cmds;
++	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
+ }
+ 
+ static int efa_com_comp_status_to_errno(u8 comp_status)
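
The completion-handling rework above separates two counts that used to be one: the CQ consumer index now advances for every entry polled, valid or not, while the SQ consumer and the completed-command statistic advance only for completions that matched a command in SUBMITTED state. Modeled in plain C:

#include <stdio.h>

struct queues { unsigned int cq_cc, sq_cc, completed; };

/*
 * ok[i] == 0 means completion i matched a command in SUBMITTED state;
 * nonzero models the "unexpected command id" case from the patch.
 */
static void poll_completions(struct queues *q, const int *ok, int n)
{
	unsigned int comp_cmds = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (ok[i] == 0)
			comp_cmds++;	/* a real command finished */
		q->cq_cc++;		/* CQ entry consumed either way */
	}
	q->sq_cc += comp_cmds;		/* only reclaim matched SQ slots */
	q->completed += comp_cmds;
}

int main(void)
{
	struct queues q = { 0, 0, 0 };
	const int ok[] = { 0, -1, 0 };	/* one bogus completion in between */

	poll_completions(&q, ok, 3);
	/* prints cq_cc=3 sq_cc=2 completed=2 */
	printf("cq_cc=%u sq_cc=%u completed=%u\n", q.cq_cc, q.sq_cc, q.completed);
	return 0;
}
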
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 68bf41147a619..04e7f58553db1 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -687,16 +687,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ 		goto error;
+ 	}
+ 
+-	size = nformats * sizeof(*format) + nframes * sizeof(*frame)
++	/*
++	 * Allocate memory for the formats, the frames and the intervals,
++	 * plus any required padding to guarantee that everything has the
++	 * correct alignment.
++	 */
++	size = nformats * sizeof(*format);
++	size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
++	size = ALIGN(size, __alignof__(*interval))
+ 	     + nintervals * sizeof(*interval);
++
+ 	format = kzalloc(size, GFP_KERNEL);
+-	if (format == NULL) {
++	if (!format) {
+ 		ret = -ENOMEM;
+ 		goto error;
+ 	}
+ 
+-	frame = (struct uvc_frame *)&format[nformats];
+-	interval = (u32 *)&frame[nframes];
++	frame = (void *)format + nformats * sizeof(*format);
++	frame = PTR_ALIGN(frame, __alignof__(*frame));
++	interval = (void *)frame + nframes * sizeof(*frame);
++	interval = PTR_ALIGN(interval, __alignof__(*interval));
+ 
+ 	streaming->formats = format;
+ 	streaming->nformats = 0;
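
The uvc fix replaces a plain byte-count sum with explicit padding so that the frame and interval arrays packed behind the format array each start at their natural alignment; the old sum only worked while the struct alignments happened to line up. The same layout math in a self-contained (gcc-flavored) userspace model, with ALIGN/PTR_ALIGN reimplemented locally:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Userspace re-implementations of the kernel's ALIGN/PTR_ALIGN. */
#define ALIGN_UP(x, a)	((((uintptr_t)(x)) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)	((void *)ALIGN_UP((uintptr_t)(p), (a)))

struct fmt { long id; };	/* stand-ins for uvc_format/uvc_frame */
struct frm { int w, h; };

int main(void)
{
	size_t nfmt = 3, nfrm = 5, nint = 7, size;
	struct fmt *format;
	struct frm *frame;
	uint32_t *interval;

	/* Size one block for three arrays, padding between them so each
	 * array starts at its natural alignment. */
	size = nfmt * sizeof(*format);
	size = ALIGN_UP(size, __alignof__(*frame)) + nfrm * sizeof(*frame);
	size = ALIGN_UP(size, __alignof__(*interval)) + nint * sizeof(*interval);

	format = calloc(1, size);
	if (!format)
		return 1;

	/* Carve the block up with the same alignment steps as the sizing. */
	frame = PTR_ALIGN((char *)format + nfmt * sizeof(*format),
			  __alignof__(*frame));
	interval = PTR_ALIGN((char *)frame + nfrm * sizeof(*frame),
			     __alignof__(*interval));

	printf("format=%p frame=%p interval=%p\n",
	       (void *)format, (void *)frame, (void *)interval);
	free(format);
	return 0;
}
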
+diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
+index ee3475bed37fa..1ff94affbaf3a 100644
+--- a/drivers/media/v4l2-core/v4l2-cci.c
++++ b/drivers/media/v4l2-core/v4l2-cci.c
+@@ -23,6 +23,15 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ 	u8 buf[8];
+ 	int ret;
+ 
++	/*
++	 * TODO: Fix smatch. Set *val to 0 here to avoid failing a smatch
++	 * check in callers that go on to read *val without initialising it
++	 * on their side. *val is set to a valid value whenever this
++	 * function returns 0, but smatch can't currently figure that out.
++	 */
++	*val = 0;
++
+ 	if (err && *err)
+ 		return *err;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 79ec6fcc9e259..57b0e26696e30 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -2369,7 +2369,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
+ 	if (flush)
+ 		mlx5e_shampo_flush_skb(rq, cqe, match);
+ free_hd_entry:
+-	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
++	if (likely(head_size))
++		mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ mpwrq_cqe_out:
+ 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ 		return;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+index 042ca03491243..d1db04baa1fa6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+@@ -7,7 +7,7 @@
+ /* don't try to optimize STE allocation if the stack is too constraining */
+ #define DR_RULE_MAX_STES_OPTIMIZED 0
+ #else
+-#define DR_RULE_MAX_STES_OPTIMIZED 5
++#define DR_RULE_MAX_STES_OPTIMIZED 2
+ #endif
+ #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
+ 
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 7e6e1bed525af..9d724d228b831 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -234,7 +234,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ 		name = dev_name(dev);
+ 
+ 	snprintf(intr->name, sizeof(intr->name),
+-		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
++		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
+ 
+ 	return devm_request_irq(dev, intr->vector, ionic_isr,
+ 				0, intr->name, &qcq->napi);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index b1380cf1b13ab..92c1500fa7c44 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1438,6 +1438,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)},	/* u-blox LARA-R6 01B */
+ 	{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},	/* u-blox LARA-L6 */
+ 	{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
++	{QMI_FIXED_INTF(0x2dee, 0x4d22, 5)},    /* MeiG Smart SRM825L */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 51ade909c84f0..bc01f2dafa948 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2140,7 +2140,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ 	return packets;
+ }
+ 
+-static void virtnet_poll_cleantx(struct receive_queue *rq)
++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
+ {
+ 	struct virtnet_info *vi = rq->vq->vdev->priv;
+ 	unsigned int index = vq2rxq(rq->vq);
+@@ -2158,7 +2158,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
+ 
+ 		do {
+ 			virtqueue_disable_cb(sq->vq);
+-			free_old_xmit_skbs(sq, true);
++			free_old_xmit_skbs(sq, !!budget);
+ 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ 
+ 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+@@ -2177,7 +2177,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 	unsigned int received;
+ 	unsigned int xdp_xmit = 0;
+ 
+-	virtnet_poll_cleantx(rq);
++	virtnet_poll_cleantx(rq, budget);
+ 
+ 	received = virtnet_receive(rq, budget, &xdp_xmit);
+ 
+@@ -2280,7 +2280,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ 	txq = netdev_get_tx_queue(vi->dev, index);
+ 	__netif_tx_lock(txq, raw_smp_processor_id());
+ 	virtqueue_disable_cb(sq->vq);
+-	free_old_xmit_skbs(sq, true);
++	free_old_xmit_skbs(sq, !!budget);
+ 
+ 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ 		netif_tx_wake_queue(txq);
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index a831d9474e9e0..83dc284392de2 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ 	struct qmi_txn txn;
+ 	const u8 *temp = data;
+ 	void __iomem *bdf_addr = NULL;
+-	int ret;
++	int ret = 0;
+ 	u32 remaining = len;
+ 
+ 	req = kzalloc(sizeof(*req), GFP_KERNEL);
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
+index f1379a5e60cdd..c49f585cc3965 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.c
++++ b/drivers/net/wireless/ath/ath12k/qmi.c
+@@ -2312,7 +2312,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
+ 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ 	struct qmi_txn txn = {};
+ 	const u8 *temp = data;
+-	int ret;
++	int ret = 0;
+ 	u32 remaining = len;
+ 
+ 	req = kzalloc(sizeof(*req), GFP_KERNEL);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 3356e36e2af73..0b71a71ca240b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -230,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ 		.data = { NULL, },
+ 	};
+ 
+-	if (fwrt->ops && fwrt->ops->fw_running &&
+-	    !fwrt->ops->fw_running(fwrt->ops_ctx))
++	if (!iwl_trans_fw_running(fwrt->trans))
+ 		return -EIO;
+ 
+ 	if (count < header_size + 1 || count > 1024 * 4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+index 702586945533e..5812b58c92b06 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+@@ -18,7 +18,6 @@
+ struct iwl_fw_runtime_ops {
+ 	void (*dump_start)(void *ctx);
+ 	void (*dump_end)(void *ctx);
+-	bool (*fw_running)(void *ctx);
+ 	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ 	bool (*d3_debug_enable)(void *ctx);
+ };
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 5336a4afde4d2..945524470a1e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -702,11 +702,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
+ 	mutex_unlock(&mvm->mutex);
+ }
+ 
+-static bool iwl_mvm_fwrt_fw_running(void *ctx)
+-{
+-	return iwl_mvm_firmware_running(ctx);
+-}
+-
+ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+ {
+ 	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+@@ -727,7 +722,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
+ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ 	.dump_start = iwl_mvm_fwrt_dump_start,
+ 	.dump_end = iwl_mvm_fwrt_dump_end,
+-	.fw_running = iwl_mvm_fwrt_fw_running,
+ 	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ 	.d3_debug_enable = iwl_mvm_d3_debug_enable,
+ };
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index c1644353053f0..01b17b8f4ff9d 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 
+ static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
+ 	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++
++	if (rtwvif != target_rtwvif)
++		return;
+ 
+ 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ 		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
+index b8cb77c9c4bd2..3132b27bc0064 100644
+--- a/drivers/pci/controller/dwc/pcie-al.c
++++ b/drivers/pci/controller/dwc/pcie-al.c
+@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
+ 	.write = pci_generic_config_write,
+ };
+ 
+-static void al_pcie_config_prepare(struct al_pcie *pcie)
++static int al_pcie_config_prepare(struct al_pcie *pcie)
+ {
+ 	struct al_pcie_target_bus_cfg *target_bus_cfg;
+ 	struct dw_pcie_rp *pp = &pcie->pci->pp;
+ 	unsigned int ecam_bus_mask;
++	struct resource_entry *ft;
+ 	u32 cfg_control_offset;
++	struct resource *bus;
+ 	u8 subordinate_bus;
+ 	u8 secondary_bus;
+ 	u32 cfg_control;
+ 	u32 reg;
+-	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ 
++	ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
++	if (!ft)
++		return -ENODEV;
++
++	bus = ft->res;
+ 	target_bus_cfg = &pcie->target_bus_cfg;
+ 
+ 	ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
+ 	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+ 
+ 	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
++
++	return 0;
+ }
+ 
+ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+ 	if (rc)
+ 		return rc;
+ 
+-	al_pcie_config_prepare(pcie);
++	rc = al_pcie_config_prepare(pcie);
++	if (rc)
++		return rc;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
+index 0d9c79b270ce2..63b6b261b8e58 100644
+--- a/drivers/platform/chrome/cros_ec_lpc_mec.c
++++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
+@@ -10,13 +10,65 @@
+ 
+ #include "cros_ec_lpc_mec.h"
+ 
++#define ACPI_LOCK_DELAY_MS 500
++
+ /*
+  * This mutex must be held while accessing the EMI unit. We can't rely on the
+  * EC mutex because memmap data may be accessed without it being held.
+  */
+ static DEFINE_MUTEX(io_mutex);
++/*
++ * An alternative mutex to be used when the ACPI AML code may also
++ * access memmap data.  When set, this mutex is used in preference to
++ * io_mutex.
++ */
++static acpi_handle aml_mutex;
++
+ static u16 mec_emi_base, mec_emi_end;
+ 
++/**
++ * cros_ec_lpc_mec_lock() - Acquire mutex for EMI
++ *
++ * @return: Negative error code, or zero for success
++ */
++static int cros_ec_lpc_mec_lock(void)
++{
++	bool success;
++
++	if (!aml_mutex) {
++		mutex_lock(&io_mutex);
++		return 0;
++	}
++
++	success = ACPI_SUCCESS(acpi_acquire_mutex(aml_mutex,
++						  NULL, ACPI_LOCK_DELAY_MS));
++	if (!success)
++		return -EBUSY;
++
++	return 0;
++}
++
++/**
++ * cros_ec_lpc_mec_unlock() - Release mutex for EMI
++ *
++ * @return: Negative error code, or zero for success
++ */
++static int cros_ec_lpc_mec_unlock(void)
++{
++	bool success;
++
++	if (!aml_mutex) {
++		mutex_unlock(&io_mutex);
++		return 0;
++	}
++
++	success = ACPI_SUCCESS(acpi_release_mutex(aml_mutex, NULL));
++	if (!success)
++		return -EBUSY;
++
++	return 0;
++}
++
+ /**
+  * cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address.
+  *
+@@ -77,6 +129,7 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ 	int io_addr;
+ 	u8 sum = 0;
+ 	enum cros_ec_lpc_mec_emi_access_mode access, new_access;
++	int ret;
+ 
+ 	/* Return checksum of 0 if window is not initialized */
+ 	WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
+@@ -92,7 +145,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ 	else
+ 		access = ACCESS_TYPE_LONG_AUTO_INCREMENT;
+ 
+-	mutex_lock(&io_mutex);
++	ret = cros_ec_lpc_mec_lock();
++	if (ret)
++		return ret;
+ 
+ 	/* Initialize I/O at desired address */
+ 	cros_ec_lpc_mec_emi_write_address(offset, access);
+@@ -134,7 +189,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ 	}
+ 
+ done:
+-	mutex_unlock(&io_mutex);
++	ret = cros_ec_lpc_mec_unlock();
++	if (ret)
++		return ret;
+ 
+ 	return sum;
+ }
+@@ -146,3 +203,18 @@ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
+ 	mec_emi_end = end;
+ }
+ EXPORT_SYMBOL(cros_ec_lpc_mec_init);
++
++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname)
++{
++	int status;
++
++	if (!adev)
++		return -ENOENT;
++
++	status = acpi_get_handle(adev->handle, pathname, &aml_mutex);
++	if (ACPI_FAILURE(status))
++		return -ENOENT;
++
++	return 0;
++}
++EXPORT_SYMBOL(cros_ec_lpc_mec_acpi_mutex);
+diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
+index 9d0521b23e8ae..3f3af37e58a50 100644
+--- a/drivers/platform/chrome/cros_ec_lpc_mec.h
++++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
+@@ -8,6 +8,8 @@
+ #ifndef __CROS_EC_LPC_MEC_H
+ #define __CROS_EC_LPC_MEC_H
+ 
++#include <linux/acpi.h>
++
+ enum cros_ec_lpc_mec_emi_access_mode {
+ 	/* 8-bit access */
+ 	ACCESS_TYPE_BYTE = 0x0,
+@@ -45,6 +47,15 @@ enum cros_ec_lpc_mec_io_type {
+  */
+ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end);
+ 
++/**
++ * cros_ec_lpc_mec_acpi_mutex() - Find and set ACPI mutex for MEC
++ *
++ * @adev:     Parent ACPI device
++ * @pathname: Name of AML mutex
++ * @return:   Negative error code, or zero for success
++ */
++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname);
++
+ /**
+  * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+  *
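
The lock-selection rule introduced above: if the platform's ACPI tables expose an AML mutex guarding the same EMI region, take that one (with a bounded wait) so the kernel serializes against firmware bytecode too; otherwise the local mutex suffices. The rule, modeled in plain C with acquire_aml() as a hypothetical stand-in for acpi_acquire_mutex():

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

#define ACPI_LOCK_DELAY_MS 500

static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *aml_mutex;		/* non-NULL once the ACPI handle is found */

/* Hypothetical stand-in for acpi_acquire_mutex(); 0 = acquired. */
static int acquire_aml(void *m, int timeout_ms)
{
	(void)m;
	(void)timeout_ms;
	return 0;
}

static int mec_lock(void)
{
	if (!aml_mutex) {
		/* No firmware mutex: only the kernel touches the EMI,
		 * a local mutex is enough. */
		pthread_mutex_lock(&io_mutex);
		return 0;
	}
	/* Firmware AML may also touch the EMI: serialize through the
	 * shared ACPI mutex, with a bounded wait. */
	if (acquire_aml(aml_mutex, ACPI_LOCK_DELAY_MS))
		return -EBUSY;
	return 0;
}

int main(void)
{
	printf("lock: %d\n", mec_lock());
	return 0;
}
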
+diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
+index d4a89d2bb43bb..2e8568d6cde94 100644
+--- a/drivers/soc/qcom/smem.c
++++ b/drivers/soc/qcom/smem.c
+@@ -359,6 +359,32 @@ static struct qcom_smem *__smem;
+ /* Timeout (ms) for the trylock of remote spinlocks */
+ #define HWSPINLOCK_TIMEOUT	1000
+ 
++/* The qcom hwspinlock id is always the smem host id plus one */
++#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
++
++/**
++ * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
++ * @host:	remote processor id
++ *
++ * Busts the hwspin_lock for the given smem host id. This helper is intended
++ * for remoteproc drivers that manage remoteprocs with an equivalent smem
++ * driver instance in the remote firmware. Drivers can force a release of the
++ * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, otherwise negative errno.
++ */
++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
++{
++	/* This function is for remote procs, so ignore SMEM_HOST_APPS */
++	if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
++		return -EINVAL;
++
++	return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
++}
++EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
++
+ /**
+  * qcom_smem_is_available() - Check if SMEM is available
+  *
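
The consumer side of this export lives in remoteproc drivers, whose hunks are not part of this patch; a hedged sketch of the intended call site (the function name and surrounding context are hypothetical):

/* Hypothetical remoteproc stop/crash path: */
static void demo_rproc_smem_cleanup(unsigned int smem_host)
{
	int ret;

	/* The crashed remote core may still hold the SMEM hwspinlock;
	 * force-release it before shared memory is reused. */
	ret = qcom_smem_bust_hwspin_lock_by_host(smem_host);
	if (ret)
		pr_warn("failed to bust smem hwspinlock for host %u: %d\n",
			smem_host, ret);
}
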
+diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
+index 77e9738e42f60..6910b4d4c427b 100644
+--- a/drivers/spi/spi-hisi-kunpeng.c
++++ b/drivers/spi/spi-hisi-kunpeng.c
+@@ -495,6 +495,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
+ 	host->transfer_one = hisi_spi_transfer_one;
+ 	host->handle_err = hisi_spi_handle_err;
+ 	host->dev.fwnode = dev->fwnode;
++	host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX);
+ 
+ 	hisi_spi_hw_init(hs);
+ 
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 94edac17b95f8..ad0ef5b6b8cf9 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2281,7 +2281,17 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
+ 		return err;
+ 	}
+ 
++	/*
++	 * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
++	 * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which
++	 * means we can simply read values regardless of version.
++	 */
+ 	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
++	/*
++	 * 0h: legacy single doorbell support is available
++	 * 1h: indicate that legacy single doorbell support has been removed
++	 */
++	hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ 	if (!hba->mcq_sup)
+ 		return 0;
+ 
+@@ -6457,7 +6467,8 @@ static void ufshcd_err_handler(struct work_struct *work)
+ 	if (ufshcd_err_handling_should_stop(hba))
+ 		goto skip_err_handling;
+ 
+-	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
++	if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
++	    !hba->force_reset) {
+ 		bool ret;
+ 
+ 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+@@ -10386,6 +10397,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	}
+ 
+ 	if (!is_mcq_supported(hba)) {
++		if (!hba->lsdb_sup) {
++			dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
++				__func__);
++			err = -EINVAL;
++			goto out_disable;
++		}
+ 		err = scsi_add_host(host, hba->dev);
+ 		if (err) {
+ 			dev_err(hba->dev, "scsi_add_host failed\n");
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 2a886e58cd632..42c60eba5fb6e 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -404,7 +404,7 @@ ucsi_register_displayport(struct ucsi_connector *con,
+ 			  bool override, int offset,
+ 			  struct typec_altmode_desc *desc)
+ {
+-	return NULL;
++	return typec_port_register_altmode(con->port, desc);
+ }
+ 
+ static inline void
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index fc01b31bbb875..6338d818bc8bc 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
+ 	if (err && err != -ENODEV)
+ 		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ 			config, err);
+-	return 0;
++	return err;
+ }
+ 
+ static int tweak_reset_device_cmd(struct urb *urb)
+ {
+ 	struct stub_priv *priv = (struct stub_priv *) urb->context;
+ 	struct stub_device *sdev = priv->sdev;
++	int err;
+ 
+ 	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
+ 
+-	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
++	err = usb_lock_device_for_reset(sdev->udev, NULL);
++	if (err < 0) {
+ 		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+-		return 0;
++		return err;
+ 	}
+-	usb_reset_device(sdev->udev);
++	err = usb_reset_device(sdev->udev);
+ 	usb_unlock_device(sdev->udev);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /*
+  * clear_halt, set_interface, and set_configuration require special tricks.
++ * Returns 1 if request was tweaked, 0 otherwise.
+  */
+-static void tweak_special_requests(struct urb *urb)
++static int tweak_special_requests(struct urb *urb)
+ {
++	int err;
++
+ 	if (!urb || !urb->setup_packet)
+-		return;
++		return 0;
+ 
+ 	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
+-		return;
++		return 0;
+ 
+ 	if (is_clear_halt_cmd(urb))
+ 		/* tweak clear_halt */
+-		 tweak_clear_halt_cmd(urb);
++		err = tweak_clear_halt_cmd(urb);
+ 
+ 	else if (is_set_interface_cmd(urb))
+ 		/* tweak set_interface */
+-		tweak_set_interface_cmd(urb);
++		err = tweak_set_interface_cmd(urb);
+ 
+ 	else if (is_set_configuration_cmd(urb))
+ 		/* tweak set_configuration */
+-		tweak_set_configuration_cmd(urb);
++		err = tweak_set_configuration_cmd(urb);
+ 
+ 	else if (is_reset_device_cmd(urb))
+-		tweak_reset_device_cmd(urb);
+-	else
++		err = tweak_reset_device_cmd(urb);
++	else {
+ 		usbip_dbg_stub_rx("no need to tweak\n");
++		return 0;
++	}
++
++	return !err;
+ }
+ 
+ /*
+@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	int support_sg = 1;
+ 	int np = 0;
+ 	int ret, i;
++	int is_tweaked;
+ 
+ 	if (pipe == -1)
+ 		return;
+@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 		priv->urbs[i]->pipe = pipe;
+ 		priv->urbs[i]->complete = stub_complete;
+ 
+-		/* no need to submit an intercepted request, but harmless? */
+-		tweak_special_requests(priv->urbs[i]);
++		/*
++		 * all URBs belong to a single PDU, so a global is_tweaked flag is
++		 * enough
++		 */
++		is_tweaked = tweak_special_requests(priv->urbs[i]);
+ 
+ 		masking_bogus_flags(priv->urbs[i]);
+ 	}
+@@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 
+ 	/* urb is now ready to submit */
+ 	for (i = 0; i < priv->num_urbs; i++) {
+-		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
++		if (!is_tweaked) {
++			ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+ 
+-		if (ret == 0)
+-			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+-					pdu->base.seqnum);
+-		else {
+-			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+-			usbip_dump_header(pdu);
+-			usbip_dump_urb(priv->urbs[i]);
++			if (ret == 0)
++				usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
++						pdu->base.seqnum);
++			else {
++				dev_err(&udev->dev, "submit_urb error, %d\n", ret);
++				usbip_dump_header(pdu);
++				usbip_dump_urb(priv->urbs[i]);
+ 
++				/*
++				 * Pessimistic.
++				 * This connection will be discarded.
++				 */
++				usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++				break;
++			}
++		} else {
+ 			/*
+-			 * Pessimistic.
+-			 * This connection will be discarded.
++			 * An identical URB was already submitted in
++			 * tweak_special_requests(). Skip submitting this URB to not
++			 * duplicate the request.
+ 			 */
+-			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+-			break;
++			priv->urbs[i]->status = 0;
++			stub_complete(priv->urbs[i]);
+ 		}
+ 	}
+ 
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7ff61909648d7..46c1f74983956 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1266,6 +1266,19 @@ static void extent_err(const struct extent_buffer *eb, int slot,
+ 	va_end(args);
+ }
+ 
++static bool is_valid_dref_root(u64 rootid)
++{
++	/*
++	 * The following tree root objectids are allowed to have a data backref:
++	 * - subvolume trees
++	 * - data reloc tree
++	 * - tree root
++	 *   For v1 space cache
++	 */
++	return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
++	       rootid == BTRFS_ROOT_TREE_OBJECTID;
++}
++
+ static int check_extent_item(struct extent_buffer *leaf,
+ 			     struct btrfs_key *key, int slot,
+ 			     struct btrfs_key *prev_key)
+@@ -1418,6 +1431,8 @@ static int check_extent_item(struct extent_buffer *leaf,
+ 		struct btrfs_extent_data_ref *dref;
+ 		struct btrfs_shared_data_ref *sref;
+ 		u64 seq;
++		u64 dref_root;
++		u64 dref_objectid;
+ 		u64 dref_offset;
+ 		u64 inline_offset;
+ 		u8 inline_type;
+@@ -1461,11 +1476,26 @@ static int check_extent_item(struct extent_buffer *leaf,
+ 		 */
+ 		case BTRFS_EXTENT_DATA_REF_KEY:
+ 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
++			dref_root = btrfs_extent_data_ref_root(leaf, dref);
++			dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ 			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+ 			seq = hash_extent_data_ref(
+ 					btrfs_extent_data_ref_root(leaf, dref),
+ 					btrfs_extent_data_ref_objectid(leaf, dref),
+ 					btrfs_extent_data_ref_offset(leaf, dref));
++			if (unlikely(!is_valid_dref_root(dref_root))) {
++				extent_err(leaf, slot,
++					   "invalid data ref root value %llu",
++					   dref_root);
++				return -EUCLEAN;
++			}
++			if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
++				     dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
++				extent_err(leaf, slot,
++					   "invalid data ref objectid value %llu",
++					   dref_objectid);
++				return -EUCLEAN;
++			}
+ 			if (unlikely(!IS_ALIGNED(dref_offset,
+ 						 fs_info->sectorsize))) {
+ 				extent_err(leaf, slot,
+@@ -1601,6 +1631,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 		return -EUCLEAN;
+ 	}
+ 	for (; ptr < end; ptr += sizeof(*dref)) {
++		u64 root;
++		u64 objectid;
+ 		u64 offset;
+ 
+ 		/*
+@@ -1608,7 +1640,22 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 		 * overflow from the leaf due to hash collisions.
+ 		 */
+ 		dref = (struct btrfs_extent_data_ref *)ptr;
++		root = btrfs_extent_data_ref_root(leaf, dref);
++		objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ 		offset = btrfs_extent_data_ref_offset(leaf, dref);
++		if (unlikely(!is_valid_dref_root(root))) {
++			extent_err(leaf, slot,
++				   "invalid extent data backref root value %llu",
++				   root);
++			return -EUCLEAN;
++		}
++		if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
++			     objectid > BTRFS_LAST_FREE_OBJECTID)) {
++			extent_err(leaf, slot,
++				   "invalid extent data backref objectid value %llu",
++				   objectid);
++			return -EUCLEAN;
++		}
+ 		if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+ 			extent_err(leaf, slot,
+ 	"invalid extent data backref offset, have %llu expect aligned to %u",
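
Both btrfs hunks apply the same two checks to every data backref: the owning
root must be one of the trees allowed to hold file data, and the inode
objectid must lie in the normal inode range. A self-contained sketch of the
range check, with stand-in constants (the real values come from the btrfs
on-disk format headers)::

  #include <stdbool.h>
  #include <stdint.h>

  /* Stand-ins; see BTRFS_{FIRST,LAST}_FREE_OBJECTID in the real headers. */
  #define FIRST_FREE_OBJECTID     256ULL
  #define LAST_FREE_OBJECTID      ((uint64_t)-256LL)

  /* Inode numbers live strictly between the reserved low and high ids;
   * anything outside that window in a data backref indicates corruption.
   */
  static bool dref_objectid_valid(uint64_t objectid)
  {
          return objectid >= FIRST_FREE_OBJECTID &&
                 objectid <= LAST_FREE_OBJECTID;
  }
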
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 00eff023cd9d6..6371b295fba68 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4148,7 +4148,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
+  * inline.c
+  */
+ bool f2fs_may_inline_data(struct inode *inode);
+-bool f2fs_sanity_check_inline_data(struct inode *inode);
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
+ bool f2fs_may_inline_dentry(struct inode *inode);
+ void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+ void f2fs_truncate_inline_inode(struct inode *inode,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 2cbe557f971e7..a3f8b4ed495ef 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -33,11 +33,29 @@ bool f2fs_may_inline_data(struct inode *inode)
+ 	return !f2fs_post_read_required(inode);
+ }
+ 
+-bool f2fs_sanity_check_inline_data(struct inode *inode)
++static bool inode_has_blocks(struct inode *inode, struct page *ipage)
++{
++	struct f2fs_inode *ri = F2FS_INODE(ipage);
++	int i;
++
++	if (F2FS_HAS_BLOCKS(inode))
++		return true;
++
++	for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
++		if (ri->i_nid[i])
++			return true;
++	}
++	return false;
++}
++
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
+ {
+ 	if (!f2fs_has_inline_data(inode))
+ 		return false;
+ 
++	if (inode_has_blocks(inode, ipage))
++		return false;
++
+ 	if (!support_inline_data(inode))
+ 		return true;
+ 
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 26e857fee631d..709b2f79872f2 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -346,7 +346,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		}
+ 	}
+ 
+-	if (f2fs_sanity_check_inline_data(inode)) {
++	if (f2fs_sanity_check_inline_data(inode, node_page)) {
+ 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
+ 			  __func__, inode->i_ino, inode->i_mode);
+ 		return false;
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 892b1c44de531..299b6d6aaa795 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -75,9 +75,6 @@
+ #define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
+ #define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
+ 
+-#define QC_CHANGE 0
+-#define QC_SYNC 1
+-
+ /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
+ /*                     -> sd_bitmap_lock                              */
+ static DEFINE_SPINLOCK(qd_lock);
+@@ -697,7 +694,7 @@ static int sort_qd(const void *a, const void *b)
+ 	return 0;
+ }
+ 
+-static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
++static void do_qc(struct gfs2_quota_data *qd, s64 change)
+ {
+ 	struct gfs2_sbd *sdp = qd->qd_sbd;
+ 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+@@ -722,18 +719,16 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
+ 	qd->qd_change = x;
+ 	spin_unlock(&qd_lock);
+ 
+-	if (qc_type == QC_CHANGE) {
+-		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
+-			qd_hold(qd);
+-			slot_hold(qd);
+-		}
+-	} else {
++	if (!x) {
+ 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
+ 		clear_bit(QDF_CHANGE, &qd->qd_flags);
+ 		qc->qc_flags = 0;
+ 		qc->qc_id = 0;
+ 		slot_put(qd);
+ 		qd_put(qd);
++	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
++		qd_hold(qd);
++		slot_hold(qd);
+ 	}
+ 
+ 	if (change < 0) /* Reset quiet flag if we freed some blocks */
+@@ -978,7 +973,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
+ 		if (error)
+ 			goto out_end_trans;
+ 
+-		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
++		do_qc(qd, -qd->qd_change_sync);
+ 		set_bit(QDF_REFRESH, &qd->qd_flags);
+ 	}
+ 
+@@ -1300,7 +1295,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+ 
+ 		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
+ 		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
+-			do_qc(qd, change, QC_CHANGE);
++			do_qc(qd, change);
+ 		}
+ 	}
+ }
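
With the QC_CHANGE/QC_SYNC argument gone, do_qc() derives the slot lifetime
purely from the accumulated value: a change that brings the total back to
zero releases the quota-change slot, while any other nonzero total takes (or
keeps) it. The decision reduces to the following shape (a sketch, not the
gfs2 code)::

  #include <stdbool.h>

  static void account_change(long long *total, long long change,
                             bool *slot_held)
  {
          *total += change;

          if (*total == 0)
                  *slot_held = false;     /* net change gone: drop the slot */
          else if (!*slot_held)
                  *slot_held = true;      /* first nonzero change: hold it */
  }
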
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index fc3ecb180ac53..b65261e0cae3a 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -99,12 +99,12 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+  */
+ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
+ {
++	int flags = LM_FLAG_NOEXP | GL_EXACT;
+ 	int error;
+ 
+-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-				   LM_FLAG_NOEXP | GL_EXACT,
++	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
+ 				   &sdp->sd_freeze_gh);
+-	if (error)
++	if (error && error != GLR_TRYFAILED)
+ 		fs_err(sdp, "can't lock the freeze glock: %d\n", error);
+ 	return error;
+ }
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 7974e91ffe134..b5d8f238fce42 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -103,17 +103,13 @@ void fsnotify_sb_delete(struct super_block *sb)
+  * parent cares.  Thus when an event happens on a child it can quickly tell
+  * if there is a need to find a parent and send the event to the parent.
+  */
+-void __fsnotify_update_child_dentry_flags(struct inode *inode)
++void fsnotify_set_children_dentry_flags(struct inode *inode)
+ {
+ 	struct dentry *alias;
+-	int watched;
+ 
+ 	if (!S_ISDIR(inode->i_mode))
+ 		return;
+ 
+-	/* determine if the children should tell inode about their events */
+-	watched = fsnotify_inode_watches_children(inode);
+-
+ 	spin_lock(&inode->i_lock);
+ 	/* run all of the dentries associated with this inode.  Since this is a
+ 	 * directory, there damn well better only be one item on this list */
+@@ -129,10 +125,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 				continue;
+ 
+ 			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+-			if (watched)
+-				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+-			else
+-				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++			child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ 			spin_unlock(&child->d_lock);
+ 		}
+ 		spin_unlock(&alias->d_lock);
+@@ -140,6 +133,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 	spin_unlock(&inode->i_lock);
+ }
+ 
++/*
++ * Lazily clear the false positive PARENT_WATCHED flag for a child whose
++ * parent has stopped watching children.
++ */
++static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
++					     struct dentry *dentry)
++{
++	spin_lock(&dentry->d_lock);
++	/*
++	 * d_lock is a sufficient barrier to prevent observing a non-watched
++	 * parent state from before the fsnotify_set_children_dentry_flags()
++	 * or fsnotify_update_flags() call that had set PARENT_WATCHED.
++	 */
++	if (!fsnotify_inode_watches_children(pinode))
++		dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++	spin_unlock(&dentry->d_lock);
++}
++
+ /* Are inode/sb/mount interested in parent and name info with this event? */
+ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt,
+ 					__u32 mask)
+@@ -208,7 +219,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 	p_inode = parent->d_inode;
+ 	p_mask = fsnotify_inode_watches_children(p_inode);
+ 	if (unlikely(parent_watched && !p_mask))
+-		__fsnotify_update_child_dentry_flags(p_inode);
++		fsnotify_clear_child_dentry_flag(p_inode, dentry);
+ 
+ 	/*
+ 	 * Include parent/name in notification either if some notification
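
The fsnotify rework above replaces eager flag clearing (walk every child
dentry as soon as a parent stops watching) with lazy clearing: a stale
PARENT_WATCHED hint is only fixed up when an event actually arrives for that
child. The hint can be a false positive but never a false negative, which
keeps the fast path cheap. A sketch of the idea with generic names::

  #include <stdbool.h>

  struct child {
          bool parent_watched_hint;       /* may be stale (false positive) */
  };

  static bool parent_watches(void)
  {
          return false;                   /* stand-in for the real check */
  }

  static bool should_notify_parent(struct child *c)
  {
          if (!c->parent_watched_hint)
                  return false;           /* hint is never a false negative */

          if (!parent_watches()) {
                  /* lazy fix-up: clear the stale hint on first use */
                  c->parent_watched_hint = false;
                  return false;
          }
          return true;
  }
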
+diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
+index fde74eb333cc9..2b4267de86e6b 100644
+--- a/fs/notify/fsnotify.h
++++ b/fs/notify/fsnotify.h
+@@ -74,7 +74,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
+  * update the dentry->d_flags of all of inode's children to indicate if inode cares
+  * about events that happen to its children.
+  */
+-extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
++extern void fsnotify_set_children_dentry_flags(struct inode *inode);
+ 
+ extern struct kmem_cache *fsnotify_mark_connector_cachep;
+ 
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index c74ef947447d6..4be6e883d492f 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -176,6 +176,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ 	return fsnotify_update_iref(conn, want_iref);
+ }
+ 
++static bool fsnotify_conn_watches_children(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return false;
++
++	return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
++}
++
++static void fsnotify_conn_set_children_dentry_flags(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return;
++
++	fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
++}
++
+ /*
+  * Calculate mask of events for a list of marks. The caller must make sure
+  * connector and connector->obj cannot disappear under us.  Callers achieve
+@@ -184,15 +202,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+  */
+ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ {
++	bool update_children;
++
+ 	if (!conn)
+ 		return;
+ 
+ 	spin_lock(&conn->lock);
++	update_children = !fsnotify_conn_watches_children(conn);
+ 	__fsnotify_recalc_mask(conn);
++	update_children &= fsnotify_conn_watches_children(conn);
+ 	spin_unlock(&conn->lock);
+-	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+-		__fsnotify_update_child_dentry_flags(
+-					fsnotify_conn_inode(conn));
++	/*
++	 * Set children's PARENT_WATCHED flags only if parent started watching.
++	 * When parent stops watching, we clear false positive PARENT_WATCHED
++	 * flags lazily in __fsnotify_parent().
++	 */
++	if (update_children)
++		fsnotify_conn_set_children_dentry_flags(conn);
+ }
+ 
+ /* Free all connectors queued for freeing once SRCU period ends */
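
fsnotify_recalc_mask() now only propagates flags to children on the
not-watching -> watching transition; the two boolean samples taken around
__fsnotify_recalc_mask() form a simple edge detector. Reduced to its
essentials::

  #include <stdbool.h>

  /* Returns true only if update() flipped the predicate from 0 to 1. */
  static bool rose(bool (*pred)(void), void (*update)(void))
  {
          bool was_clear = !pred();

          update();
          return was_clear && pred();
  }
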
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 28031c7ba6b19..15cbfec4c28c7 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -950,7 +950,8 @@ int smb2_query_path_info(const unsigned int xid,
+ 			cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
+ 
+ 		oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+-				     FILE_READ_ATTRIBUTES | FILE_READ_EA,
++				     FILE_READ_ATTRIBUTES |
++				     FILE_READ_EA | SYNCHRONIZE,
+ 				     FILE_OPEN, create_options |
+ 				     OPEN_REPARSE_POINT, ACL_NO_MODE);
+ 		cifs_get_readable_path(tcon, full_path, &cfile);
+@@ -1258,7 +1259,8 @@ int smb2_query_reparse_point(const unsigned int xid,
+ 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+ 
+ 	cifs_get_readable_path(tcon, full_path, &cfile);
+-	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES,
++	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
++			     FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE,
+ 			     FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE);
+ 	rc = smb2_compound_op(xid, tcon, cifs_sb,
+ 			      full_path, &oparms, &in_iov,
+diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
+index c0f56fe6d22ae..d116f18de899c 100644
+--- a/include/clocksource/timer-xilinx.h
++++ b/include/clocksource/timer-xilinx.h
+@@ -41,7 +41,7 @@ struct regmap;
+ struct xilinx_timer_priv {
+ 	struct regmap *map;
+ 	struct clk *clk;
+-	u32 max;
++	u64 max;
+ };
+ 
+ /**
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index c0892d75ce333..575415b513497 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -563,12 +563,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+ 
+ static inline int fsnotify_inode_watches_children(struct inode *inode)
+ {
++	__u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
++
+ 	/* FS_EVENT_ON_CHILD is set if the inode may care */
+-	if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
++	if (!(parent_mask & FS_EVENT_ON_CHILD))
+ 		return 0;
+ 	/* this inode might care about child events, does it care about the
+ 	 * specific set of events that can happen on a child? */
+-	return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
++	return parent_mask & FS_EVENTS_POSS_ON_CHILD;
+ }
+ 
+ /*
+@@ -582,7 +584,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
+ 	/*
+ 	 * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ 	 * by d_lock. If inotify_inode_watched changes after we have taken
+-	 * d_lock, the following __fsnotify_update_child_dentry_flags call will
++	 * d_lock, the following fsnotify_set_children_dentry_flags call will
+ 	 * find our entry, so it will spin until we complete here, and update
+ 	 * us with the new state.
+ 	 */
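
The fsnotify_backend.h hunk exists because i_fsnotify_mask can change under
a reader that holds no lock here: sampling it once with READ_ONCE() makes
both bit tests use the same snapshot instead of two independent reloads of a
concurrently-updated word. A minimal sketch of the pattern (with a
simplified stand-in for the kernel macro)::

  /* Simplified stand-in; the real READ_ONCE() lives in rwonce.h. */
  #define READ_ONCE(x)    (*(const volatile typeof(x) *)&(x))

  #define EVENT_ON_CHILD          0x08000000u     /* stand-in bit values */
  #define EVENTS_POSS_ON_CHILD    0x0000ffffu

  static unsigned int watches_children(const unsigned int *mask)
  {
          unsigned int snapshot = READ_ONCE(*mask);       /* one load */

          if (!(snapshot & EVENT_ON_CHILD))
                  return 0;
          return snapshot & EVENTS_POSS_ON_CHILD;
  }
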
+diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
+index bfe7c1f1ac6d1..f0231dbc47771 100644
+--- a/include/linux/hwspinlock.h
++++ b/include/linux/hwspinlock.h
+@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+ int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+ struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+ struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ {
+ }
+ 
++static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	return 0;
++}
++
+ static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+ {
+ 	return 0;
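
The hwspin_lock_bust() declaration pairs with the documentation hunk at the
top of this patch: it lets the local side forcibly release a hardware
spinlock abandoned by a crashed remote processor instead of spinning on it
forever. A hedged usage sketch (REMOTE_HOST_ID is a made-up owner id; real
callers know the peer's id from their IPC setup)::

  static void recover_abandoned_lock(struct hwspinlock *hwlock)
  {
          int ret = hwspin_lock_bust(hwlock, REMOTE_HOST_ID);

          if (ret == -EOPNOTSUPP)
                  pr_warn("hwspinlock: bust not implemented by driver\n");
          else if (ret)
                  pr_warn("hwspinlock: bust failed: %d\n", ret);
  }
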
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 0dae9db275380..32cf5708d5a5b 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -1033,7 +1033,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
+ struct acpi_resource;
+ struct acpi_resource_i2c_serialbus;
+ 
+-#if IS_ENABLED(CONFIG_ACPI)
++#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
+ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ 			       struct acpi_resource_i2c_serialbus **i2c);
+ int i2c_acpi_client_count(struct acpi_device *adev);
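
The i2c.h change matters for the CONFIG_I2C=m case: IS_ENABLED() is true for
both =y and =m, so built-in ACPI code would get real prototypes for symbols
that live in a module it cannot link against. IS_REACHABLE() is true only
when the symbol is actually linkable from the current compilation unit.
Roughly::

  /* Simplified; the real macros are in include/linux/kconfig.h.
   *
   *   IS_ENABLED(CONFIG_FOO)   - FOO=y or FOO=m
   *   IS_REACHABLE(CONFIG_FOO) - FOO=y, or FOO=m when this code is
   *                              itself being built as a module
   */
  #if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
  /* real declarations: every caller here can resolve the symbols */
  #else
  /* static inline stubs: built-in callers of modular I2C get no-ops */
  #endif
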
+diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
+index a36a3b9d4929e..03187bc958518 100644
+--- a/include/linux/soc/qcom/smem.h
++++ b/include/linux/soc/qcom/smem.h
+@@ -14,4 +14,6 @@ phys_addr_t qcom_smem_virt_to_phys(void *p);
+ 
+ int qcom_smem_get_soc_id(u32 *id);
+ 
++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host);
++
+ #endif
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 162cf2d2f841c..6f1ff4846451b 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -497,8 +497,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ 	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ }
+ 
+-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+-					int fc_mx_len,
++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
+ 					struct netlink_ext_ack *extack);
+ static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+ {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index cc3b56bf19e05..c206ffaa8ed70 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1140,7 +1140,7 @@ extern struct tcp_congestion_ops tcp_reno;
+ 
+ struct tcp_congestion_ops *tcp_ca_find(const char *name);
+ struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
+ #ifdef CONFIG_INET
+ char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+ #else
+diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
+index 28c364c63245d..d099ae27f8491 100644
+--- a/include/sound/ump_convert.h
++++ b/include/sound/ump_convert.h
+@@ -13,6 +13,7 @@ struct ump_cvt_to_ump_bank {
+ 	unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+ 	unsigned char cc_data_msb, cc_data_lsb;
+ 	unsigned char cc_bank_msb, cc_bank_lsb;
++	bool cc_data_msb_set, cc_data_lsb_set;
+ };
+ 
+ /* context for converting from MIDI1 byte stream to UMP packet */
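
The two new flags let the MIDI1-to-UMP converter tell "data byte never
received" apart from "data byte received and equal to zero" when pairing
CC #6/#38 into a 14-bit (N)RPN data value. One possible policy built on such
flags (a sketch, not the ALSA code, which applies its own transmit rules)::

  #include <stdbool.h>
  #include <stdint.h>

  struct cc_state {
          uint8_t msb, lsb;       /* CC #6 / CC #38 payloads, 7 bits each */
          bool msb_set, lsb_set;  /* seen since the last (N)RPN select */
  };

  /* Emit only once both halves are known; a zero byte is now
   * distinguishable from an absent one.
   */
  static bool cc_value14(const struct cc_state *s, uint16_t *out)
  {
          if (!s->msb_set || !s->lsb_set)
                  return false;

          *out = ((uint16_t)(s->msb & 0x7f) << 7) | (s->lsb & 0x7f);
          return true;
  }
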
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index e4da397360682..2a7d6f269d9e3 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1064,6 +1064,7 @@ struct ufs_hba {
+ 	bool ext_iid_sup;
+ 	bool scsi_host_added;
+ 	bool mcq_sup;
++	bool lsdb_sup;
+ 	bool mcq_enabled;
+ 	struct ufshcd_res_info res[RES_MAX];
+ 	void __iomem *mcq_base;
+diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
+index d5accacae6bca..ae93b30d25893 100644
+--- a/include/ufs/ufshci.h
++++ b/include/ufs/ufshci.h
+@@ -75,6 +75,7 @@ enum {
+ 	MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT	= 0x02000000,
+ 	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
+ 	MASK_CRYPTO_SUPPORT			= 0x10000000,
++	MASK_LSDB_SUPPORT			= 0x20000000,
+ 	MASK_MCQ_SUPPORT			= 0x40000000,
+ };
+ 
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 06366acd27b08..e472cc37d7de4 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -415,8 +415,11 @@ static unsigned long long phys_addr(struct dma_debug_entry *entry)
+  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+  * entries into the tree.
++ *
++ * Use __GFP_NOWARN: an allocation-failure printk routed to netconsole
++ * could end up right back in the DMA debugging code, leading to a deadlock.
+  */
+-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
++static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
+ static DEFINE_SPINLOCK(radix_lock);
+ #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+ #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index e9821a8422dbe..9eb43b501ff5c 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -224,7 +224,6 @@ struct rcu_data {
+ 	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
+ 	struct task_struct *nocb_gp_kthread;
+ 	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
+-	atomic_t nocb_lock_contended;	/* Contention experienced. */
+ 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
+ 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
+ 	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 2b24405b9cd2b..30b34f215ca35 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+ 
+ /*
+  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
+- * lock isn't immediately available, increment ->nocb_lock_contended to
+- * flag the contention.
++ * lock isn't immediately available, perform a minimal sanity check.
+  */
+ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ 	__acquires(&rdp->nocb_bypass_lock)
+@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ 	lockdep_assert_irqs_disabled();
+ 	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+ 		return;
+-	atomic_inc(&rdp->nocb_lock_contended);
++	/*
++	 * Contention is expected only when a local enqueue collides with a
++	 * remote flush from the kthreads.
++	 */
+ 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+-	smp_mb__after_atomic(); /* atomic_inc() before lock. */
+ 	raw_spin_lock(&rdp->nocb_bypass_lock);
+-	smp_mb__before_atomic(); /* atomic_dec() after lock. */
+-	atomic_dec(&rdp->nocb_lock_contended);
+-}
+-
+-/*
+- * Spinwait until the specified rcu_data structure's ->nocb_lock is
+- * not contended.  Please note that this is extremely special-purpose,
+- * relying on the fact that at most two kthreads and one CPU contend for
+- * this lock, and also that the two kthreads are guaranteed to have frequent
+- * grace-period-duration time intervals between successive acquisitions
+- * of the lock.  This allows us to use an extremely simple throttling
+- * mechanism, and further to apply it only to the CPU doing floods of
+- * call_rcu() invocations.  Don't try this at home!
+- */
+-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+-{
+-	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+-	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+-		cpu_relax();
+ }
+ 
+ /*
+@@ -510,7 +492,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ 	}
+ 
+ 	// We need to use the bypass.
+-	rcu_nocb_wait_contended(rdp);
+ 	rcu_nocb_bypass_lock(rdp);
+ 	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+ 	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+@@ -1668,12 +1649,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
+ 
+ 	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+ 	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+-	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
++	pr_info("   CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+ 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
+ 		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
+ 		"kK"[!!rdp->nocb_cb_kthread],
+ 		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+-		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+ 		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+ 		"sS"[!!rdp->nocb_cb_sleep],
+ 		".W"[swait_active(&rdp->nocb_cb_wq)],
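
After this cleanup rcu_nocb_bypass_lock() is the classic opportunistic
shape: a cheap trylock first, and only on failure the blocking acquisition,
with a warning if contention shows up from an unexpected CPU. In userspace
terms (a pthreads sketch, not the kernel primitives)::

  #include <pthread.h>
  #include <stdio.h>

  static void bypass_lock(pthread_mutex_t *lock, int this_cpu, int owner_cpu)
  {
          if (pthread_mutex_trylock(lock) == 0)
                  return;                 /* uncontended fast path */

          /* contention is only expected from the designated flusher */
          if (this_cpu != owner_cpu)
                  fprintf(stderr, "contended off the owning CPU\n");

          pthread_mutex_lock(lock);       /* slow path: wait */
  }
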
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index e3268615a65a1..233d9d0437c27 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1030,7 +1030,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ 			bool ecn_ca = false;
+ 
+ 			nla_strscpy(tmp, nla, sizeof(tmp));
+-			val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
++			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ 		} else {
+ 			if (nla_len(nla) != sizeof(u32))
+ 				return false;
+@@ -1459,8 +1459,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
+ 	fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
+ 	if (!fi)
+ 		goto failure;
+-	fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
+-					      cfg->fc_mx_len, extack);
++	fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack);
+ 	if (IS_ERR(fi->fib_metrics)) {
+ 		err = PTR_ERR(fi->fib_metrics);
+ 		kfree(fi);
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 0e3ee1532848c..8ddac1f595ed8 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -7,7 +7,7 @@
+ #include <net/net_namespace.h>
+ #include <net/tcp.h>
+ 
+-static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
++static int ip_metrics_convert(struct nlattr *fc_mx,
+ 			      int fc_mx_len, u32 *metrics,
+ 			      struct netlink_ext_ack *extack)
+ {
+@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ 			char tmp[TCP_CA_NAME_MAX];
+ 
+ 			nla_strscpy(tmp, nla, sizeof(tmp));
+-			val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
++			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ 			if (val == TCP_CA_UNSPEC) {
+ 				NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
+ 				return -EINVAL;
+@@ -63,7 +63,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ 	return 0;
+ }
+ 
+-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx,
+ 					int fc_mx_len,
+ 					struct netlink_ext_ack *extack)
+ {
+@@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ 	if (unlikely(!fib_metrics))
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
++	err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics,
+ 				 extack);
+ 	if (!err) {
+ 		refcount_set(&fib_metrics->refcnt, 1);
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 1b34050a7538b..95dbb2799be46 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
+ }
+ 
+ /* Must be called with rcu lock held */
+-static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
+-						       const char *name)
++static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
+ {
+ 	struct tcp_congestion_ops *ca = tcp_ca_find(name);
+ 
+@@ -182,7 +181,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong
+ 	return ret;
+ }
+ 
+-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
+ {
+ 	const struct tcp_congestion_ops *ca;
+ 	u32 key = TCP_CA_UNSPEC;
+@@ -190,7 +189,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
+ 	might_sleep();
+ 
+ 	rcu_read_lock();
+-	ca = tcp_ca_find_autoload(net, name);
++	ca = tcp_ca_find_autoload(name);
+ 	if (ca) {
+ 		key = ca->key;
+ 		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
+@@ -287,7 +286,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
+ 	int ret;
+ 
+ 	rcu_read_lock();
+-	ca = tcp_ca_find_autoload(net, name);
++	ca = tcp_ca_find_autoload(name);
+ 	if (!ca) {
+ 		ret = -ENOENT;
+ 	} else if (!bpf_try_module_get(ca, ca->owner)) {
+@@ -425,7 +424,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ 	if (!load)
+ 		ca = tcp_ca_find(name);
+ 	else
+-		ca = tcp_ca_find_autoload(sock_net(sk), name);
++		ca = tcp_ca_find_autoload(name);
+ 
+ 	/* No change asking for existing value */
+ 	if (ca == icsk->icsk_ca_ops) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 49ef5623c55e2..0299886dbeb91 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3754,7 +3754,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ 	if (!rt)
+ 		goto out;
+ 
+-	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
++	rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
+ 					       extack);
+ 	if (IS_ERR(rt->fib6_metrics)) {
+ 		err = PTR_ERR(rt->fib6_metrics);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 066424e62ff09..71d60f57a886c 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -215,6 +215,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ 
+ 	might_sleep();
+ 
++	WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif));
++
+ 	if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ 		return;
+ 
+@@ -247,7 +249,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ 	if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
+ 		u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
+ 
+-		/* FIXME: should be for each link */
+ 		trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf,
+ 					    changed);
+ 		if (local->ops->link_info_changed)
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index ad28da655f8bc..a29ff901df758 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -68,12 +68,12 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
+ 	skb = skb_peek_tail(&sk->sk_receive_queue);
+ 	if (skb) {
+ 		WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
+-		pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
++		pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
+ 			 MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
+ 			 MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
+ 		MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
+ 		MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
+ 	}
+ 
+-	pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
++	pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
+ }
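
The long run of mptcp hunks from here on is one mechanical sweep: every
pr_debug() format string gains a trailing "\n". Without the terminator the
kernel may treat the next printk as a continuation of the same record, so
adjacent debug messages can be merged or printed late. The rule being
applied throughout::

  /* before: an unterminated record, may be glued to the next message */
  pr_debug("msk=%p state=%d", msk, state);

  /* after: every debug statement is a complete log line */
  pr_debug("msk=%p state=%d\n", msk, state);
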
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 604724cca887f..2ad9006a157ae 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -117,7 +117,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+ 			ptr += 2;
+ 		}
+-		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
++		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
+ 			 version, flags, opsize, mp_opt->sndr_key,
+ 			 mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
+ 		break;
+@@ -131,7 +131,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 4;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->token, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+@@ -142,19 +142,19 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 8;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->thmac, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+ 			mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+ 			ptr += 2;
+ 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+-			pr_debug("MP_JOIN hmac");
++			pr_debug("MP_JOIN hmac\n");
+ 		}
+ 		break;
+ 
+ 	case MPTCPOPT_DSS:
+-		pr_debug("DSS");
++		pr_debug("DSS\n");
+ 		ptr++;
+ 
+ 		/* we must clear 'mpc_map' be able to detect MP_CAPABLE
+@@ -169,7 +169,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+ 		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+ 
+-		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+ 			 mp_opt->data_fin, mp_opt->dsn64,
+ 			 mp_opt->use_map, mp_opt->ack64,
+ 			 mp_opt->use_ack);
+@@ -207,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				ptr += 4;
+ 			}
+ 
+-			pr_debug("data_ack=%llu", mp_opt->data_ack);
++			pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+ 		}
+ 
+ 		if (mp_opt->use_map) {
+@@ -231,7 +231,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				ptr += 2;
+ 			}
+ 
+-			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ 				 mp_opt->data_seq, mp_opt->subflow_seq,
+ 				 mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
+ 				 mp_opt->csum);
+@@ -293,7 +293,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->ahmac = get_unaligned_be64(ptr);
+ 			ptr += 8;
+ 		}
+-		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
++		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ 			 (mp_opt->addr.family == AF_INET6) ? "6" : "",
+ 			 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
+ 		break;
+@@ -309,7 +309,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
+ 		for (i = 0; i < mp_opt->rm_list.nr; i++)
+ 			mp_opt->rm_list.ids[i] = *ptr++;
+-		pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
++		pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
+ 		break;
+ 
+ 	case MPTCPOPT_MP_PRIO:
+@@ -318,7 +318,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 
+ 		mp_opt->suboptions |= OPTION_MPTCP_PRIO;
+ 		mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
+-		pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
++		pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
+ 		break;
+ 
+ 	case MPTCPOPT_MP_FASTCLOSE:
+@@ -329,7 +329,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->rcvr_key = get_unaligned_be64(ptr);
+ 		ptr += 8;
+ 		mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
+-		pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
++		pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
+ 		break;
+ 
+ 	case MPTCPOPT_RST:
+@@ -343,7 +343,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		flags = *ptr++;
+ 		mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
+ 		mp_opt->reset_reason = *ptr;
+-		pr_debug("MP_RST: transient=%u reason=%u",
++		pr_debug("MP_RST: transient=%u reason=%u\n",
+ 			 mp_opt->reset_transient, mp_opt->reset_reason);
+ 		break;
+ 
+@@ -354,7 +354,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		ptr += 2;
+ 		mp_opt->suboptions |= OPTION_MPTCP_FAIL;
+ 		mp_opt->fail_seq = get_unaligned_be64(ptr);
+-		pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
++		pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
+ 		break;
+ 
+ 	default:
+@@ -417,7 +417,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ 		*size = TCPOLEN_MPTCP_MPC_SYN;
+ 		return true;
+ 	} else if (subflow->request_join) {
+-		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++		pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+ 			 subflow->local_nonce);
+ 		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+ 		opts->join_id = subflow->local_id;
+@@ -500,7 +500,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 			*size = TCPOLEN_MPTCP_MPC_ACK;
+ 		}
+ 
+-		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+ 			 subflow, subflow->local_key, subflow->remote_key,
+ 			 data_len);
+ 
+@@ -509,7 +509,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+ 		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+ 		*size = TCPOLEN_MPTCP_MPJ_ACK;
+-		pr_debug("subflow=%p", subflow);
++		pr_debug("subflow=%p\n", subflow);
+ 
+ 		/* we can use the full delegate action helper only from BH context
+ 		 * If we are in process context - sk is flushing the backlog at
+@@ -675,7 +675,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 
+ 	*size = len;
+ 	if (drop_other_suboptions) {
+-		pr_debug("drop other suboptions");
++		pr_debug("drop other suboptions\n");
+ 		opts->suboptions = 0;
+ 
+ 		/* note that e.g. DSS could have written into the memory
+@@ -695,7 +695,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 	} else {
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
+ 	}
+-	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
++	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ 		 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
+ 
+ 	return true;
+@@ -726,7 +726,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
+ 	opts->rm_list = rm_list;
+ 
+ 	for (i = 0; i < opts->rm_list.nr; i++)
+-		pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
++		pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
+ 	MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
+ 	return true;
+ }
+@@ -752,7 +752,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_PRIO;
+ 	opts->backup = subflow->request_bkup;
+ 
+-	pr_debug("prio=%d", opts->backup);
++	pr_debug("prio=%d\n", opts->backup);
+ 
+ 	return true;
+ }
+@@ -794,7 +794,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
+ 	opts->rcvr_key = msk->remote_key;
+ 
+-	pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
++	pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
+ 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
+ 	return true;
+ }
+@@ -816,7 +816,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_FAIL;
+ 	opts->fail_seq = subflow->map_seq;
+ 
+-	pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
++	pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
+ 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
+ 
+ 	return true;
+@@ -904,7 +904,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->csum_reqd = subflow_req->csum_reqd;
+ 		opts->allow_join_id0 = subflow_req->allow_join_id0;
+ 		*size = TCPOLEN_MPTCP_MPC_SYNACK;
+-		pr_debug("subflow_req=%p, local_key=%llu",
++		pr_debug("subflow_req=%p, local_key=%llu\n",
+ 			 subflow_req, subflow_req->local_key);
+ 		return true;
+ 	} else if (subflow_req->mp_join) {
+@@ -913,7 +913,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->join_id = subflow_req->local_id;
+ 		opts->thmac = subflow_req->thmac;
+ 		opts->nonce = subflow_req->local_nonce;
+-		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 			 subflow_req, opts->backup, opts->join_id,
+ 			 opts->thmac, opts->nonce);
+ 		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index c42496bcb5e09..157a574fab0cc 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,7 +20,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ {
+ 	u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+ 
+-	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
++	pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
+ 
+ 	lockdep_assert_held(&msk->pm.lock);
+ 
+@@ -46,7 +46,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
+ {
+ 	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+ 
+-	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++	pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+ 
+ 	if (rm_addr) {
+ 		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
+@@ -67,7 +67,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
++	pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
+ 
+ 	WRITE_ONCE(pm->server_side, server_side);
+ 	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
+@@ -91,7 +91,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ 
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
+-	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+ 		 subflows_max, READ_ONCE(pm->accept_subflow));
+ 
+ 	/* try to avoid acquiring the lock below */
+@@ -115,7 +115,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+ 				   enum mptcp_pm_status new_status)
+ {
+-	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+ 		 BIT(new_status));
+ 	if (msk->pm.status & BIT(new_status))
+ 		return false;
+@@ -130,7 +130,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	bool announce = false;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&pm->lock);
+ 
+@@ -154,14 +154,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
+ 
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ }
+ 
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!READ_ONCE(pm->work_pending))
+ 		return;
+@@ -213,7 +213,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+ 		 READ_ONCE(pm->accept_addr));
+ 
+ 	mptcp_event_addr_announced(ssk, addr);
+@@ -246,7 +246,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&pm->lock);
+ 
+@@ -270,7 +270,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	u8 i;
+ 
+-	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
++	pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
+ 
+ 	for (i = 0; i < rm_list->nr; i++)
+ 		mptcp_event_addr_removed(msk, rm_list->ids[i]);
+@@ -302,19 +302,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 
+-	pr_debug("fail_seq=%llu", fail_seq);
++	pr_debug("fail_seq=%llu\n", fail_seq);
+ 
+ 	if (!READ_ONCE(msk->allow_infinite_fallback))
+ 		return;
+ 
+ 	if (!subflow->fail_tout) {
+-		pr_debug("send MP_FAIL response and infinite map");
++		pr_debug("send MP_FAIL response and infinite map\n");
+ 
+ 		subflow->send_mp_fail = 1;
+ 		subflow->send_infinite_map = 1;
+ 		tcp_send_ack(sk);
+ 	} else {
+-		pr_debug("MP_FAIL response received");
++		pr_debug("MP_FAIL response received\n");
+ 		WRITE_ONCE(subflow->fail_tout, 0);
+ 	}
+ }
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 7dd10bacc8d28..d5902e7f47a78 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -295,7 +295,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	struct mptcp_sock *msk = entry->sock;
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!msk)
+ 		return;
+@@ -314,7 +314,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (!mptcp_pm_should_add_signal_addr(msk)) {
+-		pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+ 		mptcp_pm_announce_addr(msk, &entry->addr, false);
+ 		mptcp_pm_add_addr_send_ack(msk);
+ 		entry->retrans_times++;
+@@ -395,7 +395,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ 	struct sock *sk = (struct sock *)msk;
+ 	LIST_HEAD(free_list);
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -481,7 +481,7 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
+ 	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 	bool slow;
+ 
+-	pr_debug("send ack for %s",
++	pr_debug("send ack for %s\n",
+ 		 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
+ 
+ 	slow = lock_sock_fast(ssk);
+@@ -730,7 +730,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
+-	pr_debug("accepted %d:%d remote family %d",
++	pr_debug("accepted %d:%d remote family %d\n",
+ 		 msk->pm.add_addr_accepted, add_addr_accept_max,
+ 		 msk->pm.remote.family);
+ 
+@@ -803,7 +803,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	pr_debug("bkup=%d", bkup);
++	pr_debug("bkup=%d\n", bkup);
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -826,11 +826,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 	return -EINVAL;
+ }
+ 
+-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
+-{
+-	return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
+-}
+-
+ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 					   const struct mptcp_rm_list *rm_list,
+ 					   enum linux_mptcp_mib_field rm_type)
+@@ -839,7 +834,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 	struct sock *sk = (struct sock *)msk;
+ 	u8 i;
+ 
+-	pr_debug("%s rm_list_nr %d",
++	pr_debug("%s rm_list_nr %d\n",
+ 		 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
+ 
+ 	msk_owned_by_me(msk);
+@@ -867,10 +862,10 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 				continue;
+ 			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ 				continue;
+-			if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
++			if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
+ 				continue;
+ 
+-			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
++			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
+ 				 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+ 				 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+ 			spin_unlock_bh(&msk->pm.lock);
+@@ -927,7 +922,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+-	pr_debug("msk=%p status=%x", msk, pm->status);
++	pr_debug("msk=%p status=%x\n", msk, pm->status);
+ 	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+ 		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+ 		mptcp_pm_nl_add_addr_received(msk);
+@@ -1506,6 +1501,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ 	return false;
+ }
+ 
++static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
++				  const struct mptcp_addr_info *addr)
++{
++	return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
++}
++
+ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 				      const struct mptcp_addr_info *addr,
+ 				      bool force)
+@@ -1513,7 +1514,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 	struct mptcp_rm_list list = { .nr = 0 };
+ 	bool ret;
+ 
+-	list.ids[list.nr++] = addr->id;
++	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+ 
+ 	ret = remove_anno_list_by_saddr(msk, addr);
+ 	if (ret || force) {
+@@ -1540,13 +1541,11 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 						   const struct mptcp_pm_addr_entry *entry)
+ {
+ 	const struct mptcp_addr_info *addr = &entry->addr;
+-	struct mptcp_rm_list list = { .nr = 0 };
++	struct mptcp_rm_list list = { .nr = 1 };
+ 	long s_slot = 0, s_num = 0;
+ 	struct mptcp_sock *msk;
+ 
+-	pr_debug("remove_id=%d", addr->id);
+-
+-	list.ids[list.nr++] = addr->id;
++	pr_debug("remove_id=%d\n", addr->id);
+ 
+ 	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ 		struct sock *sk = (struct sock *)msk;
+@@ -1565,6 +1564,7 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+ 					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+ 
++		list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+ 		if (remove_subflow) {
+ 			spin_lock_bh(&msk->pm.lock);
+ 			mptcp_pm_nl_rm_subflow_received(msk, &list);
+@@ -1673,6 +1673,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+ 	return ret;
+ }
+ 
++/* Called from the userspace PM only */
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ {
+ 	struct mptcp_rm_list alist = { .nr = 0 };
+@@ -1701,8 +1702,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ 	}
+ }
+ 
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+-					struct list_head *rm_list)
++/* Called from the in-kernel PM only */
++static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
++					       struct list_head *rm_list)
+ {
+ 	struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
+ 	struct mptcp_pm_addr_entry *entry;
+@@ -1710,11 +1712,11 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 	list_for_each_entry(entry, rm_list, list) {
+ 		if (slist.nr < MPTCP_RM_IDS_MAX &&
+ 		    lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+-			slist.ids[slist.nr++] = entry->addr.id;
++			slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+ 
+ 		if (alist.nr < MPTCP_RM_IDS_MAX &&
+ 		    remove_anno_list_by_saddr(msk, &entry->addr))
+-			alist.ids[alist.nr++] = entry->addr.id;
++			alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+ 	}
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+@@ -2002,7 +2004,7 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_rm_list list = { .nr = 0 };
+ 
+-	list.ids[list.nr++] = addr->id;
++	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	mptcp_pm_nl_rm_subflow_received(msk, &list);
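
The new mptcp_endp_get_local_id() helper encodes the one special case that
all of the rm_list call sites above now share: the endpoint that created the
initial (MPC) subflow is advertised on the wire with local id 0, so its
endpoint id has to be translated before being placed in a removal list. In
isolation::

  #include <stdint.h>

  /* The initial-subflow endpoint is always announced as id 0. */
  static uint8_t endp_to_local_id(uint8_t mpc_endpoint_id, uint8_t endp_id)
  {
          return endp_id == mpc_endpoint_id ? 0 : endp_id;
  }
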
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0ae684624c820..ba6248372aee7 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -141,7 +141,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
+ 		return false;
+ 
+-	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
+ 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ 		 to->len, MPTCP_SKB_CB(from)->end_seq);
+ 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -219,7 +219,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
+ 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ 	max_seq = atomic64_read(&msk->rcv_wnd_sent);
+ 
+-	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+ 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	if (after64(end_seq, max_seq)) {
+ 		/* out of window */
+@@ -643,7 +643,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ 		}
+ 	}
+ 
+-	pr_debug("msk=%p ssk=%p", msk, ssk);
++	pr_debug("msk=%p ssk=%p\n", msk, ssk);
+ 	tp = tcp_sk(ssk);
+ 	do {
+ 		u32 map_remaining, offset;
+@@ -722,7 +722,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 	u64 end_seq;
+ 
+ 	p = rb_first(&msk->out_of_order_queue);
+-	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	while (p) {
+ 		skb = rb_to_skb(p);
+ 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -744,7 +744,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ 
+ 			/* skip overlapping data, if any */
+-			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+ 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ 				 delta);
+ 			MPTCP_SKB_CB(skb)->offset += delta;
+@@ -1235,7 +1235,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	size_t copy;
+ 	int i;
+ 
+-	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
++	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
+ 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+ 
+ 	if (WARN_ON_ONCE(info->sent > info->limit ||
+@@ -1336,7 +1336,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	mpext->use_map = 1;
+ 	mpext->dsn64 = 1;
+ 
+-	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+ 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ 		 mpext->dsn64);
+ 
+@@ -1855,7 +1855,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			if (!msk->first_pending)
+ 				WRITE_ONCE(msk->first_pending, dfrag);
+ 		}
+-		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
++		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
+ 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ 			 !dfrag_collapsed);
+ 
+@@ -2211,7 +2211,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 			}
+ 		}
+ 
+-		pr_debug("block timeout %ld", timeo);
++		pr_debug("block timeout %ld\n", timeo);
+ 		sk_wait_data(sk, &timeo, NULL);
+ 	}
+ 
+@@ -2227,7 +2227,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		}
+ 	}
+ 
+-	pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
++	pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+ 		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+ 		 skb_queue_empty(&msk->receive_queue), copied);
+ 	if (!(flags & MSG_PEEK))
+@@ -2471,6 +2471,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow)
+ {
++	/* The first subflow can already be closed and still in the list */
++	if (subflow->close_event_done)
++		return;
++
++	subflow->close_event_done = true;
++
+ 	if (sk->sk_state == TCP_ESTABLISHED)
+ 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+ 
+@@ -2680,7 +2686,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+ 	if (!ssk)
+ 		return;
+ 
+-	pr_debug("MP_FAIL doesn't respond, reset the subflow");
++	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
+ 
+ 	slow = lock_sock_fast(ssk);
+ 	mptcp_subflow_reset(ssk);
+@@ -2850,7 +2856,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 		break;
+ 	default:
+ 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
+-			pr_debug("Fallback");
++			pr_debug("Fallback\n");
+ 			ssk->sk_shutdown |= how;
+ 			tcp_shutdown(ssk, how);
+ 
+@@ -2860,7 +2866,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
+ 			mptcp_schedule_work(sk);
+ 		} else {
+-			pr_debug("Sending DATA_FIN on subflow %p", ssk);
++			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+ 			tcp_send_ack(ssk);
+ 			if (!mptcp_rtx_timer_pending(sk))
+ 				mptcp_reset_rtx_timer(sk);
+@@ -2926,7 +2932,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
++	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
+ 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+ 		 msk->snd_nxt, msk->write_seq);
+ 
+@@ -2950,7 +2956,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
++	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
+ 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+ 		 !!mptcp_send_head(sk));
+ 
+@@ -2965,7 +2971,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	might_sleep();
+ 
+@@ -3073,7 +3079,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 		mptcp_set_state(sk, TCP_CLOSE);
+ 
+ 	sock_hold(sk);
+-	pr_debug("msk=%p state=%d", sk, sk->sk_state);
++	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+ 	if (msk->token)
+ 		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+ 
+@@ -3508,7 +3514,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p, ssk=%p", msk, msk->first);
++	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
+ 	if (WARN_ON_ONCE(!msk->first))
+ 		return -EINVAL;
+ 
+@@ -3525,7 +3531,7 @@ void mptcp_finish_connect(struct sock *ssk)
+ 	sk = subflow->conn;
+ 	msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p, token=%u", sk, subflow->token);
++	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+ 
+ 	subflow->map_seq = subflow->iasn;
+ 	subflow->map_subflow_seq = 1;
+@@ -3554,7 +3560,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ 	struct sock *parent = (void *)msk;
+ 	bool ret = true;
+ 
+-	pr_debug("msk=%p, subflow=%p", msk, subflow);
++	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+ 
+ 	/* mptcp socket already closing? */
+ 	if (!mptcp_is_fully_established(parent)) {
+@@ -3600,7 +3606,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ 
+ static void mptcp_shutdown(struct sock *sk, int how)
+ {
+-	pr_debug("sk=%p, how=%d", sk, how);
++	pr_debug("sk=%p, how=%d\n", sk, how);
+ 
+ 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+ 		__mptcp_wr_shutdown(sk);
+@@ -3820,7 +3826,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ 	struct sock *ssk;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	lock_sock(sk);
+ 
+@@ -3860,7 +3866,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 	struct sock *ssk, *newsk;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* Buggy applications can call accept on socket states other then LISTEN
+ 	 * but no need to allocate the first subflow just to error out.
+@@ -3869,12 +3875,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 	if (!ssk)
+ 		return -EINVAL;
+ 
+-	pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
++	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
+ 	newsk = inet_csk_accept(ssk, flags, &err, kern);
+ 	if (!newsk)
+ 		return err;
+ 
+-	pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
++	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
+ 	if (sk_is_mptcp(newsk)) {
+ 		struct mptcp_subflow_context *subflow;
+ 		struct sock *new_mptcp_sock;
+@@ -3967,7 +3973,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	sock_poll_wait(file, sock, wait);
+ 
+ 	state = inet_sk_state_load(sk);
+-	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+ 	if (state == TCP_LISTEN) {
+ 		struct sock *ssk = READ_ONCE(msk->first);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 20736f31dc534..b9a4f6364b785 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -500,7 +500,8 @@ struct mptcp_subflow_context {
+ 		stale : 1,	    /* unable to snd/rcv data, do not use for xmit */
+ 		valid_csum_seen : 1,        /* at least one csum validated */
+ 		is_mptfo : 1,	    /* subflow is doing TFO */
+-		__unused : 10;
++		close_event_done : 1,       /* has done the post-closed part */
++		__unused : 9;
+ 	enum mptcp_data_avail data_avail;
+ 	bool	scheduled;
+ 	u32	remote_nonce;
+@@ -948,8 +949,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 			   bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+-					struct list_head *rm_list);
+ 
+ void mptcp_free_local_addr_list(struct mptcp_sock *msk);
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
+@@ -1093,7 +1092,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+ 	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+-		pr_debug("TCP fallback already done (msk=%p)", msk);
++		pr_debug("TCP fallback already done (msk=%p)\n", msk);
+ 		return;
+ 	}
+ 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -1120,7 +1119,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
+ 	}
+ }
+ 
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+ 
+ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+ {
+diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
+index 4ab0693c069c0..907986f5f0427 100644
+--- a/net/mptcp/sched.c
++++ b/net/mptcp/sched.c
+@@ -64,7 +64,7 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
+ 	list_add_tail_rcu(&sched->list, &mptcp_sched_list);
+ 	spin_unlock(&mptcp_sched_list_lock);
+ 
+-	pr_debug("%s registered", sched->name);
++	pr_debug("%s registered\n", sched->name);
+ 	return 0;
+ }
+ 
+@@ -96,7 +96,7 @@ int mptcp_init_sched(struct mptcp_sock *msk,
+ 	if (msk->sched->init)
+ 		msk->sched->init(msk);
+ 
+-	pr_debug("sched=%s", msk->sched->name);
++	pr_debug("sched=%s\n", msk->sched->name);
+ 
+ 	return 0;
+ }
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index bdfeecea814f3..d0f73b9180c7c 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -858,7 +858,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (level == SOL_SOCKET)
+ 		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+@@ -1416,7 +1416,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* @@ the meaning of setsockopt() when the socket is connected and
+ 	 * there are multiple subflows is not yet defined. It is up to the
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 81050aa55f7b9..2c779275a0310 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -40,7 +40,7 @@ static void subflow_req_destructor(struct request_sock *req)
+ {
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 
+-	pr_debug("subflow_req=%p", subflow_req);
++	pr_debug("subflow_req=%p\n", subflow_req);
+ 
+ 	if (subflow_req->msk)
+ 		sock_put((struct sock *)subflow_req->msk);
+@@ -146,7 +146,7 @@ static int subflow_check_req(struct request_sock *req,
+ 	struct mptcp_options_received mp_opt;
+ 	bool opt_mp_capable, opt_mp_join;
+ 
+-	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++	pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -219,7 +219,7 @@ static int subflow_check_req(struct request_sock *req,
+ 		}
+ 
+ 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+-			pr_debug("syn inet_sport=%d %d",
++			pr_debug("syn inet_sport=%d %d\n",
+ 				 ntohs(inet_sk(sk_listener)->inet_sport),
+ 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+ 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+@@ -238,7 +238,7 @@ static int subflow_check_req(struct request_sock *req,
+ 				return -EPERM;
+ 		}
+ 
+-		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++		pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+ 			 subflow_req->remote_nonce, subflow_req->msk);
+ 	}
+ 
+@@ -508,7 +508,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 	subflow->rel_write_seq = 1;
+ 	subflow->conn_finished = 1;
+ 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+-	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++	pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 	if (subflow->request_mptcp) {
+@@ -540,7 +540,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->thmac = mp_opt.thmac;
+ 		subflow->remote_nonce = mp_opt.nonce;
+ 		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
+-		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
++		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
+ 			 subflow, subflow->thmac, subflow->remote_nonce,
+ 			 subflow->backup);
+ 
+@@ -566,7 +566,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+ 
+ 		if (subflow_use_different_dport(msk, sk)) {
+-			pr_debug("synack inet_dport=%d %d",
++			pr_debug("synack inet_dport=%d %d\n",
+ 				 ntohs(inet_sk(sk)->inet_dport),
+ 				 ntohs(inet_sk(parent)->inet_dport));
+ 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+@@ -636,7 +636,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	/* Never answer to SYNs sent to broadcast or multicast */
+ 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -667,7 +667,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return subflow_v4_conn_request(sk, skb);
+@@ -786,7 +786,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_sock *owner;
+ 	struct sock *child;
+ 
+-	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++	pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+ 
+ 	/* After child creation we must look for MPC even when options
+ 	 * are not parsed
+@@ -877,7 +877,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 			ctx->conn = (struct sock *)owner;
+ 
+ 			if (subflow_use_different_sport(owner, sk)) {
+-				pr_debug("ack inet_sport=%d %d",
++				pr_debug("ack inet_sport=%d %d\n",
+ 					 ntohs(inet_sk(sk)->inet_sport),
+ 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
+ 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+@@ -934,7 +934,7 @@ enum mapping_status {
+ 
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+ 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+ 
+@@ -1094,7 +1094,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 
+ 	data_len = mpext->data_len;
+ 	if (data_len == 0) {
+-		pr_debug("infinite mapping received");
++		pr_debug("infinite mapping received\n");
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+ 		subflow->map_data_len = 0;
+ 		return MAPPING_INVALID;
+@@ -1104,7 +1104,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 		if (data_len == 1) {
+ 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+ 								 mpext->dsn64);
+-			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++			pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+ 			if (subflow->map_valid) {
+ 				/* A DATA_FIN might arrive in a DSS
+ 				 * option before the previous mapping
+@@ -1129,7 +1129,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 				data_fin_seq &= GENMASK_ULL(31, 0);
+ 
+ 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+-			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+ 				 data_fin_seq, mpext->dsn64);
+ 		}
+ 
+@@ -1176,7 +1176,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
+ 		return MAPPING_INVALID;
+ 
+-	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ 		 subflow->map_seq, subflow->map_subflow_seq,
+ 		 subflow->map_data_len, subflow->map_csum_reqd,
+ 		 subflow->map_data_csum);
+@@ -1211,7 +1211,7 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ 	avail_len = skb->len - offset;
+ 	incr = limit >= avail_len ? avail_len + fin : limit;
+ 
+-	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++	pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+ 		 offset, subflow->map_subflow_seq);
+ 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ 	tcp_sk(ssk)->copied_seq += incr;
+@@ -1312,7 +1312,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 
+ 		old_ack = READ_ONCE(msk->ack_seq);
+ 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+-		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+ 			 ack_seq);
+ 		if (unlikely(before64(ack_seq, old_ack))) {
+ 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+@@ -1384,7 +1384,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ 		subflow->map_valid = 0;
+ 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ 
+-		pr_debug("Done with mapping: seq=%u data_len=%u",
++		pr_debug("Done with mapping: seq=%u data_len=%u\n",
+ 			 subflow->map_subflow_seq,
+ 			 subflow->map_data_len);
+ 	}
+@@ -1494,7 +1494,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
+ 
+ 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+ 
+-	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+ 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+ 
+ 	if (likely(icsk->icsk_af_ops == target))
+@@ -1589,7 +1589,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 		goto failed;
+ 
+ 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+-	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+ 		 remote_token, local_id, remote_id);
+ 	subflow->remote_token = remote_token;
+ 	WRITE_ONCE(subflow->remote_id, remote_id);
+@@ -1727,7 +1727,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+ 
+ 	subflow = mptcp_subflow_ctx(sf->sk);
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	*new_sock = sf;
+ 	sock_hold(sk);
+@@ -1751,7 +1751,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ 	INIT_LIST_HEAD(&ctx->node);
+ 	INIT_LIST_HEAD(&ctx->delegated_node);
+ 
+-	pr_debug("subflow=%p", ctx);
++	pr_debug("subflow=%p\n", ctx);
+ 
+ 	ctx->tcp_sock = sk;
+ 	WRITE_ONCE(ctx->local_id, -1);
+@@ -1902,7 +1902,7 @@ static int subflow_ulp_init(struct sock *sk)
+ 		goto out;
+ 	}
+ 
+-	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++	pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+ 
+ 	tp->is_mptcp = 1;
+ 	ctx->icsk_af_ops = icsk->icsk_af_ops;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 74db51348a7ff..4d88e797ae49f 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1562,7 +1562,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_get_bss);
+ 
+-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
++static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 			  struct cfg80211_internal_bss *bss)
+ {
+ 	struct rb_node **p = &rdev->bss_tree.rb_node;
+@@ -1578,7 +1578,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 		if (WARN_ON(!cmp)) {
+ 			/* will sort of leak this BSS */
+-			return;
++			return false;
+ 		}
+ 
+ 		if (cmp < 0)
+@@ -1589,6 +1589,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 	rb_link_node(&bss->rbn, parent, p);
+ 	rb_insert_color(&bss->rbn, &rdev->bss_tree);
++	return true;
+ }
+ 
+ static struct cfg80211_internal_bss *
+@@ -1615,6 +1616,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
+ 	return NULL;
+ }
+ 
++static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
++				struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	if (!rb_insert_bss(rdev, bss))
++		return;
++	list_add_tail(&bss->list, &rdev->bss_list);
++	rdev->bss_entries++;
++}
++
++static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
++                                struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	rb_erase(&bss->rbn, &rdev->bss_tree);
++	if (!rb_insert_bss(rdev, bss)) {
++		list_del(&bss->list);
++		if (!list_empty(&bss->hidden_list))
++			list_del_init(&bss->hidden_list);
++		if (!list_empty(&bss->pub.nontrans_list))
++			list_del_init(&bss->pub.nontrans_list);
++		rdev->bss_entries--;
++	}
++	rdev->bss_generation++;
++}
++
+ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
+ 				   struct cfg80211_internal_bss *new)
+ {
+@@ -1876,9 +1905,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 			bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
+ 		}
+ 
+-		list_add_tail(&new->list, &rdev->bss_list);
+-		rdev->bss_entries++;
+-		rb_insert_bss(rdev, new);
++		cfg80211_insert_bss(rdev, new);
+ 		found = new;
+ 	}
+ 
+@@ -3111,19 +3138,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ 		if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
+ 			rdev->bss_generation++;
+ 	}
+-
+-	rb_erase(&cbss->rbn, &rdev->bss_tree);
+-	rb_insert_bss(rdev, cbss);
+-	rdev->bss_generation++;
++	cfg80211_rehash_bss(rdev, cbss);
+ 
+ 	list_for_each_entry_safe(nontrans_bss, tmp,
+ 				 &cbss->pub.nontrans_list,
+ 				 nontrans_list) {
+ 		bss = bss_from_pub(nontrans_bss);
+ 		bss->pub.channel = chan;
+-		rb_erase(&bss->rbn, &rdev->bss_tree);
+-		rb_insert_bss(rdev, bss);
+-		rdev->bss_generation++;
++		cfg80211_rehash_bss(rdev, bss);
+ 	}
+ 
+ done:
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 63ddefb6ddd1c..23b2853ce3c42 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -1698,6 +1698,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+ 		struct aa_profile *p;
+ 		p = aa_deref_parent(profile);
+ 		dent = prof_dir(p);
++		if (!dent) {
++			error = -ENOENT;
++			goto fail2;
++		}
+ 		/* adding to parent that previously didn't have children */
+ 		dent = aafs_create_dir("profiles", dent);
+ 		if (IS_ERR(dent))
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 6b92e09d3f780..98c2bdbfcaed6 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4354,7 +4354,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ 	rcu_read_unlock();
+ 
+ 	if (hskp == NULL)
+-		rc = netlbl_req_setattr(req, &skp->smk_netlabel);
++		rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
+ 	else
+ 		netlbl_req_delattr(req);
+ 
+diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
+index b111382f697aa..9e36738c0dd04 100644
+--- a/sound/core/seq/seq_ports.h
++++ b/sound/core/seq/seq_ports.h
+@@ -7,6 +7,7 @@
+ #define __SND_SEQ_PORTS_H
+ 
+ #include <sound/seq_kernel.h>
++#include <sound/ump_convert.h>
+ #include "seq_lock.h"
+ 
+ /* list of 'exported' ports */
+@@ -42,17 +43,6 @@ struct snd_seq_port_subs_info {
+ 	int (*close)(void *private_data, struct snd_seq_port_subscribe *info);
+ };
+ 
+-/* context for converting from legacy control event to UMP packet */
+-struct snd_seq_ump_midi2_bank {
+-	bool rpn_set;
+-	bool nrpn_set;
+-	bool bank_set;
+-	unsigned char cc_rpn_msb, cc_rpn_lsb;
+-	unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+-	unsigned char cc_data_msb, cc_data_lsb;
+-	unsigned char cc_bank_msb, cc_bank_lsb;
+-};
+-
+ struct snd_seq_client_port {
+ 
+ 	struct snd_seq_addr addr;	/* client/port number */
+@@ -88,7 +78,7 @@ struct snd_seq_client_port {
+ 	unsigned char ump_group;
+ 
+ #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
+-	struct snd_seq_ump_midi2_bank midi2_bank[16]; /* per channel */
++	struct ump_cvt_to_ump_bank midi2_bank[16]; /* per channel */
+ #endif
+ };
+ 
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index d9dacfbe4a9ae..4dd540cbb1cbb 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -368,7 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
+ 	struct snd_seq_ump_event ev_cvt;
+ 	const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
+ 	union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
+-	struct snd_seq_ump_midi2_bank *cc;
++	struct ump_cvt_to_ump_bank *cc;
+ 
+ 	ev_cvt = *event;
+ 	memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
+@@ -789,28 +789,45 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 	return 1;
+ }
+ 
++static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
++{
++	cc->rpn_set = 0;
++	cc->nrpn_set = 0;
++	cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
++	cc->cc_data_msb = cc->cc_data_lsb = 0;
++	cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
++}
++
+ /* set up the MIDI2 RPN/NRPN packet data from the parsed info */
+-static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
+-		     union snd_ump_midi2_msg *data,
+-		     unsigned char channel)
++static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
++		    union snd_ump_midi2_msg *data,
++		    unsigned char channel,
++		    bool flush)
+ {
++	if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
++		return 0; // skip
++	/* when not flushing, wait for complete data set */
++	if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
++		return 0; // skip
++
+ 	if (cc->rpn_set) {
+ 		data->rpn.status = UMP_MSG_STATUS_RPN;
+ 		data->rpn.bank = cc->cc_rpn_msb;
+ 		data->rpn.index = cc->cc_rpn_lsb;
+-		cc->rpn_set = 0;
+-		cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+-	} else {
++	} else if (cc->nrpn_set) {
+ 		data->rpn.status = UMP_MSG_STATUS_NRPN;
+ 		data->rpn.bank = cc->cc_nrpn_msb;
+ 		data->rpn.index = cc->cc_nrpn_lsb;
+-		cc->nrpn_set = 0;
+-		cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
++	} else {
++		return 0; // skip
+ 	}
++
+ 	data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
+ 					     cc->cc_data_lsb);
+ 	data->rpn.channel = channel;
+-	cc->cc_data_msb = cc->cc_data_lsb = 0;
++
++	reset_rpn(cc);
++	return 1;
+ }
+ 
+ /* convert CC event to MIDI 2.0 UMP */
+@@ -822,29 +839,39 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 	unsigned char channel = event->data.control.channel & 0x0f;
+ 	unsigned char index = event->data.control.param & 0x7f;
+ 	unsigned char val = event->data.control.value & 0x7f;
+-	struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++	struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
++	int ret;
+ 
+ 	/* process special CC's (bank/rpn/nrpn) */
+ 	switch (index) {
+ 	case UMP_CC_RPN_MSB:
++		ret = fill_rpn(cc, data, channel, true);
+ 		cc->rpn_set = 1;
+ 		cc->cc_rpn_msb = val;
+-		return 0; // skip
++		if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++			reset_rpn(cc);
++		return ret;
+ 	case UMP_CC_RPN_LSB:
++		ret = fill_rpn(cc, data, channel, true);
+ 		cc->rpn_set = 1;
+ 		cc->cc_rpn_lsb = val;
+-		return 0; // skip
++		if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++			reset_rpn(cc);
++		return ret;
+ 	case UMP_CC_NRPN_MSB:
++		ret = fill_rpn(cc, data, channel, true);
+ 		cc->nrpn_set = 1;
+ 		cc->cc_nrpn_msb = val;
+-		return 0; // skip
++		return ret;
+ 	case UMP_CC_NRPN_LSB:
++		ret = fill_rpn(cc, data, channel, true);
+ 		cc->nrpn_set = 1;
+ 		cc->cc_nrpn_lsb = val;
+-		return 0; // skip
++		return ret;
+ 	case UMP_CC_DATA:
++		cc->cc_data_msb_set = 1;
+ 		cc->cc_data_msb = val;
+-		return 0; // skip
++		return fill_rpn(cc, data, channel, false);
+ 	case UMP_CC_BANK_SELECT:
+ 		cc->bank_set = 1;
+ 		cc->cc_bank_msb = val;
+@@ -854,11 +881,9 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 		cc->cc_bank_lsb = val;
+ 		return 0; // skip
+ 	case UMP_CC_DATA_LSB:
++		cc->cc_data_lsb_set = 1;
+ 		cc->cc_data_lsb = val;
+-		if (!(cc->rpn_set || cc->nrpn_set))
+-			return 0; // skip
+-		fill_rpn(cc, data, channel);
+-		return 1;
++		return fill_rpn(cc, data, channel, false);
+ 	}
+ 
+ 	data->cc.status = status;
+@@ -887,7 +912,7 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 			       unsigned char status)
+ {
+ 	unsigned char channel = event->data.control.channel & 0x0f;
+-	struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++	struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
+ 
+ 	data->pg.status = status;
+ 	data->pg.channel = channel;
+@@ -924,8 +949,9 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
+ {
+ 	unsigned char channel = event->data.control.channel & 0x0f;
+ 	unsigned char index = event->data.control.param & 0x7f;
+-	struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++	struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
+ 	unsigned char msb, lsb;
++	int ret;
+ 
+ 	msb = (event->data.control.value >> 7) & 0x7f;
+ 	lsb = event->data.control.value & 0x7f;
+@@ -939,28 +965,27 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 		cc->cc_bank_lsb = lsb;
+ 		return 0; // skip
+ 	case UMP_CC_RPN_MSB:
+-		cc->cc_rpn_msb = msb;
+-		fallthrough;
+ 	case UMP_CC_RPN_LSB:
+-		cc->rpn_set = 1;
++		ret = fill_rpn(cc, data, channel, true);
++		cc->cc_rpn_msb = msb;
+ 		cc->cc_rpn_lsb = lsb;
+-		return 0; // skip
++		cc->rpn_set = 1;
++		if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++			reset_rpn(cc);
++		return ret;
+ 	case UMP_CC_NRPN_MSB:
+-		cc->cc_nrpn_msb = msb;
+-		fallthrough;
+ 	case UMP_CC_NRPN_LSB:
++		ret = fill_rpn(cc, data, channel, true);
++		cc->cc_nrpn_msb = msb;
+ 		cc->nrpn_set = 1;
+ 		cc->cc_nrpn_lsb = lsb;
+-		return 0; // skip
++		return ret;
+ 	case UMP_CC_DATA:
+-		cc->cc_data_msb = msb;
+-		fallthrough;
+ 	case UMP_CC_DATA_LSB:
++		cc->cc_data_msb_set = cc->cc_data_lsb_set = 1;
++		cc->cc_data_msb = msb;
+ 		cc->cc_data_lsb = lsb;
+-		if (!(cc->rpn_set || cc->nrpn_set))
+-			return 0; // skip
+-		fill_rpn(cc, data, channel);
+-		return 1;
++		return fill_rpn(cc, data, channel, false);
+ 	}
+ 
+ 	data->cc.status = UMP_MSG_STATUS_CC;
+diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
+index f67c44c83fde4..0fe13d0316568 100644
+--- a/sound/core/ump_convert.c
++++ b/sound/core/ump_convert.c
+@@ -287,25 +287,42 @@ static int cvt_legacy_system_to_ump(struct ump_cvt_to_ump *cvt,
+ 	return 4;
+ }
+ 
+-static void fill_rpn(struct ump_cvt_to_ump_bank *cc,
+-		     union snd_ump_midi2_msg *midi2)
++static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
+ {
++	cc->rpn_set = 0;
++	cc->nrpn_set = 0;
++	cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
++	cc->cc_data_msb = cc->cc_data_lsb = 0;
++	cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
++}
++
++static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
++		    union snd_ump_midi2_msg *midi2,
++		    bool flush)
++{
++	if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
++		return 0; // skip
++	/* when not flushing, wait for complete data set */
++	if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
++		return 0; // skip
++
+ 	if (cc->rpn_set) {
+ 		midi2->rpn.status = UMP_MSG_STATUS_RPN;
+ 		midi2->rpn.bank = cc->cc_rpn_msb;
+ 		midi2->rpn.index = cc->cc_rpn_lsb;
+-		cc->rpn_set = 0;
+-		cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+-	} else {
++	} else if (cc->nrpn_set) {
+ 		midi2->rpn.status = UMP_MSG_STATUS_NRPN;
+ 		midi2->rpn.bank = cc->cc_nrpn_msb;
+ 		midi2->rpn.index = cc->cc_nrpn_lsb;
+-		cc->nrpn_set = 0;
+-		cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
++	} else {
++		return 0; // skip
+ 	}
++
+ 	midi2->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
+ 					      cc->cc_data_lsb);
+-	cc->cc_data_msb = cc->cc_data_lsb = 0;
++
++	reset_rpn(cc);
++	return 1;
+ }
+ 
+ /* convert to a MIDI 1.0 Channel Voice message */
+@@ -318,6 +335,7 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ 	struct ump_cvt_to_ump_bank *cc;
+ 	union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)data;
+ 	unsigned char status, channel;
++	int ret;
+ 
+ 	BUILD_BUG_ON(sizeof(union snd_ump_midi1_msg) != 4);
+ 	BUILD_BUG_ON(sizeof(union snd_ump_midi2_msg) != 8);
+@@ -358,24 +376,33 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ 	case UMP_MSG_STATUS_CC:
+ 		switch (buf[1]) {
+ 		case UMP_CC_RPN_MSB:
++			ret = fill_rpn(cc, midi2, true);
+ 			cc->rpn_set = 1;
+ 			cc->cc_rpn_msb = buf[2];
+-			return 0; // skip
++			if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++				reset_rpn(cc);
++			return ret;
+ 		case UMP_CC_RPN_LSB:
++			ret = fill_rpn(cc, midi2, true);
+ 			cc->rpn_set = 1;
+ 			cc->cc_rpn_lsb = buf[2];
+-			return 0; // skip
++			if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++				reset_rpn(cc);
++			return ret;
+ 		case UMP_CC_NRPN_MSB:
++			ret = fill_rpn(cc, midi2, true);
+ 			cc->nrpn_set = 1;
+ 			cc->cc_nrpn_msb = buf[2];
+-			return 0; // skip
++			return ret;
+ 		case UMP_CC_NRPN_LSB:
++			ret = fill_rpn(cc, midi2, true);
+ 			cc->nrpn_set = 1;
+ 			cc->cc_nrpn_lsb = buf[2];
+-			return 0; // skip
++			return ret;
+ 		case UMP_CC_DATA:
++			cc->cc_data_msb_set = 1;
+ 			cc->cc_data_msb = buf[2];
+-			return 0; // skip
++			return fill_rpn(cc, midi2, false);
+ 		case UMP_CC_BANK_SELECT:
+ 			cc->bank_set = 1;
+ 			cc->cc_bank_msb = buf[2];
+@@ -385,12 +412,9 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ 			cc->cc_bank_lsb = buf[2];
+ 			return 0; // skip
+ 		case UMP_CC_DATA_LSB:
++			cc->cc_data_lsb_set = 1;
+ 			cc->cc_data_lsb = buf[2];
+-			if (cc->rpn_set || cc->nrpn_set)
+-				fill_rpn(cc, midi2);
+-			else
+-				return 0; // skip
+-			break;
++			return fill_rpn(cc, midi2, false);
+ 		default:
+ 			midi2->cc.index = buf[1];
+ 			midi2->cc.data = upscale_7_to_32bit(buf[2]);
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index bf685d01259d3..d3ed3e21b1979 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -4956,6 +4956,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+ 
++/* forcibly mute the speaker output without caching; return true if updated */
++static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
++{
++	if (!nid)
++		return false;
++	if (!nid_has_mute(codec, nid, HDA_OUTPUT))
++		return false; /* no mute, skip */
++	if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
++	    snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
++	    HDA_AMP_MUTE)
++		return false; /* both channels already muted, skip */
++
++	/* direct amp update without caching */
++	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
++			    AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
++			    AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
++	return true;
++}
++
++/**
++ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
++ * @codec: the HDA codec
++ *
++ * Forcibly mute the speaker outputs, to be called at suspend or shutdown.
++ *
++ * The mute state done by this function isn't cached, hence the original state
++ * will be restored at resume.
++ *
++ * Return true if the mute state has been changed.
++ */
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
++{
++	struct hda_gen_spec *spec = codec->spec;
++	const int *paths;
++	const struct nid_path *path;
++	int i, p, num_paths;
++	bool updated = false;
++
++	/* if already powered off, do nothing */
++	if (!snd_hdac_is_power_on(&codec->core))
++		return false;
++
++	if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
++		paths = spec->out_paths;
++		num_paths = spec->autocfg.line_outs;
++	} else {
++		paths = spec->speaker_paths;
++		num_paths = spec->autocfg.speaker_outs;
++	}
++
++	for (i = 0; i < num_paths; i++) {
++		path = snd_hda_get_path_from_idx(codec, paths[i]);
++		if (!path)
++			continue;
++		for (p = 0; p < path->depth; p++)
++			if (force_mute_output_path(codec, path->path[p]))
++				updated = true;
++	}
++
++	return updated;
++}
++EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
++
+ /**
+  * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
+  * set up the hda_gen_spec
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index a8eea83676299..aed4381f7a619 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -355,5 +355,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
+ int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
+ 				     int (*callback)(struct led_classdev *,
+ 						     enum led_brightness));
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
+ 
+ #endif /* __SOUND_HDA_GENERIC_H */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index af921364195e4..8396d1d93668c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -205,6 +205,8 @@ static void cx_auto_shutdown(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
++	snd_hda_gen_shutup_speakers(codec);
++
+ 	/* Turn the problematic codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+ 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index d597e59863ee3..f6c1dbd0ebcf5 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21M3"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -430,6 +437,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8B27"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+index 6c263086c44d2..32a9b26ee2c89 100644
+--- a/sound/soc/codecs/es8326.c
++++ b/sound/soc/codecs/es8326.c
+@@ -617,6 +617,8 @@ static void es8326_jack_detect_handler(struct work_struct *work)
+ 		es8326_disable_micbias(es8326->component);
+ 		if (es8326->jack->status & SND_JACK_HEADPHONE) {
+ 			dev_dbg(comp->dev, "Report hp remove event\n");
++			snd_soc_jack_report(es8326->jack, 0,
++				    SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2);
+ 			snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET);
+ 			/* mute adc when mic path switch */
+ 			regmap_write(es8326->regmap, ES8326_ADC_SCALE, 0x33);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 2be13dd19ddd2..6faff03acc110 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -21,6 +21,7 @@ cinfail=""
+ cinsent=""
+ tmpfile=""
+ cout=""
++err=""
+ capout=""
+ ns1=""
+ ns2=""
+@@ -187,6 +188,7 @@ init() {
+ 	cin=$(mktemp)
+ 	cinsent=$(mktemp)
+ 	cout=$(mktemp)
++	err=$(mktemp)
+ 	evts_ns1=$(mktemp)
+ 	evts_ns2=$(mktemp)
+ 
+@@ -202,6 +204,7 @@ cleanup()
+ 	rm -f "$sin" "$sout" "$cinsent" "$cinfail"
+ 	rm -f "$tmpfile"
+ 	rm -rf $evts_ns1 $evts_ns2
++	rm -f "$err"
+ 	cleanup_partial
+ }
+ 
+@@ -461,16 +464,17 @@ reset_with_fail()
+ 	fi
+ }
+ 
++start_events()
++{
++	mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
++	mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
++}
++
+ reset_with_events()
+ {
+ 	reset "${1}" || return 1
+ 
+-	:> "$evts_ns1"
+-	:> "$evts_ns2"
+-	ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
+-	evts_ns1_pid=$!
+-	ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
+-	evts_ns2_pid=$!
++	start_events
+ }
+ 
+ reset_with_tcp_filter()
+@@ -669,7 +673,9 @@ wait_mpj()
+ kill_events_pids()
+ {
+ 	mptcp_lib_kill_wait $evts_ns1_pid
++	evts_ns1_pid=0
+ 	mptcp_lib_kill_wait $evts_ns2_pid
++	evts_ns2_pid=0
+ }
+ 
+ pm_nl_set_limits()
+@@ -2891,13 +2897,6 @@ backup_tests()
+ 	fi
+ }
+ 
+-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+-LISTENER_CLOSED=16  #MPTCP_EVENT_LISTENER_CLOSED
+-
+-AF_INET=2
+-AF_INET6=10
+-
+ verify_listener_events()
+ {
+ 	local evt=$1
+@@ -2911,9 +2910,9 @@ verify_listener_events()
+ 	local sport
+ 	local name
+ 
+-	if [ $e_type = $LISTENER_CREATED ]; then
++	if [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CREATED ]; then
+ 		name="LISTENER_CREATED"
+-	elif [ $e_type = $LISTENER_CLOSED ]; then
++	elif [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CLOSED ]; then
+ 		name="LISTENER_CLOSED "
+ 	else
+ 		name="$e_type"
+@@ -2980,8 +2979,10 @@ add_addr_ports_tests()
+ 		chk_add_nr 1 1 1
+ 		chk_rm_nr 1 1 invert
+ 
+-		verify_listener_events $evts_ns1 $LISTENER_CREATED $AF_INET 10.0.2.1 10100
+-		verify_listener_events $evts_ns1 $LISTENER_CLOSED $AF_INET 10.0.2.1 10100
++		verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CREATED \
++				       $MPTCP_LIB_AF_INET 10.0.2.1 10100
++		verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CLOSED \
++				       $MPTCP_LIB_AF_INET 10.0.2.1 10100
+ 		kill_events_pids
+ 	fi
+ 
+@@ -3422,6 +3423,107 @@ userspace_pm_rm_sf()
+ 	wait_rm_sf $1 "${cnt}"
+ }
+ 
++check_output()
++{
++	local cmd="$1"
++	local expected="$2"
++	local msg="$3"
++	local rc=0
++
++	mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
++	if [ ${rc} -eq 2 ]; then
++		fail_test "fail to check output # error ${rc}"
++	elif [ ${rc} -eq 0 ]; then
++		print_ok
++	elif [ ${rc} -eq 1 ]; then
++		fail_test "fail to check output # different output"
++	fi
++}
++
++# $1: ns
++userspace_pm_dump()
++{
++	local evts=$evts_ns1
++	local tk
++
++	[ "$1" == "$ns2" ] && evts=$evts_ns2
++	tk=$(mptcp_lib_evts_get_info token "$evts")
++
++	ip netns exec $1 ./pm_nl_ctl dump token $tk
++}
++
++# $1: ns ; $2: id
++userspace_pm_get_addr()
++{
++	local evts=$evts_ns1
++	local tk
++
++	[ "$1" == "$ns2" ] && evts=$evts_ns2
++	tk=$(mptcp_lib_evts_get_info token "$evts")
++
++	ip netns exec $1 ./pm_nl_ctl get $2 token $tk
++}
++
++userspace_pm_chk_dump_addr()
++{
++	local ns="${1}"
++	local exp="${2}"
++	local check="${3}"
++
++	print_check "dump addrs ${check}"
++
++	if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_dump_addr$"; then
++		check_output "userspace_pm_dump ${ns}" "${exp}"
++	else
++		print_skip
++	fi
++}
++
++userspace_pm_chk_get_addr()
++{
++	local ns="${1}"
++	local id="${2}"
++	local exp="${3}"
++
++	print_check "get id ${id} addr"
++
++	if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_get_addr$"; then
++		check_output "userspace_pm_get_addr ${ns} ${id}" "${exp}"
++	else
++		print_skip
++	fi
++}
++
++# $1: ns ; $2: event type ; $3: count
++chk_evt_nr()
++{
++	local ns=${1}
++	local evt_name="${2}"
++	local exp="${3}"
++
++	local evts="${evts_ns1}"
++	local evt="${!evt_name}"
++	local count
++
++	evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
++	[ "${ns}" == "ns2" ] && evts="${evts_ns2}"
++
++	print_check "event ${ns} ${evt_name} (${exp})"
++
++	if [[ "${evt_name}" = "LISTENER_"* ]] &&
++	   ! mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then
++		print_skip "event not supported"
++		return
++	fi
++
++	count=$(grep -cw "type:${evt}" "${evts}")
++	if [ "${count}" != "${exp}" ]; then
++		fail_test "got ${count} events, expected ${exp}"
++	else
++		print_ok
++	fi
++}
++
+ userspace_tests()
+ {
+ 	# userspace pm type prevents add_addr
+@@ -3513,11 +3615,17 @@ userspace_tests()
+ 		chk_mptcp_info subflows 2 subflows 2
+ 		chk_subflows_total 3 3
+ 		chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
+-		userspace_pm_rm_addr $ns1 10
+-		userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
++		userspace_pm_chk_dump_addr "${ns1}" \
++			$'id 10 flags signal 10.0.2.1\nid 20 flags signal 10.0.3.1' \
++			"signal"
++		userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
++		userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
++		userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
++		userspace_pm_chk_dump_addr "${ns1}" \
++			"id 20 flags signal 10.0.3.1" "after rm_sf 10"
+ 		userspace_pm_rm_addr $ns1 20
+-		userspace_pm_rm_sf $ns1 10.0.3.1 $SUB_ESTABLISHED
+-		chk_rm_nr 2 2 invert
++		userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
++		chk_rm_nr 1 1 invert
+ 		chk_mptcp_info subflows 0 subflows 0
+ 		chk_subflows_total 1 1
+ 		kill_events_pids
+@@ -3537,11 +3645,38 @@ userspace_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_mptcp_info subflows 1 subflows 1
+ 		chk_subflows_total 2 2
+-		userspace_pm_rm_addr $ns2 20
+-		userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED
+-		chk_rm_nr 1 1
++		userspace_pm_chk_dump_addr "${ns2}" \
++			"id 20 flags subflow 10.0.3.2" \
++			"subflow"
++		userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
++		userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
++		userspace_pm_chk_dump_addr "${ns2}" \
++			"" \
++			"after rm_sf 20"
++		chk_rm_nr 0 1
++		chk_mptcp_info subflows 0 subflows 0
++		chk_subflows_total 1 1
++		kill_events_pids
++		mptcp_lib_kill_wait $tests_pid
++	fi
++
++	# userspace pm create id 0 subflow
++	if reset_with_events "userspace pm create id 0 subflow" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
++		set_userspace_pm $ns2
++		pm_nl_set_limits $ns1 0 1
++		speed=5 \
++			run_tests $ns1 $ns2 10.0.1.1 &
++		local tests_pid=$!
++		wait_mpj $ns2
+ 		chk_mptcp_info subflows 0 subflows 0
+ 		chk_subflows_total 1 1
++		userspace_pm_add_sf $ns2 10.0.3.2 0
++		userspace_pm_chk_dump_addr "${ns2}" \
++			"id 0 flags subflow 10.0.3.2" "id 0 subflow"
++		chk_join_nr 1 1 1
++		chk_mptcp_info subflows 1 subflows 1
++		chk_subflows_total 2 2
+ 		kill_events_pids
+ 		mptcp_lib_kill_wait $tests_pid
+ 	fi
+@@ -3578,6 +3713,7 @@ endpoint_tests()
+ 
+ 	if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+ 	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		start_events
+ 		pm_nl_set_limits $ns1 0 3
+ 		pm_nl_set_limits $ns2 0 3
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+@@ -3629,9 +3765,129 @@ endpoint_tests()
+ 
+ 		mptcp_lib_kill_wait $tests_pid
+ 
++		kill_events_pids
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4
++
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab
++
+ 		chk_join_nr 6 6 6
+ 		chk_rm_nr 4 4
+ 	fi
++
++	# remove and re-add
++	if reset_with_events "delete re-add signal" &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 3
++		pm_nl_set_limits $ns2 3 3
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
++		test_linkfail=4 speed=5 \
++			run_tests $ns1 $ns2 10.0.1.1 &
++		local tests_pid=$!
++
++		wait_mpj $ns2
++		pm_nl_check_endpoint "creation" \
++			$ns1 10.0.2.1 id 1 flags signal
++		chk_subflow_nr "before delete" 2
++		chk_mptcp_info subflows 1 subflows 1
++
++		pm_nl_del_endpoint $ns1 1 10.0.2.1
++		pm_nl_del_endpoint $ns1 2 224.0.0.1
++		sleep 0.5
++		chk_subflow_nr "after delete" 1
++		chk_mptcp_info subflows 0 subflows 0
++
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "after re-add" 3
++		chk_mptcp_info subflows 2 subflows 2
++
++		pm_nl_del_endpoint $ns1 42 10.0.1.1
++		sleep 0.5
++		chk_subflow_nr "after delete ID 0" 2
++		chk_mptcp_info subflows 2 subflows 2
++
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "after re-add ID 0" 3
++		chk_mptcp_info subflows 3 subflows 3
++
++		pm_nl_del_endpoint $ns1 99 10.0.1.1
++		sleep 0.5
++		chk_subflow_nr "after re-delete ID 0" 2
++		chk_mptcp_info subflows 2 subflows 2
++
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "after re-re-add ID 0" 3
++		chk_mptcp_info subflows 3 subflows 3
++		mptcp_lib_kill_wait $tests_pid
++
++		kill_events_pids
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++		chk_join_nr 5 5 5
++		chk_add_nr 6 6
++		chk_rm_nr 4 3 invert
++	fi
++
++	# flush and re-add
++	if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 1 2
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		test_linkfail=4 speed=20 \
++			run_tests $ns1 $ns2 10.0.1.1 &
++		local tests_pid=$!
++
++		wait_attempt_fail $ns2
++		chk_subflow_nr "before flush" 1
++		chk_mptcp_info subflows 0 subflows 0
++
++		pm_nl_flush_endpoint $ns2
++		pm_nl_flush_endpoint $ns1
++		wait_rm_addr $ns2 0
++		ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		wait_mpj $ns2
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		mptcp_lib_kill_wait $tests_pid
++
++		chk_join_nr 2 2 2
++		chk_add_nr 2 2
++		chk_rm_nr 1 0 invert
++	fi
+ }
+ 
+ # [$1: error message]
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+index 8939d5c135a0e..869c8eda4bc34 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -8,6 +8,32 @@ readonly KSFT_SKIP=4
+ # shellcheck disable=SC2155 # declare and assign separately
+ readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
+ 
++# These variables are used in some selftests, read-only
++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6         # MPTCP_EVENT_ANNOUNCED
++declare -rx MPTCP_LIB_EVENT_REMOVED=7           # MPTCP_EVENT_REMOVED
++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10  # MPTCP_EVENT_SUB_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11       # MPTCP_EVENT_SUB_CLOSED
++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16  # MPTCP_EVENT_LISTENER_CLOSED
++
++declare -rx MPTCP_LIB_AF_INET=2
++declare -rx MPTCP_LIB_AF_INET6=10
++
++# These variables are used in some selftests, read-only
++declare -rx MPTCP_LIB_EVENT_CREATED=1           # MPTCP_EVENT_CREATED
++declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2       # MPTCP_EVENT_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_CLOSED=3            # MPTCP_EVENT_CLOSED
++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6         # MPTCP_EVENT_ANNOUNCED
++declare -rx MPTCP_LIB_EVENT_REMOVED=7           # MPTCP_EVENT_REMOVED
++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10  # MPTCP_EVENT_SUB_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11       # MPTCP_EVENT_SUB_CLOSED
++declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13     # MPTCP_EVENT_SUB_PRIORITY
++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16  # MPTCP_EVENT_LISTENER_CLOSED
++
++declare -rx MPTCP_LIB_AF_INET=2
++declare -rx MPTCP_LIB_AF_INET6=10
++
+ MPTCP_LIB_SUBTESTS=()
+ 
+ # only if supported (or forced) and not disabled, see no-color.org
+@@ -247,3 +273,15 @@ mptcp_lib_get_counter() {
+ 
+ 	echo "${count}"
+ }
++
++mptcp_lib_events() {
++	local ns="${1}"
++	local evts="${2}"
++	declare -n pid="${3}"
++
++	:>"${evts}"
++
++	mptcp_lib_kill_wait "${pid:-0}"
++	ip netns exec "${ns}" ./pm_nl_ctl events >> "${evts}" 2>&1 &
++	pid=$!
++}
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 4e58291550498..c5d7af8e8efde 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -23,15 +23,15 @@ if ! ip -Version &> /dev/null; then
+ 	exit ${KSFT_SKIP}
+ fi
+ 
+-ANNOUNCED=6        # MPTCP_EVENT_ANNOUNCED
+-REMOVED=7          # MPTCP_EVENT_REMOVED
+-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+-SUB_CLOSED=11      # MPTCP_EVENT_SUB_CLOSED
+-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+-LISTENER_CLOSED=16  #MPTCP_EVENT_LISTENER_CLOSED
++ANNOUNCED=${MPTCP_LIB_EVENT_ANNOUNCED}
++REMOVED=${MPTCP_LIB_EVENT_REMOVED}
++SUB_ESTABLISHED=${MPTCP_LIB_EVENT_SUB_ESTABLISHED}
++SUB_CLOSED=${MPTCP_LIB_EVENT_SUB_CLOSED}
++LISTENER_CREATED=${MPTCP_LIB_EVENT_LISTENER_CREATED}
++LISTENER_CLOSED=${MPTCP_LIB_EVENT_LISTENER_CLOSED}
+ 
+-AF_INET=2
+-AF_INET6=10
++AF_INET=${MPTCP_LIB_AF_INET}
++AF_INET6=${MPTCP_LIB_AF_INET6}
+ 
+ file=""
+ server_evts=""
+@@ -201,21 +201,11 @@ make_connection()
+ 	if [ -z "$client_evts" ]; then
+ 		client_evts=$(mktemp)
+ 	fi
+-	:>"$client_evts"
+-	if [ $client_evts_pid -ne 0 ]; then
+-		mptcp_lib_kill_wait $client_evts_pid
+-	fi
+-	ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
+-	client_evts_pid=$!
++	mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid
+ 	if [ -z "$server_evts" ]; then
+ 		server_evts=$(mktemp)
+ 	fi
+-	:>"$server_evts"
+-	if [ $server_evts_pid -ne 0 ]; then
+-		mptcp_lib_kill_wait $server_evts_pid
+-	fi
+-	ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
+-	server_evts_pid=$!
++	mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid
+ 	sleep 0.5
+ 
+ 	# Run the server

