From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.11 commit in: /
Date: Tue,  9 Mar 2021 12:20:01 +0000 (UTC)
Message-ID: <1615292391.8a2110bb6cc816f7e99b8290ca9d9d9c1f359609.mpagano@gentoo>

commit:     8a2110bb6cc816f7e99b8290ca9d9d9c1f359609
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar  9 12:19:51 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar  9 12:19:51 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8a2110bb

Linux patch 5.11.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1004_linux-5.11.5.patch | 1526 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1530 insertions(+)
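
The 0000_README hunk below registers the new patch in the series index. Each
entry is a three-line "Patch:/From:/Desc:" stanza, and the numeric filename
prefixes (1004_, 1500_, ...) keep the patches in apply order for the
gentoo-sources build. As a minimal, purely illustrative sketch of that layout
(not part of this commit, and not Gentoo's actual tooling, which lives in the
kernel eclasses), a parser for such an index might look like:

#!/usr/bin/env python3
# Illustrative sketch only: walk a genpatches-style 0000_README and list its
# "Patch:/From:/Desc:" stanzas. The regex and the assumption that filename
# order equals apply order are assumptions made for this example.
import re
from pathlib import Path

def list_patches(readme_path: str = "0000_README"):
    text = Path(readme_path).read_text()
    stanzas = re.findall(
        r"^Patch:\s+(\S+)\s*^From:\s+(\S+)\s*^Desc:\s+(.+)$",
        text,
        flags=re.MULTILINE,
    )
    # e.g. ("1004_linux-5.11.5.patch", "http://www.kernel.org", "Linux 5.11.5")
    return sorted(stanzas, key=lambda stanza: stanza[0])

if __name__ == "__main__":
    for name, origin, desc in list_patches():
        print(f"{name:30} {desc} ({origin})")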

diff --git a/0000_README b/0000_README
index 196569b..e8533bf 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-5.11.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.4
 
+Patch:  1004_linux-5.11.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.11.5.patch b/1004_linux-5.11.5.patch
new file mode 100644
index 0000000..cd533a8
--- /dev/null
+++ b/1004_linux-5.11.5.patch
@@ -0,0 +1,1526 @@
+diff --git a/Makefile b/Makefile
+index cb9a8e8239511..1673c12fb4b35 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+index e67b22fc3c60b..c1b299760bf7a 100644
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
+ 	 * need to push through a forced SIGSEGV.
+ 	 */
+ 	while (1) {
+-		get_signal(&ksig);
++		if (!get_signal(&ksig))
++			break;
+ 
+ 		/*
+ 		 * get_signal() may have run a debugger (via notify_parent())
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index bfda153b1a41d..87682dcb64ec3 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
+ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+-	int retval, idx;
+ 	bool use_links = dev->power.links_count > 0;
++	bool get = false;
++	int retval, idx;
++	bool put;
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_unlock(&dev->power.lock);
++	} else if (!use_links) {
++		spin_unlock_irq(&dev->power.lock);
+ 	} else {
++		get = dev->power.runtime_status == RPM_RESUMING;
++
+ 		spin_unlock_irq(&dev->power.lock);
+ 
+-		/*
+-		 * Resume suppliers if necessary.
+-		 *
+-		 * The device's runtime PM status cannot change until this
+-		 * routine returns, so it is safe to read the status outside of
+-		 * the lock.
+-		 */
+-		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
++		/* Resume suppliers if necessary. */
++		if (get) {
+ 			idx = device_links_read_lock();
+ 
+ 			retval = rpm_get_suppliers(dev);
+@@ -355,24 +355,36 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_lock(&dev->power.lock);
+-	} else {
+-		/*
+-		 * If the device is suspending and the callback has returned
+-		 * success, drop the usage counters of the suppliers that have
+-		 * been reference counted on its resume.
+-		 *
+-		 * Do that if resume fails too.
+-		 */
+-		if (use_links
+-		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+-		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+-			idx = device_links_read_lock();
++		return retval;
++	}
+ 
+- fail:
+-			rpm_put_suppliers(dev);
++	spin_lock_irq(&dev->power.lock);
+ 
+-			device_links_read_unlock(idx);
+-		}
++	if (!use_links)
++		return retval;
++
++	/*
++	 * If the device is suspending and the callback has returned success,
++	 * drop the usage counters of the suppliers that have been reference
++	 * counted on its resume.
++	 *
++	 * Do that if the resume fails too.
++	 */
++	put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
++	if (put)
++		__update_runtime_status(dev, RPM_SUSPENDED);
++	else
++		put = get && retval;
++
++	if (put) {
++		spin_unlock_irq(&dev->power.lock);
++
++		idx = device_links_read_lock();
++
++fail:
++		rpm_put_suppliers(dev);
++
++		device_links_read_unlock(idx);
+ 
+ 		spin_lock_irq(&dev->power.lock);
+ 	}
+diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
+index 63f549889f875..5ac1881396afb 100644
+--- a/drivers/block/rsxx/core.c
++++ b/drivers/block/rsxx/core.c
+@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
+ {
+ 	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
+ 	char *buf;
+-	ssize_t st;
++	int st;
+ 
+ 	buf = kzalloc(cnt, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+ 	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
+-	if (!st)
+-		st = copy_to_user(ubuf, buf, cnt);
++	if (!st) {
++		if (copy_to_user(ubuf, buf, cnt))
++			st = -EFAULT;
++	}
+ 	kfree(buf);
+ 	if (st)
+ 		return st;
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 431919d5f48af..a2e0395cbe618 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -707,12 +707,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ 	const char *desc = "attempting to generate an interrupt";
+ 	u32 cap2;
+ 	cap_t cap;
++	int ret;
+ 
++	/* TPM 2.0 */
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+-	else
+-		return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
+-				  0);
++
++	/* TPM 1.2 */
++	ret = request_locality(chip, 0);
++	if (ret < 0)
++		return ret;
++
++	ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
++
++	release_locality(chip, 0);
++
++	return ret;
+ }
+ 
+ /* Register the IRQ and issue a command that will cause an interrupt. If an
+@@ -1019,11 +1029,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 	init_waitqueue_head(&priv->read_queue);
+ 	init_waitqueue_head(&priv->int_queue);
+ 	if (irq != -1) {
+-		/* Before doing irq testing issue a command to the TPM in polling mode
++		/*
++		 * Before doing irq testing issue a command to the TPM in polling mode
+ 		 * to make sure it works. May as well use that command to set the
+ 		 * proper timeouts for the driver.
+ 		 */
+-		if (tpm_get_timeouts(chip)) {
++
++		rc = request_locality(chip, 0);
++		if (rc < 0)
++			goto out_err;
++
++		rc = tpm_get_timeouts(chip);
++
++		release_locality(chip, 0);
++
++		if (rc) {
+ 			dev_err(dev, "Could not get TPM timeouts and durations\n");
+ 			rc = -ENODEV;
+ 			goto out_err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 8155c54392c88..36a741d63ddcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -903,10 +903,11 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
+  */
+ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
+ {
++#if defined(CONFIG_AMD_PMC)
+ 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ 		if (adev->flags & AMD_IS_APU)
+ 			return true;
+ 	}
+-
++#endif
+ 	return false;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index a6667a2ca0db3..c2190c3e97f31 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -356,7 +356,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ 	while (size) {
+ 		uint32_t value;
+ 
+-		value = RREG32_PCIE(*pos >> 2);
++		value = RREG32_PCIE(*pos);
+ 		r = put_user(value, (uint32_t *)buf);
+ 		if (r) {
+ 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+@@ -423,7 +423,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
+ 			return r;
+ 		}
+ 
+-		WREG32_PCIE(*pos >> 2, value);
++		WREG32_PCIE(*pos, value);
+ 
+ 		result += 4;
+ 		buf += 4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 6bee3677394ac..22b96b7d3647f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -498,7 +498,8 @@ static bool nv_is_headless_sku(struct pci_dev *pdev)
+ {
+ 	if ((pdev->device == 0x731E &&
+ 	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+-	    (pdev->device == 0x7340 && pdev->revision == 0xC9))
++	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
++	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
+ 		return true;
+ 	return false;
+ }
+@@ -568,7 +569,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ 		    !amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+-		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
++		if (!nv_is_headless_sku(adev->pdev))
++		        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ 		if (!amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ 		break;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 5aeb5f5a04478..9be8e1888daf4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -78,6 +78,9 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
+ #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+ #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+ 
++#define mmTHM_BACO_CNTL_ARCT			0xA7
++#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0
++
+ static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+ static int link_speed[] = {25, 50, 80, 160};
+ 
+@@ -1581,9 +1584,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+ 			break;
+ 		default:
+ 			if (!ras || !ras->supported) {
+-				data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+-				data |= 0x80000000;
+-				WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
++				if (adev->asic_type == CHIP_ARCTURUS) {
++					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
++					data |= 0x80000000;
++					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
++				} else {
++					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
++					data |= 0x80000000;
++					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
++				}
+ 
+ 				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+ 			} else {
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index be996dba040cc..3d194bb608405 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ 				   struct ib_cm_sidr_rep_param *param)
+ {
+ 	struct ib_mad_send_buf *msg;
++	unsigned long flags;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&cm_id_priv->lock);
+@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ 		return ret;
+ 	}
+ 	cm_id_priv->id.state = IB_CM_IDLE;
+-	spin_lock_irq(&cm.lock);
++	spin_lock_irqsave(&cm.lock, flags);
+ 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+ 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+ 	}
+-	spin_unlock_irq(&cm.lock);
++	spin_unlock_irqrestore(&cm.lock, flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index ff8e17d7f7ca8..8161035eb7740 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1970,8 +1970,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
+ 
+ 		num_alloc_xa_entries++;
+ 		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
+-		if (!event_sub)
++		if (!event_sub) {
++			err = -ENOMEM;
+ 			goto err;
++		}
+ 
+ 		list_add_tail(&event_sub->event_list, &sub_list);
+ 		uverbs_uobject_get(&ev_file->uobj);
+diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
+index 4521490667925..06b8dc5093f77 100644
+--- a/drivers/infiniband/sw/rxe/Kconfig
++++ b/drivers/infiniband/sw/rxe/Kconfig
+@@ -4,6 +4,7 @@ config RDMA_RXE
+ 	depends on INET && PCI && INFINIBAND
+ 	depends on INFINIBAND_VIRT_DMA
+ 	select NET_UDP_TUNNEL
++	select CRYPTO
+ 	select CRYPTO_CRC32
+ 	help
+ 	This driver implements the InfiniBand RDMA transport over
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 4078358ed66ea..00fbc591a1425 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -309,6 +309,11 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+ 	domain->ops->flush_iotlb_all(domain);
+ }
+ 
++static bool dev_is_untrusted(struct device *dev)
++{
++	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
++}
++
+ /**
+  * iommu_dma_init_domain - Initialise a DMA mapping domain
+  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+@@ -363,8 +368,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ 
+ 	init_iova_domain(iovad, 1UL << order, base_pfn);
+ 
+-	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+-			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
++	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
++	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
++	    attr) {
+ 		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
+ 					  iommu_dma_entry_dtor))
+ 			pr_warn("iova flush queue initialization failed\n");
+@@ -521,11 +527,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+ 				iova_align(iovad, size), dir, attrs);
+ }
+ 
+-static bool dev_is_untrusted(struct device *dev)
+-{
+-	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+-}
+-
+ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ 		size_t size, int prot, u64 dma_mask)
+ {
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index 97dfcffbf495a..444c0bec221a4 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -30,8 +30,8 @@
+ #define VCMD_VRSP_IP			0x1
+ #define VCMD_VRSP_SC(e)			(((e) >> 1) & 0x3)
+ #define VCMD_VRSP_SC_SUCCESS		0
+-#define VCMD_VRSP_SC_NO_PASID_AVAIL	1
+-#define VCMD_VRSP_SC_INVALID_PASID	1
++#define VCMD_VRSP_SC_NO_PASID_AVAIL	2
++#define VCMD_VRSP_SC_INVALID_PASID	2
+ #define VCMD_VRSP_RESULT_PASID(e)	(((e) >> 8) & 0xfffff)
+ #define VCMD_CMD_OPERAND(e)		((e) << 8)
+ /*
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 4a3f095a1c267..97eb62f667d22 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -798,10 +798,70 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
+ 	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
+ }
+ 
++static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
++{
++	struct platform_device *pdev;
++	struct tegra_mc *mc;
++
++	pdev = of_find_device_by_node(np);
++	if (!pdev)
++		return NULL;
++
++	mc = platform_get_drvdata(pdev);
++	if (!mc)
++		return NULL;
++
++	return mc->smmu;
++}
++
++static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
++				struct of_phandle_args *args)
++{
++	const struct iommu_ops *ops = smmu->iommu.ops;
++	int err;
++
++	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
++	if (err < 0) {
++		dev_err(dev, "failed to initialize fwspec: %d\n", err);
++		return err;
++	}
++
++	err = ops->of_xlate(dev, args);
++	if (err < 0) {
++		dev_err(dev, "failed to parse SW group ID: %d\n", err);
++		iommu_fwspec_free(dev);
++		return err;
++	}
++
++	return 0;
++}
++
+ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
+ {
+-	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
++	struct device_node *np = dev->of_node;
++	struct tegra_smmu *smmu = NULL;
++	struct of_phandle_args args;
++	unsigned int index = 0;
++	int err;
++
++	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
++					  &args) == 0) {
++		smmu = tegra_smmu_find(args.np);
++		if (smmu) {
++			err = tegra_smmu_configure(smmu, dev, &args);
++			of_node_put(args.np);
+ 
++			if (err < 0)
++				return ERR_PTR(err);
++
++			break;
++		}
++
++		of_node_put(args.np);
++		index++;
++	}
++
++	smmu = dev_iommu_priv_get(dev);
+ 	if (!smmu)
+ 		return ERR_PTR(-ENODEV);
+ 
+@@ -1028,6 +1088,16 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	if (!smmu)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * This is a bit of a hack. Ideally we'd want to simply return this
++	 * value. However the IOMMU registration process will attempt to add
++	 * all devices to the IOMMU when bus_set_iommu() is called. In order
++	 * not to rely on global variables to track the IOMMU instance, we
++	 * set it here so that it can be looked up from the .probe_device()
++	 * callback via the IOMMU device's .drvdata field.
++	 */
++	mc->smmu = smmu;
++
+ 	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
+ 
+ 	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index fce4cbf9529d6..50f3e673729c3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1526,6 +1526,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
+ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
+ {
+ 	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
++	if (s >= c->start)
++		s -= c->start;
++	else
++		s = 0;
+ 	if (likely(c->sectors_per_block_bits >= 0))
+ 		s >>= c->sectors_per_block_bits;
+ 	else
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index fb41b4f23c489..66f4c6398f670 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -61,19 +61,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+ 			   unsigned *offset, struct dm_buffer **buf)
+ {
+-	u64 position, block;
++	u64 position, block, rem;
+ 	u8 *res;
+ 
+ 	position = (index + rsb) * v->fec->roots;
+-	block = position >> v->data_dev_block_bits;
+-	*offset = (unsigned)(position - (block << v->data_dev_block_bits));
++	block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
++	*offset = (unsigned)rem;
+ 
+-	res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
++	res = dm_bufio_read(v->fec->bufio, block, buf);
+ 	if (IS_ERR(res)) {
+ 		DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
+ 		      v->data_dev->name, (unsigned long long)rsb,
+-		      (unsigned long long)(v->fec->start + block),
+-		      PTR_ERR(res));
++		      (unsigned long long)block, PTR_ERR(res));
+ 		*buf = NULL;
+ 	}
+ 
+@@ -155,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ 
+ 		/* read the next block when we run out of parity bytes */
+ 		offset += v->fec->roots;
+-		if (offset >= 1 << v->data_dev_block_bits) {
++		if (offset >= v->fec->roots << SECTOR_SHIFT) {
+ 			dm_bufio_release(buf);
+ 
+ 			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -674,7 +673,7 @@ int verity_fec_ctr(struct dm_verity *v)
+ {
+ 	struct dm_verity_fec *f = v->fec;
+ 	struct dm_target *ti = v->ti;
+-	u64 hash_blocks;
++	u64 hash_blocks, fec_blocks;
+ 	int ret;
+ 
+ 	if (!verity_fec_is_enabled(v)) {
+@@ -744,15 +743,17 @@ int verity_fec_ctr(struct dm_verity *v)
+ 	}
+ 
+ 	f->bufio = dm_bufio_client_create(f->dev->bdev,
+-					  1 << v->data_dev_block_bits,
++					  f->roots << SECTOR_SHIFT,
+ 					  1, 0, NULL, NULL);
+ 	if (IS_ERR(f->bufio)) {
+ 		ti->error = "Cannot initialize FEC bufio client";
+ 		return PTR_ERR(f->bufio);
+ 	}
+ 
+-	if (dm_bufio_get_device_size(f->bufio) <
+-	    ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
++	dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
++
++	fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
++	if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
+ 		ti->error = "FEC device is too small";
+ 		return -E2BIG;
+ 	}
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 470ff6b3ebef1..35b015c9ab025 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2208,6 +2208,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
+ 
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
++	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39:
+@@ -2235,6 +2236,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
+ {
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
++	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39:
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 3b1c387375a6b..3cf1b953f5236 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1150,6 +1150,11 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&cache->lock);
+ 
++	if (cache->swap_extents) {
++		ret = -ETXTBSY;
++		goto out;
++	}
++
+ 	if (cache->ro) {
+ 		cache->ro++;
+ 		ret = 0;
+@@ -2253,7 +2258,7 @@ again:
+ 	}
+ 
+ 	ret = inc_block_group_ro(cache, 0);
+-	if (!do_chunk_alloc)
++	if (!do_chunk_alloc || ret == -ETXTBSY)
+ 		goto unlock_out;
+ 	if (!ret)
+ 		goto out;
+@@ -2262,6 +2267,8 @@ again:
+ 	if (ret < 0)
+ 		goto out;
+ 	ret = inc_block_group_ro(cache, 0);
++	if (ret == -ETXTBSY)
++		goto unlock_out;
+ out:
+ 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+ 		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
+@@ -3345,6 +3352,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
+ 		ASSERT(list_empty(&block_group->io_list));
+ 		ASSERT(list_empty(&block_group->bg_list));
+ 		ASSERT(refcount_read(&block_group->refs) == 1);
++		ASSERT(block_group->swap_extents == 0);
+ 		btrfs_put_block_group(block_group);
+ 
+ 		spin_lock(&info->block_group_cache_lock);
+@@ -3411,3 +3419,26 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
+ 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
+ 	}
+ }
++
++bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
++{
++	bool ret = true;
++
++	spin_lock(&bg->lock);
++	if (bg->ro)
++		ret = false;
++	else
++		bg->swap_extents++;
++	spin_unlock(&bg->lock);
++
++	return ret;
++}
++
++void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
++{
++	spin_lock(&bg->lock);
++	ASSERT(!bg->ro);
++	ASSERT(bg->swap_extents >= amount);
++	bg->swap_extents -= amount;
++	spin_unlock(&bg->lock);
++}
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 8f74a96074f7b..8a925741dc34a 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -181,6 +181,12 @@ struct btrfs_block_group {
+ 	 */
+ 	int needs_free_space;
+ 
++	/*
++	 * Number of extents in this block group used for swap files.
++	 * All accesses protected by the spinlock 'lock'.
++	 */
++	int swap_extents;
++
+ 	/* Record locked full stripes for RAID5/6 block group */
+ 	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+ };
+@@ -301,4 +307,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
+ 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
+ #endif
+ 
++bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
++void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
++
+ #endif /* BTRFS_BLOCK_GROUP_H */
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 4debdbdde2abb..0c8c55a41d7b2 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -523,6 +523,11 @@ struct btrfs_swapfile_pin {
+ 	 * points to a struct btrfs_device.
+ 	 */
+ 	bool is_block_group;
++	/*
++	 * Only used when 'is_block_group' is true and it is the number of
++	 * extents used by a swapfile for this block group ('ptr' field).
++	 */
++	int bg_extent_count;
+ };
+ 
+ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 70c0340d839cb..f12e6a0aa3c70 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -649,7 +649,7 @@ static int btrfs_delayed_inode_reserve_metadata(
+ 						      btrfs_ino(inode),
+ 						      num_bytes, 1);
+ 		} else {
+-			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
++			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+ 		}
+ 		return ret;
+ 	}
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0e41459b8de66..f851a1a63833d 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3264,8 +3264,11 @@ reserve_space:
+ 			goto out;
+ 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
+ 						alloc_start, bytes_to_reserve);
+-		if (ret)
++		if (ret) {
++			unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
++					     lockend, &cached_state);
+ 			goto out;
++		}
+ 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
+ 						alloc_end - alloc_start,
+ 						i_blocksize(inode),
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 71d0d14bc18b3..b64b88987367c 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2708,8 +2708,10 @@ static void __btrfs_return_cluster_to_free_space(
+ 	struct rb_node *node;
+ 
+ 	spin_lock(&cluster->lock);
+-	if (cluster->block_group != block_group)
+-		goto out;
++	if (cluster->block_group != block_group) {
++		spin_unlock(&cluster->lock);
++		return;
++	}
+ 
+ 	cluster->block_group = NULL;
+ 	cluster->window_start = 0;
+@@ -2747,8 +2749,6 @@ static void __btrfs_return_cluster_to_free_space(
+ 				   entry->offset, &entry->offset_index, bitmap);
+ 	}
+ 	cluster->root = RB_ROOT;
+-
+-out:
+ 	spin_unlock(&cluster->lock);
+ 	btrfs_put_block_group(block_group);
+ }
+@@ -3028,8 +3028,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
+ 			entry->bytes -= bytes;
+ 		}
+ 
+-		if (entry->bytes == 0)
+-			rb_erase(&entry->offset_index, &cluster->root);
+ 		break;
+ 	}
+ out:
+@@ -3046,7 +3044,10 @@ out:
+ 	ctl->free_space -= bytes;
+ 	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
+ 		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
++
++	spin_lock(&cluster->lock);
+ 	if (entry->bytes == 0) {
++		rb_erase(&entry->offset_index, &cluster->root);
+ 		ctl->free_extents--;
+ 		if (entry->bitmap) {
+ 			kmem_cache_free(btrfs_free_space_bitmap_cachep,
+@@ -3059,6 +3060,7 @@ out:
+ 		kmem_cache_free(btrfs_free_space_cachep, entry);
+ 	}
+ 
++	spin_unlock(&cluster->lock);
+ 	spin_unlock(&ctl->tree_lock);
+ 
+ 	return ret;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ad34c5a09befc..40ccb8ddab23a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9993,6 +9993,7 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
+ 	sp->ptr = ptr;
+ 	sp->inode = inode;
+ 	sp->is_block_group = is_block_group;
++	sp->bg_extent_count = 1;
+ 
+ 	spin_lock(&fs_info->swapfile_pins_lock);
+ 	p = &fs_info->swapfile_pins.rb_node;
+@@ -10006,6 +10007,8 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
+ 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
+ 			p = &(*p)->rb_right;
+ 		} else {
++			if (is_block_group)
++				entry->bg_extent_count++;
+ 			spin_unlock(&fs_info->swapfile_pins_lock);
+ 			kfree(sp);
+ 			return 1;
+@@ -10031,8 +10034,11 @@ static void btrfs_free_swapfile_pins(struct inode *inode)
+ 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
+ 		if (sp->inode == inode) {
+ 			rb_erase(&sp->node, &fs_info->swapfile_pins);
+-			if (sp->is_block_group)
++			if (sp->is_block_group) {
++				btrfs_dec_block_group_swap_extents(sp->ptr,
++							   sp->bg_extent_count);
+ 				btrfs_put_block_group(sp->ptr);
++			}
+ 			kfree(sp);
+ 		}
+ 		node = next;
+@@ -10093,7 +10099,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			       sector_t *span)
+ {
+ 	struct inode *inode = file_inode(file);
+-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
++	struct btrfs_root *root = BTRFS_I(inode)->root;
++	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ 	struct extent_state *cached_state = NULL;
+ 	struct extent_map *em = NULL;
+@@ -10144,13 +10151,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	   "cannot activate swapfile while exclusive operation is running");
+ 		return -EBUSY;
+ 	}
++
++	/*
++	 * Prevent snapshot creation while we are activating the swap file.
++	 * We do not want to race with snapshot creation. If snapshot creation
++	 * already started before we bumped nr_swapfiles from 0 to 1 and
++	 * completes before the first write into the swap file after it is
++	 * activated, than that write would fallback to COW.
++	 */
++	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
++		btrfs_exclop_finish(fs_info);
++		btrfs_warn(fs_info,
++	   "cannot activate swapfile because snapshot creation is in progress");
++		return -EINVAL;
++	}
+ 	/*
+ 	 * Snapshots can create extents which require COW even if NODATACOW is
+ 	 * set. We use this counter to prevent snapshots. We must increment it
+ 	 * before walking the extents because we don't want a concurrent
+ 	 * snapshot to run after we've already checked the extents.
+ 	 */
+-	atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
++	atomic_inc(&root->nr_swapfiles);
+ 
+ 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+ 
+@@ -10247,6 +10268,17 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			goto out;
+ 		}
+ 
++		if (!btrfs_inc_block_group_swap_extents(bg)) {
++			btrfs_warn(fs_info,
++			   "block group for swapfile at %llu is read-only%s",
++			   bg->start,
++			   atomic_read(&fs_info->scrubs_running) ?
++				       " (scrub running)" : "");
++			btrfs_put_block_group(bg);
++			ret = -EINVAL;
++			goto out;
++		}
++
+ 		ret = btrfs_add_swapfile_pin(inode, bg, true);
+ 		if (ret) {
+ 			btrfs_put_block_group(bg);
+@@ -10285,6 +10317,8 @@ out:
+ 	if (ret)
+ 		btrfs_swap_deactivate(file);
+ 
++	btrfs_drew_write_unlock(&root->snapshot_lock);
++
+ 	btrfs_exclop_finish(fs_info);
+ 
+ 	if (ret)
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index dde49a791f3e2..0a4ab121c684b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1926,7 +1926,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
+ 		readonly = true;
+ 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
+-		if (vol_args->size > PAGE_SIZE) {
++		u64 nums;
++
++		if (vol_args->size < sizeof(*inherit) ||
++		    vol_args->size > PAGE_SIZE) {
+ 			ret = -EINVAL;
+ 			goto free_args;
+ 		}
+@@ -1935,6 +1938,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 			ret = PTR_ERR(inherit);
+ 			goto free_args;
+ 		}
++
++		if (inherit->num_qgroups > PAGE_SIZE ||
++		    inherit->num_ref_copies > PAGE_SIZE ||
++		    inherit->num_excl_copies > PAGE_SIZE) {
++			ret = -EINVAL;
++			goto free_inherit;
++		}
++
++		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
++		       2 * inherit->num_excl_copies;
++		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
++			ret = -EINVAL;
++			goto free_inherit;
++		}
+ 	}
+ 
+ 	ret = __btrfs_ioctl_snap_create(file, vol_args->name, vol_args->fd,
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 93fbf87bdc8d3..123b79672c63c 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2363,16 +2363,21 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 	SetPageUptodate(p_page);
+ 
+ 	if (has_qstripe) {
++		/* RAID6, allocate and map temp space for the Q stripe */
+ 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ 		if (!q_page) {
+ 			__free_page(p_page);
+ 			goto cleanup;
+ 		}
+ 		SetPageUptodate(q_page);
++		pointers[rbio->real_stripes - 1] = kmap(q_page);
+ 	}
+ 
+ 	atomic_set(&rbio->error, 0);
+ 
++	/* Map the parity stripe just once */
++	pointers[nr_data] = kmap(p_page);
++
+ 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ 		struct page *p;
+ 		void *parity;
+@@ -2382,16 +2387,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 			pointers[stripe] = kmap(p);
+ 		}
+ 
+-		/* then add the parity stripe */
+-		pointers[stripe++] = kmap(p_page);
+-
+ 		if (has_qstripe) {
+-			/*
+-			 * raid6, add the qstripe and call the
+-			 * library function to fill in our p/q
+-			 */
+-			pointers[stripe++] = kmap(q_page);
+-
++			/* RAID6, call the library function to fill in our P/Q */
+ 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
+ 						pointers);
+ 		} else {
+@@ -2412,12 +2409,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 
+ 		for (stripe = 0; stripe < nr_data; stripe++)
+ 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+-		kunmap(p_page);
+ 	}
+ 
++	kunmap(p_page);
+ 	__free_page(p_page);
+-	if (q_page)
++	if (q_page) {
++		kunmap(q_page);
+ 		__free_page(q_page);
++	}
+ 
+ writeback:
+ 	/*
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index b03e7891394e3..a3bc721bab7c8 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -550,6 +550,24 @@ process_slot:
+ 		 */
+ 		btrfs_release_path(path);
+ 
++		/*
++		 * When using NO_HOLES and we are cloning a range that covers
++		 * only a hole (no extents) into a range beyond the current
++		 * i_size, punching a hole in the target range will not create
++		 * an extent map defining a hole, because the range starts at or
++		 * beyond current i_size. If the file previously had an i_size
++		 * greater than the new i_size set by this clone operation, we
++		 * need to make sure the next fsync is a full fsync, so that it
++		 * detects and logs a hole covering a range from the current
++		 * i_size to the new i_size. If the clone range covers extents,
++		 * besides a hole, then we know the full sync flag was already
++		 * set by previous calls to btrfs_replace_file_extents() that
++		 * replaced file extent items.
++		 */
++		if (last_dest_end >= i_size_read(inode))
++			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++				&BTRFS_I(inode)->runtime_flags);
++
+ 		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
+ 				destoff + len - 1, NULL, &trans);
+ 		if (ret)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 5f4f88a4d2c8a..c09a494be8c68 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3630,6 +3630,13 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 			 * commit_transactions.
+ 			 */
+ 			ro_set = 0;
++		} else if (ret == -ETXTBSY) {
++			btrfs_warn(fs_info,
++		   "skipping scrub of block group %llu due to active swapfile",
++				   cache->start);
++			scrub_pause_off(fs_info);
++			ret = 0;
++			goto skip_unfreeze;
+ 		} else {
+ 			btrfs_warn(fs_info,
+ 				   "failed setting block group ro: %d", ret);
+@@ -3719,7 +3726,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 		} else {
+ 			spin_unlock(&cache->lock);
+ 		}
+-
++skip_unfreeze:
+ 		btrfs_unfreeze_block_group(cache);
+ 		btrfs_put_block_group(cache);
+ 		if (ret)
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 12d7d3be7cd45..8baa806f43d76 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1919,8 +1919,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 	btrfs_resize_thread_pool(fs_info,
+ 		fs_info->thread_pool_size, old_thread_pool_size);
+ 
+-	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
+-	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++	if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
++	    (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ 	    (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
+ 		btrfs_warn(fs_info,
+ 		"remount supports changing free space tree only from ro to rw");
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 582061c7b5471..f4ade821307d7 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1453,22 +1453,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 		return -EUCLEAN;
+ 	}
+ 	for (; ptr < end; ptr += sizeof(*dref)) {
+-		u64 root_objectid;
+-		u64 owner;
+ 		u64 offset;
+-		u64 hash;
+ 
++		/*
++		 * We cannot check the extent_data_ref hash due to possible
++		 * overflow from the leaf due to hash collisions.
++		 */
+ 		dref = (struct btrfs_extent_data_ref *)ptr;
+-		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
+-		owner = btrfs_extent_data_ref_objectid(leaf, dref);
+ 		offset = btrfs_extent_data_ref_offset(leaf, dref);
+-		hash = hash_extent_data_ref(root_objectid, owner, offset);
+-		if (unlikely(hash != key->offset)) {
+-			extent_err(leaf, slot,
+-	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
+-				   hash, key->offset);
+-			return -EUCLEAN;
+-		}
+ 		if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+ 			extent_err(leaf, slot,
+ 	"invalid extent data backref offset, have %llu expect aligned to %u",
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index af6246f36a9e5..03135dbb318a5 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -229,11 +229,33 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
+ {
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_trans_handle *trans;
++	const bool start_trans = (current->journal_info == NULL);
+ 	int ret;
+ 
+-	trans = btrfs_start_transaction(root, 2);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
++	if (start_trans) {
++		/*
++		 * 1 unit for inserting/updating/deleting the xattr
++		 * 1 unit for the inode item update
++		 */
++		trans = btrfs_start_transaction(root, 2);
++		if (IS_ERR(trans))
++			return PTR_ERR(trans);
++	} else {
++		/*
++		 * This can happen when smack is enabled and a directory is being
++		 * created. It happens through d_instantiate_new(), which calls
++		 * smack_d_instantiate(), which in turn calls __vfs_setxattr() to
++		 * set the transmute xattr (XATTR_NAME_SMACKTRANSMUTE) on the
++		 * inode. We have already reserved space for the xattr and inode
++		 * update at btrfs_mkdir(), so just use the transaction handle.
++		 * We don't join or start a transaction, as that will reset the
++		 * block_rsv of the handle and trigger a warning for the start
++		 * case.
++		 */
++		ASSERT(strncmp(name, XATTR_SECURITY_PREFIX,
++			       XATTR_SECURITY_PREFIX_LEN) == 0);
++		trans = current->journal_info;
++	}
+ 
+ 	ret = btrfs_setxattr(trans, inode, name, value, size, flags);
+ 	if (ret)
+@@ -244,7 +266,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
+ 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ 	BUG_ON(ret);
+ out:
+-	btrfs_end_transaction(trans);
++	if (start_trans)
++		btrfs_end_transaction(trans);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index c388466590191..2f80de4403595 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -152,7 +152,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
+ 	sector_t sector = 0;
+ 	struct blk_zone *zones = NULL;
+ 	unsigned int i, nreported = 0, nr_zones;
+-	unsigned int zone_sectors;
++	sector_t zone_sectors;
+ 	int ret;
+ 
+ 	if (!bdev_is_zoned(bdev))
+@@ -485,7 +485,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
+ 			       u64 *bytenr_ret)
+ {
+ 	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
+-	unsigned int zone_sectors;
++	sector_t zone_sectors;
+ 	u32 sb_zone;
+ 	int ret;
+ 	u64 zone_size;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4d0ede0418571..38bfd168ad3b7 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5316,6 +5316,9 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ 			pt->error = -EINVAL;
+ 			return;
+ 		}
++		/* double add on the same waitqueue head, ignore */
++		if (poll->head == head)
++			return;
+ 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
+ 		if (!poll) {
+ 			pt->error = -ENOMEM;
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index af2ff31ff619f..13f8a6a54ca87 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -149,7 +149,7 @@ struct ahash_alg {
+ 
+ struct shash_desc {
+ 	struct crypto_shash *tfm;
+-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
++	void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
+ };
+ 
+ #define HASH_MAX_DIGESTSIZE	 64
+@@ -162,9 +162,9 @@ struct shash_desc {
+ 
+ #define HASH_MAX_STATESIZE	512
+ 
+-#define SHASH_DESC_ON_STACK(shash, ctx)				  \
+-	char __##shash##_desc[sizeof(struct shash_desc) +	  \
+-		HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
++#define SHASH_DESC_ON_STACK(shash, ctx)					     \
++	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
++		__aligned(__alignof__(struct shash_desc));		     \
+ 	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+ 
+ /**
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index ef90e07c9635c..e3abd1f8646a1 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -151,9 +151,12 @@
+  * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
+  * declaration) is used to ensure that the crypto_tfm context structure is
+  * aligned correctly for the given architecture so that there are no alignment
+- * faults for C data types.  In particular, this is required on platforms such
+- * as arm where pointers are 32-bit aligned but there are data types such as
+- * u64 which require 64-bit alignment.
++ * faults for C data types.  On architectures that support non-cache coherent
++ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
++ * that is required to ensure that the context struct member does not share any
++ * cachelines with the rest of the struct. This is needed to ensure that cache
++ * maintenance for non-coherent DMA (cache invalidation in particular) does not
++ * affect data that may be accessed by the CPU concurrently.
+  */
+ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
+ 
+diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
+index 743c2f4422806..d0574805865f9 100644
+--- a/include/sound/intel-nhlt.h
++++ b/include/sound/intel-nhlt.h
+@@ -112,6 +112,11 @@ struct nhlt_vendor_dmic_array_config {
+ 	/* TODO add vendor mic config */
+ } __packed;
+ 
++enum {
++	NHLT_CONFIG_TYPE_GENERIC = 0,
++	NHLT_CONFIG_TYPE_MIC_ARRAY = 1
++};
++
+ enum {
+ 	NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
+ 	NHLT_MIC_ARRAY_2CH_BIG = 0xb,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index ec08f948dd80e..063f8ea6aad97 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2821,6 +2821,17 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 				       write_stamp, write_stamp - delta))
+ 			return 0;
+ 
++		/*
++		 * It's possible that the event time delta is zero
++		 * (has the same time stamp as the previous event)
++		 * in which case write_stamp and before_stamp could
++		 * be the same. In such a case, force before_stamp
++		 * to be different than write_stamp. It doesn't
++		 * matter what it is, as long as its different.
++		 */
++		if (!delta)
++			rb_time_set(&cpu_buffer->before_stamp, 0);
++
+ 		/*
+ 		 * If an event were to come in now, it would see that the
+ 		 * write_stamp and the before_stamp are different, and assume
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index b9c2ee7ab43fa..cce12e1971d85 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -438,7 +438,7 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp)
+ 
+ static int arm64_is_fake_mcount(Elf64_Rel const *rp)
+ {
+-	return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26;
++	return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
+ }
+ 
+ /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
+diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
+index a89ed55d85d41..478f757ff8435 100644
+--- a/security/tomoyo/network.c
++++ b/security/tomoyo/network.c
+@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
+ static bool tomoyo_kernel_service(void)
+ {
+ 	/* Nothing to do if I am a kernel service. */
+-	return uaccess_kernel();
++	return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+ }
+ 
+ /**
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index 059aaf04f536a..d053beccfaec3 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -31,18 +31,44 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+ 	struct nhlt_endpoint *epnt;
+ 	struct nhlt_dmic_array_config *cfg;
+ 	struct nhlt_vendor_dmic_array_config *cfg_vendor;
++	struct nhlt_fmt *fmt_configs;
+ 	unsigned int dmic_geo = 0;
+-	u8 j;
++	u16 max_ch = 0;
++	u8 i, j;
+ 
+ 	if (!nhlt)
+ 		return 0;
+ 
+-	epnt = (struct nhlt_endpoint *)nhlt->desc;
++	for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
++	     epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {
+ 
+-	for (j = 0; j < nhlt->endpoint_count; j++) {
+-		if (epnt->linktype == NHLT_LINK_DMIC) {
+-			cfg = (struct nhlt_dmic_array_config  *)
+-					(epnt->config.caps);
++		if (epnt->linktype != NHLT_LINK_DMIC)
++			continue;
++
++		cfg = (struct nhlt_dmic_array_config  *)(epnt->config.caps);
++		fmt_configs = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
++
++		/* find max number of channels based on format_configuration */
++		if (fmt_configs->fmt_count) {
++			dev_dbg(dev, "%s: found %d format definitions\n",
++				__func__, fmt_configs->fmt_count);
++
++			for (i = 0; i < fmt_configs->fmt_count; i++) {
++				struct wav_fmt_ext *fmt_ext;
++
++				fmt_ext = &fmt_configs->fmt_config[i].fmt_ext;
++
++				if (fmt_ext->fmt.channels > max_ch)
++					max_ch = fmt_ext->fmt.channels;
++			}
++			dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch);
++		} else {
++			dev_dbg(dev, "%s: No format information found\n", __func__);
++		}
++
++		if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) {
++			dmic_geo = max_ch;
++		} else {
+ 			switch (cfg->array_type) {
+ 			case NHLT_MIC_ARRAY_2CH_SMALL:
+ 			case NHLT_MIC_ARRAY_2CH_BIG:
+@@ -59,13 +85,23 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+ 				dmic_geo = cfg_vendor->nb_mics;
+ 				break;
+ 			default:
+-				dev_warn(dev, "undefined DMIC array_type 0x%0x\n",
+-					 cfg->array_type);
++				dev_warn(dev, "%s: undefined DMIC array_type 0x%0x\n",
++					 __func__, cfg->array_type);
++			}
++
++			if (dmic_geo > 0) {
++				dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo);
++			}
++			if (max_ch > dmic_geo) {
++				dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n",
++					__func__, max_ch, dmic_geo);
+ 			}
+ 		}
+-		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ 	}
+ 
++	dev_dbg(dev, "%s: dmic number %d max_ch %d\n",
++		__func__, dmic_geo, max_ch);
++
+ 	return dmic_geo;
+ }
+ EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);
+diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
+index fc1bc18caee98..85d1fc76f59e1 100644
+--- a/sound/pci/ctxfi/cthw20k2.c
++++ b/sound/pci/ctxfi/cthw20k2.c
+@@ -991,7 +991,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
+ 
+ 	if (idx < 4) {
+ 		/* S/PDIF output */
+-		switch ((conf & 0x7)) {
++		switch ((conf & 0xf)) {
+ 		case 1:
+ 			set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
+ 			break;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f4f8c2d760f0..b47504fa8dfd0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6408,6 +6408,7 @@ enum {
+ 	ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
+ 	ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ 	ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
++	ALC256_FIXUP_ACER_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7864,6 +7865,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ 	},
++	[ALC256_FIXUP_ACER_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x02a1113c }, /* use as headset mic, without its own jack detect */
++			{ 0x1a, 0x90a1092f }, /* use as internal mic */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7890,9 +7901,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+ 	SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ 	SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index dc68ed65e4787..771b652329571 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -646,10 +646,10 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
+ 		cur_rate = prev_rate;
+ 
+ 	if (cur_rate != rate) {
+-		usb_audio_warn(chip,
+-			       "%d:%d: freq mismatch (RO clock): req %d, clock runs @%d\n",
+-			       fmt->iface, fmt->altsetting, rate, cur_rate);
+-		return -ENXIO;
++		usb_audio_dbg(chip,
++			      "%d:%d: freq mismatch: req %d, clock runs @%d\n",
++			      fmt->iface, fmt->altsetting, rate, cur_rate);
++		/* continue processing */
+ 	}
+ 
+ validation:
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 12b15ed59eaa1..d5bdc9c4f452b 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1301,6 +1301,17 @@ no_res_check:
+ 			/* totally crap, return an error */
+ 			return -EINVAL;
+ 		}
++	} else {
++		/* if the max volume is too low, it's likely a bogus range;
++		 * here we use -96dB as the threshold
++		 */
++		if (cval->dBmax <= -9600) {
++			usb_audio_info(cval->head.mixer->chip,
++				       "%d:%d: bogus dB values (%d/%d), disabling dB reporting\n",
++				       cval->head.id, mixer_ctrl_intf(cval->head.mixer),
++				       cval->dBmin, cval->dBmax);
++			cval->dBmin = cval->dBmax = 0;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index a7212f16660ec..646deb6244b15 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -536,6 +536,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x05a7, 0x1020),
+ 		.map = bose_companion5_map,
+ 	},
++	{
++		/* Corsair Virtuoso SE (wired mode) */
++		.id = USB_ID(0x1b1c, 0x0a3d),
++		.map = corsair_virtuoso_map,
++	},
++	{
++		/* Corsair Virtuoso SE (wireless mode) */
++		.id = USB_ID(0x1b1c, 0x0a3e),
++		.map = corsair_virtuoso_map,
++	},
+ 	{
+ 		/* Corsair Virtuoso (wired mode) */
+ 		.id = USB_ID(0x1b1c, 0x0a41),
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index bf5a0f3c1fade..e5311b6bb3f65 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -845,13 +845,19 @@ get_sync_ep_from_substream(struct snd_usb_substream *subs)
+ 
+ 	list_for_each_entry(fp, &subs->fmt_list, list) {
+ 		ep = snd_usb_get_endpoint(chip, fp->endpoint);
+-		if (ep && ep->cur_rate)
+-			return ep;
++		if (ep && ep->cur_audiofmt) {
++			/* if EP is already opened solely for this substream,
++			 * we still allow us to change the parameter; otherwise
++			 * this substream has to follow the existing parameter
++			 */
++			if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1)
++				return ep;
++		}
+ 		if (!fp->implicit_fb)
+ 			continue;
+ 		/* for the implicit fb, check the sync ep as well */
+ 		ep = snd_usb_get_endpoint(chip, fp->sync_ep);
+-		if (ep && ep->cur_rate)
++		if (ep && ep->cur_audiofmt)
+ 			return ep;
+ 	}
+ 	return NULL;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9ba4682ebc482..737b2729c0d37 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1482,7 +1482,7 @@ static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs,
+ 	usb_set_interface(subs->dev, 0, 1);
+ 	// we should derive windex from fmt-sync_ep but it's not set
+ 	snd_usb_ctl_msg(subs->stream->chip->dev,
+-		usb_rcvctrlpipe(subs->stream->chip->dev, 0),
++		usb_sndctrlpipe(subs->stream->chip->dev, 0),
+ 		0x01, 0x22, 0x0100, windex, &sr, 0x0003);
+ 	return 0;
+ }

