From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.7 commit in: /
Date: Thu,  9 Jul 2020 12:15:56 +0000 (UTC)
Message-ID: <1594296944.1d06c709e96c894be7ce445cad98142c8c24befd.mpagano@gentoo>

commit:     1d06c709e96c894be7ce445cad98142c8c24befd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul  9 12:15:44 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul  9 12:15:44 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d06c709

Linux patch 5.7.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-5.7.8.patch | 5012 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5016 insertions(+)

diff --git a/0000_README b/0000_README
index 4fdfe73..46bac07 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.7.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.7.7
 
+Patch:  1007_linux-5.7.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.7.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.7.8.patch b/1007_linux-5.7.8.patch
new file mode 100644
index 0000000..1e0b33e
--- /dev/null
+++ b/1007_linux-5.7.8.patch
@@ -0,0 +1,5012 @@
+diff --git a/Makefile b/Makefile
+index 5a5e329d9241..6163d607ca72 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 7
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 31968cbd6464..9f252d132b52 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2121,6 +2121,7 @@ static void configure_status(void)
+ 
+ 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ 			 status_set);
++	back_to_back_c0_hazard();
+ }
+ 
+ unsigned int hwrena;
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index aa37545ebe8f..b10342018d19 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -514,8 +514,8 @@ void __init ltq_soc_init(void)
+ 		clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
+ 			       PMU_PPE_DP | PMU_PPE_TC);
+ 		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+-		clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
+-		clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
++		clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
++		clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
+ 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ 		clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
+ 		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+@@ -538,8 +538,8 @@ void __init ltq_soc_init(void)
+ 				PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ 				PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ 				PMU_PPE_QSB | PMU_PPE_TOP);
+-		clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
+-		clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
++		clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
++		clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
+ 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ 		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
+index 04b2b927bb5a..0431db7b82af 100644
+--- a/arch/powerpc/include/asm/kvm_book3s_64.h
++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
+@@ -14,6 +14,7 @@
+ #include <asm/book3s/64/mmu-hash.h>
+ #include <asm/cpu_has_feature.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/pte-walk.h>
+ 
+ #ifdef CONFIG_PPC_PSERIES
+ static inline bool kvmhv_on_pseries(void)
+@@ -634,6 +635,28 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ 				unsigned long gpa, unsigned long hpa,
+ 				unsigned long nbytes);
+ 
++static inline pte_t *
++find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
++				unsigned *hshift)
++{
++	pte_t *pte;
++
++	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
++	return pte;
++}
++
++static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
++					    unsigned *hshift)
++{
++	pte_t *pte;
++
++	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
++		"%s called with kvm mmu_lock not held \n", __func__);
++	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
++
++	return pte;
++}
++
+ #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+ 
+ #endif /* __ASM_KVM_BOOK3S_64_H__ */
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index bc6c1aa3d0e9..d4e532a63f08 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -993,11 +993,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 		return 0;
+ 	}
+ 
+-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
++	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ 	if (ptep && pte_present(*ptep))
+ 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ 				 kvm->arch.lpid);
+-	return 0;				
++	return 0;
+ }
+ 
+ /* Called with kvm->mmu_lock held */
+@@ -1013,7 +1013,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ 		return ref;
+ 
+-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
++	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
+ 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
+ 					      gpa, shift);
+@@ -1040,7 +1040,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ 		return ref;
+ 
+-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
++	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
+ 		ref = 1;
+ 	return ref;
+@@ -1052,7 +1052,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
+ {
+ 	unsigned long gfn = memslot->base_gfn + pagenum;
+ 	unsigned long gpa = gfn << PAGE_SHIFT;
+-	pte_t *ptep;
++	pte_t *ptep, pte;
+ 	unsigned int shift;
+ 	int ret = 0;
+ 	unsigned long old, *rmapp;
+@@ -1060,12 +1060,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
+ 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ 		return ret;
+ 
+-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+-	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
+-		ret = 1;
+-		if (shift)
+-			ret = 1 << (shift - PAGE_SHIFT);
++	/*
++	 * For performance reasons we don't hold kvm->mmu_lock while walking the
++	 * partition scoped table.
++	 */
++	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
++	if (!ptep)
++		return 0;
++
++	pte = READ_ONCE(*ptep);
++	if (pte_present(pte) && pte_dirty(pte)) {
+ 		spin_lock(&kvm->mmu_lock);
++		/*
++		 * Recheck the pte again
++		 */
++		if (pte_val(pte) != pte_val(*ptep)) {
++			/*
++			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
++			 * only find PAGE_SIZE pte entries here. We can continue
++			 * to use the pte addr returned by above page table
++			 * walk.
++			 */
++			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
++				spin_unlock(&kvm->mmu_lock);
++				return 0;
++			}
++		}
++
++		ret = 1;
++		VM_BUG_ON(shift);
+ 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
+ 					      gpa, shift);
+ 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
+@@ -1121,7 +1144,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ 	gpa = memslot->base_gfn << PAGE_SHIFT;
+ 	spin_lock(&kvm->mmu_lock);
+ 	for (n = memslot->npages; n; --n) {
+-		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
++		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ 		if (ptep && pte_present(*ptep))
+ 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ 					 kvm->arch.lpid);
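
Aside on the kvm_radix_test_clear_dirty() rework above: the pte is now read
without mmu_lock and revalidated under the lock before the dirty bit is
cleared. A minimal userspace sketch of that lockless-read-then-recheck shape,
with pthreads and a plain word standing in for mmu_lock and the pte (names
illustrative, not KVM code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DIRTY	0x1UL

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned long shared_pte = DIRTY;

/* Cheap lockless snapshot first, then recheck under the lock before
 * committing -- the same shape as the patched function. */
static bool test_and_clear_dirty(void)
{
	unsigned long old = atomic_load(&shared_pte);	/* ~READ_ONCE() */

	if (!(old & DIRTY))
		return false;

	pthread_mutex_lock(&lock);
	if (!(atomic_load(&shared_pte) & DIRTY)) {	/* value may have changed */
		pthread_mutex_unlock(&lock);
		return false;
	}
	atomic_fetch_and(&shared_pte, ~DIRTY);
	pthread_mutex_unlock(&lock);
	return true;
}

int main(void)
{
	printf("%d %d\n", test_and_clear_dirty(), test_and_clear_dirty());
	return 0;	/* prints "1 0" */
}
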
+diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
+index dc97e5be76f6..7f1fc5db13ea 100644
+--- a/arch/powerpc/kvm/book3s_hv_nested.c
++++ b/arch/powerpc/kvm/book3s_hv_nested.c
+@@ -1362,7 +1362,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
+ 	/* See if can find translation in our partition scoped tables for L1 */
+ 	pte = __pte(0);
+ 	spin_lock(&kvm->mmu_lock);
+-	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
++	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
+ 	if (!shift)
+ 		shift = PAGE_SHIFT;
+ 	if (pte_p)
+diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
+index 6d321f5f101d..7184d55d87aa 100644
+--- a/arch/s390/kernel/debug.c
++++ b/arch/s390/kernel/debug.c
+@@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
+ 	if (!areas)
+ 		goto fail_malloc_areas;
+ 	for (i = 0; i < nr_areas; i++) {
+		/* __GFP_NOWARN to avoid a user-triggerable WARN; failures are handled */
+ 		areas[i] = kmalloc_array(pages_per_area,
+ 					 sizeof(debug_entry_t *),
+-					 GFP_KERNEL);
++					 GFP_KERNEL | __GFP_NOWARN);
+ 		if (!areas[i])
+ 			goto fail_malloc_areas2;
+ 		for (j = 0; j < pages_per_area; j++) {
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index a19a680542ce..19b6c42739fc 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -48,6 +48,13 @@ enum split_lock_detect_state {
+ static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
+ static u64 msr_test_ctrl_cache __ro_after_init;
+ 
++/*
++ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
++ * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
++ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
++ */
++static bool cpu_model_supports_sld __ro_after_init;
++
+ /*
+  * Processors which have self-snooping capability can handle conflicting
+  * memory type across CPUs by snooping its own cache. However, there exists
+@@ -1064,7 +1071,8 @@ static void sld_update_msr(bool on)
+ 
+ static void split_lock_init(void)
+ {
+-	split_lock_verify_msr(sld_state != sld_off);
++	if (cpu_model_supports_sld)
++		split_lock_verify_msr(sld_state != sld_off);
+ }
+ 
+ static void split_lock_warn(unsigned long ip)
+@@ -1167,5 +1175,6 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
+ 		return;
+ 	}
+ 
++	cpu_model_supports_sld = true;
+ 	split_lock_setup();
+ }
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index b1cd3535c525..28fc323e3fe3 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -128,21 +128,15 @@ EXPORT_SYMBOL_GPL(af_alg_release);
+ void af_alg_release_parent(struct sock *sk)
+ {
+ 	struct alg_sock *ask = alg_sk(sk);
+-	unsigned int nokey = ask->nokey_refcnt;
+-	bool last = nokey && !ask->refcnt;
++	unsigned int nokey = atomic_read(&ask->nokey_refcnt);
+ 
+ 	sk = ask->parent;
+ 	ask = alg_sk(sk);
+ 
+-	local_bh_disable();
+-	bh_lock_sock(sk);
+-	ask->nokey_refcnt -= nokey;
+-	if (!last)
+-		last = !--ask->refcnt;
+-	bh_unlock_sock(sk);
+-	local_bh_enable();
++	if (nokey)
++		atomic_dec(&ask->nokey_refcnt);
+ 
+-	if (last)
++	if (atomic_dec_and_test(&ask->refcnt))
+ 		sock_put(sk);
+ }
+ EXPORT_SYMBOL_GPL(af_alg_release_parent);
+@@ -187,7 +181,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 
+ 	err = -EBUSY;
+ 	lock_sock(sk);
+-	if (ask->refcnt | ask->nokey_refcnt)
++	if (atomic_read(&ask->refcnt))
+ 		goto unlock;
+ 
+ 	swap(ask->type, type);
+@@ -236,7 +230,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
+ 	int err = -EBUSY;
+ 
+ 	lock_sock(sk);
+-	if (ask->refcnt)
++	if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
+ 		goto unlock;
+ 
+ 	type = ask->type;
+@@ -301,12 +295,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
+ 	if (err)
+ 		goto unlock;
+ 
+-	if (nokey || !ask->refcnt++)
++	if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
+ 		sock_hold(sk);
+-	ask->nokey_refcnt += nokey;
++	if (nokey) {
++		atomic_inc(&ask->nokey_refcnt);
++		atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
++	}
+ 	alg_sk(sk2)->parent = sk;
+ 	alg_sk(sk2)->type = type;
+-	alg_sk(sk2)->nokey_refcnt = nokey;
+ 
+ 	newsock->ops = type->ops;
+ 	newsock->state = SS_CONNECTED;
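
Aside on the af_alg refcount conversion above: the counters move from
socket-lock-protected integers to atomics, so the last put is detected with
atomic_dec_and_test() instead of a locked decrement. A userspace sketch of
that pattern using C11 atomics (illustrative names, not the kernel
implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_uint refcnt;
};

static void obj_put(struct obj *o)
{
	/* atomic_fetch_sub returns the previous value, so seeing 1 means
	 * this caller dropped the last reference, mirroring
	 * atomic_dec_and_test() in the patch. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		puts("last reference dropped, freeing");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 2);	/* two holders */
	obj_put(o);			/* one left */
	obj_put(o);			/* last put frees */
	return 0;
}
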
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index eb1910b6d434..0ae000a61c7f 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -384,7 +384,7 @@ static int aead_check_key(struct socket *sock)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 
+ 	lock_sock(sk);
+-	if (ask->refcnt)
++	if (!atomic_read(&ask->nokey_refcnt))
+ 		goto unlock_child;
+ 
+ 	psk = ask->parent;
+@@ -396,11 +396,8 @@ static int aead_check_key(struct socket *sock)
+ 	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ 		goto unlock;
+ 
+-	if (!pask->refcnt++)
+-		sock_hold(psk);
+-
+-	ask->refcnt = 1;
+-	sock_put(psk);
++	atomic_dec(&pask->nokey_refcnt);
++	atomic_set(&ask->nokey_refcnt, 0);
+ 
+ 	err = 0;
+ 
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index da1ffa4f7f8d..e71727c25a7d 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 
+ 	lock_sock(sk);
+-	if (ask->refcnt)
++	if (!atomic_read(&ask->nokey_refcnt))
+ 		goto unlock_child;
+ 
+ 	psk = ask->parent;
+@@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock)
+ 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ 		goto unlock;
+ 
+-	if (!pask->refcnt++)
+-		sock_hold(psk);
+-
+-	ask->refcnt = 1;
+-	sock_put(psk);
++	atomic_dec(&pask->nokey_refcnt);
++	atomic_set(&ask->nokey_refcnt, 0);
+ 
+ 	err = 0;
+ 
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 4c3bdffe0c3a..ec5567c87a6d 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -211,7 +211,7 @@ static int skcipher_check_key(struct socket *sock)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 
+ 	lock_sock(sk);
+-	if (ask->refcnt)
++	if (!atomic_read(&ask->nokey_refcnt))
+ 		goto unlock_child;
+ 
+ 	psk = ask->parent;
+@@ -223,11 +223,8 @@ static int skcipher_check_key(struct socket *sock)
+ 	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ 		goto unlock;
+ 
+-	if (!pask->refcnt++)
+-		sock_hold(psk);
+-
+-	ask->refcnt = 1;
+-	sock_put(psk);
++	atomic_dec(&pask->nokey_refcnt);
++	atomic_set(&ask->nokey_refcnt, 0);
+ 
+ 	err = 0;
+ 
+diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
+index 873e039ad4b7..62873388b24f 100644
+--- a/drivers/acpi/fan.c
++++ b/drivers/acpi/fan.c
+@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
+ 
+ static const struct acpi_device_id fan_device_ids[] = {
+ 	{"PNP0C0B", 0},
+-	{"INT1044", 0},
+ 	{"INT3404", 0},
++	{"INTC1044", 0},
+ 	{"", 0},
+ };
+ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 9d21bf0f155e..980df853ee49 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -878,6 +878,7 @@ out_put_disk:
+ 	put_disk(vblk->disk);
+ out_free_vq:
+ 	vdev->config->del_vqs(vdev);
++	kfree(vblk->vqs);
+ out_free_vblk:
+ 	kfree(vblk);
+ out_free_index:
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 87f449340202..1784530b8387 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ 		goto out;
+ 	}
+ 
+-	/* atomic tpm command send and result receive. We only hold the ops
+-	 * lock during this period so that the tpm can be unregistered even if
+-	 * the char dev is held open.
+-	 */
+-	if (tpm_try_get_ops(priv->chip)) {
+-		ret = -EPIPE;
+-		goto out;
+-	}
+-
+ 	priv->response_length = 0;
+ 	priv->response_read = false;
+ 	*off = 0;
+@@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ 	if (file->f_flags & O_NONBLOCK) {
+ 		priv->command_enqueued = true;
+ 		queue_work(tpm_dev_wq, &priv->async_work);
+-		tpm_put_ops(priv->chip);
+ 		mutex_unlock(&priv->buffer_mutex);
+ 		return size;
+ 	}
+ 
++	/* atomic tpm command send and result receive. We only hold the ops
++	 * lock during this period so that the tpm can be unregistered even if
++	 * the char dev is held open.
++	 */
++	if (tpm_try_get_ops(priv->chip)) {
++		ret = -EPIPE;
++		goto out;
++	}
++
+ 	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ 			       sizeof(priv->data_buffer));
+ 	tpm_put_ops(priv->chip);
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 09fe45246b8c..994385bf37c0 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 	if (rc)
+ 		goto init_irq_cleanup;
+ 
+-	if (!strcmp(id->compat, "IBM,vtpm20")) {
+-		chip->flags |= TPM_CHIP_FLAG_TPM2;
+-		rc = tpm2_get_cc_attrs_tbl(chip);
+-		if (rc)
+-			goto init_irq_cleanup;
+-	}
+-
+ 	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ 				ibmvtpm->rtce_buf != NULL,
+ 				HZ)) {
+@@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 		goto init_irq_cleanup;
+ 	}
+ 
++	if (!strcmp(id->compat, "IBM,vtpm20")) {
++		chip->flags |= TPM_CHIP_FLAG_TPM2;
++		rc = tpm2_get_cc_attrs_tbl(chip);
++		if (rc)
++			goto init_irq_cleanup;
++	}
++
+ 	return tpm_chip_register(chip);
+ init_irq_cleanup:
+ 	do {
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 07df88f2e305..e782aaaf3e1f 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
+ 			     dentry->d_name.name, ret > 0 ? name : "");
+ }
+ 
+-static const struct dentry_operations dma_buf_dentry_ops = {
+-	.d_dname = dmabuffs_dname,
+-};
+-
+-static struct vfsmount *dma_buf_mnt;
+-
+-static int dma_buf_fs_init_context(struct fs_context *fc)
+-{
+-	struct pseudo_fs_context *ctx;
+-
+-	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+-	if (!ctx)
+-		return -ENOMEM;
+-	ctx->dops = &dma_buf_dentry_ops;
+-	return 0;
+-}
+-
+-static struct file_system_type dma_buf_fs_type = {
+-	.name = "dmabuf",
+-	.init_fs_context = dma_buf_fs_init_context,
+-	.kill_sb = kill_anon_super,
+-};
+-
+-static int dma_buf_release(struct inode *inode, struct file *file)
++static void dma_buf_release(struct dentry *dentry)
+ {
+ 	struct dma_buf *dmabuf;
+ 
+-	if (!is_dma_buf_file(file))
+-		return -EINVAL;
+-
+-	dmabuf = file->private_data;
++	dmabuf = dentry->d_fsdata;
+ 
+ 	BUG_ON(dmabuf->vmapping_counter);
+ 
+@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
+ 	module_put(dmabuf->owner);
+ 	kfree(dmabuf->name);
+ 	kfree(dmabuf);
++}
++
++static const struct dentry_operations dma_buf_dentry_ops = {
++	.d_dname = dmabuffs_dname,
++	.d_release = dma_buf_release,
++};
++
++static struct vfsmount *dma_buf_mnt;
++
++static int dma_buf_fs_init_context(struct fs_context *fc)
++{
++	struct pseudo_fs_context *ctx;
++
++	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
++	if (!ctx)
++		return -ENOMEM;
++	ctx->dops = &dma_buf_dentry_ops;
+ 	return 0;
+ }
+ 
++static struct file_system_type dma_buf_fs_type = {
++	.name = "dmabuf",
++	.init_fs_context = dma_buf_fs_init_context,
++	.kill_sb = kill_anon_super,
++};
++
+ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+ {
+ 	struct dma_buf *dmabuf;
+@@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
+ }
+ 
+ static const struct file_operations dma_buf_fops = {
+-	.release	= dma_buf_release,
+ 	.mmap		= dma_buf_mmap_internal,
+ 	.llseek		= dma_buf_llseek,
+ 	.poll		= dma_buf_poll,
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
+index 613828d3f106..168935b3afa1 100644
+--- a/drivers/firmware/efi/Kconfig
++++ b/drivers/firmware/efi/Kconfig
+@@ -267,3 +267,14 @@ config EFI_EARLYCON
+ 	depends on SERIAL_EARLYCON && !ARM && !IA64
+ 	select FONT_SUPPORT
+ 	select ARCH_USE_MEMREMAP_PROT
++
++config EFI_CUSTOM_SSDT_OVERLAYS
++	bool "Load custom ACPI SSDT overlay from an EFI variable"
++	depends on EFI_VARS && ACPI
++	default ACPI_TABLE_UPGRADE
++	help
++	  Allow loading of an ACPI SSDT overlay from an EFI variable specified
++	  by a kernel command line option.
++
++	  See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
++	  information.
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 4e3055238f31..20a7ba47a792 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -189,7 +189,7 @@ static void generic_ops_unregister(void)
+ 	efivars_unregister(&generic_efivars);
+ }
+ 
+-#if IS_ENABLED(CONFIG_ACPI)
++#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
+ #define EFIVAR_SSDT_NAME_MAX	16
+ static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+ static int __init efivar_ssdt_setup(char *str)
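
For context on the two EFI hunks above: when the new option is enabled, the
SSDT overlay is selected at boot with a kernel command-line argument of the
form below (variable name illustrative; details are in the ssdt-overlays.rst
document cited by the help text):

    efivar_ssdt=MYSSDT
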
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 58f9d8c3a17a..44f927641b89 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ 				(mode_info->atom_context->bios + data_offset);
+ 			switch (crev) {
+ 			case 11:
++			case 12:
+ 				mem_channel_number = igp_info->v11.umachannelnumber;
+ 				/* channel width is 64 */
+ 				if (vram_width)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index affde2de2a0d..59288653412d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4091,6 +4091,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	need_full_reset = job_signaled = false;
+ 	INIT_LIST_HEAD(&device_list);
+ 
++	amdgpu_ras_set_error_query_ready(adev, false);
++
+ 	dev_info(adev->dev, "GPU %s begin!\n",
+ 		(in_ras_intr && !use_baco) ? "jobs stop":"reset");
+ 
+@@ -4147,6 +4149,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	/* block all schedulers and reset given job's ring */
+ 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ 		if (tmp_adev != adev) {
++			amdgpu_ras_set_error_query_ready(tmp_adev, false);
+ 			amdgpu_device_lock_adev(tmp_adev, false);
+ 			if (!amdgpu_sriov_vf(tmp_adev))
+ 			                amdgpu_amdkfd_pre_reset(tmp_adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 532f4d908b8d..96b8feb77b15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
+ 	if (r)
+ 		return r;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
++	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+ }
+ 
+ static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
+@@ -2622,7 +2622,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
+ 	if (r)
+ 		return r;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
++	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+ }
+ 
+ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index ab379b44679c..cd18596b47d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+ static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ 				uint64_t addr);
+ 
++void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
++{
++	if (adev && amdgpu_ras_get_context(adev))
++		amdgpu_ras_get_context(adev)->error_query_ready = ready;
++}
++
++bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
++{
++	if (adev && amdgpu_ras_get_context(adev))
++		return amdgpu_ras_get_context(adev)->error_query_ready;
++
++	return false;
++}
++
+ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ 					size_t size, loff_t *pos)
+ {
+@@ -281,7 +295,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
+ 	struct ras_debug_if data;
+ 	int ret = 0;
+ 
+-	if (amdgpu_ras_intr_triggered()) {
++	if (!amdgpu_ras_get_error_query_ready(adev)) {
+ 		DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ 		return size;
+ 	}
+@@ -399,7 +413,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
+ 		.head = obj->head,
+ 	};
+ 
+-	if (amdgpu_ras_intr_triggered())
++	if (!amdgpu_ras_get_error_query_ready(obj->adev))
+ 		return snprintf(buf, PAGE_SIZE,
+ 				"Query currently inaccessible\n");
+ 
+@@ -1430,9 +1444,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
+ 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+ 
+ 	/* Build list of devices to query RAS related errors */
+-	if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
++	if  (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ 		device_list_handle = &hive->device_list;
+-	} else {
++	else {
++		INIT_LIST_HEAD(&device_list);
+ 		list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ 		device_list_handle = &device_list;
+ 	}
+@@ -1896,8 +1911,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ 	}
+ 
+ 	/* in resume phase, no need to create ras fs node */
+-	if (adev->in_suspend || adev->in_gpu_reset)
++	if (adev->in_suspend || adev->in_gpu_reset) {
++		amdgpu_ras_set_error_query_ready(adev, true);
+ 		return 0;
++	}
+ 
+ 	if (ih_info->cb) {
+ 		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+@@ -1909,6 +1926,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ 	if (r)
+ 		goto sysfs;
+ 
++	amdgpu_ras_set_error_query_ready(adev, true);
++
+ 	return 0;
+ cleanup:
+ 	amdgpu_ras_sysfs_remove(adev, ras_block);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+index 55c3eceb390d..e7df5d8429f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+@@ -334,6 +334,8 @@ struct amdgpu_ras {
+ 	uint32_t flags;
+ 	bool reboot;
+ 	struct amdgpu_ras_eeprom_control eeprom_control;
++
++	bool error_query_ready;
+ };
+ 
+ struct ras_fs_data {
+@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
+ 
+ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+ 
++void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f9f02e08054b..69b1f61928ef 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3797,8 +3797,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ 
+ static enum dc_color_depth
+ convert_color_depth_from_display_info(const struct drm_connector *connector,
+-				      const struct drm_connector_state *state,
+-				      bool is_y420)
++				      bool is_y420, int requested_bpc)
+ {
+ 	uint8_t bpc;
+ 
+@@ -3818,10 +3817,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
+ 		bpc = bpc ? bpc : 8;
+ 	}
+ 
+-	if (!state)
+-		state = connector->state;
+-
+-	if (state) {
++	if (requested_bpc > 0) {
+ 		/*
+ 		 * Cap display bpc based on the user requested value.
+ 		 *
+@@ -3830,7 +3826,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
+ 		 * or if this was called outside of atomic check, so it
+ 		 * can't be used directly.
+ 		 */
+-		bpc = min(bpc, state->max_requested_bpc);
++		bpc = min_t(u8, bpc, requested_bpc);
+ 
+ 		/* Round down to the nearest even number. */
+ 		bpc = bpc - (bpc & 1);
+@@ -3952,7 +3948,8 @@ static void fill_stream_properties_from_drm_display_mode(
+ 	const struct drm_display_mode *mode_in,
+ 	const struct drm_connector *connector,
+ 	const struct drm_connector_state *connector_state,
+-	const struct dc_stream_state *old_stream)
++	const struct dc_stream_state *old_stream,
++	int requested_bpc)
+ {
+ 	struct dc_crtc_timing *timing_out = &stream->timing;
+ 	const struct drm_display_info *info = &connector->display_info;
+@@ -3982,8 +3979,9 @@ static void fill_stream_properties_from_drm_display_mode(
+ 
+ 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ 	timing_out->display_color_depth = convert_color_depth_from_display_info(
+-		connector, connector_state,
+-		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
++		connector,
++		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
++		requested_bpc);
+ 	timing_out->scan_type = SCANNING_TYPE_NODATA;
+ 	timing_out->hdmi_vic = 0;
+ 
+@@ -4189,7 +4187,8 @@ static struct dc_stream_state *
+ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		       const struct drm_display_mode *drm_mode,
+ 		       const struct dm_connector_state *dm_state,
+-		       const struct dc_stream_state *old_stream)
++		       const struct dc_stream_state *old_stream,
++		       int requested_bpc)
+ {
+ 	struct drm_display_mode *preferred_mode = NULL;
+ 	struct drm_connector *drm_connector;
+@@ -4274,10 +4273,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 	*/
+ 	if (!scale || mode_refresh != preferred_refresh)
+ 		fill_stream_properties_from_drm_display_mode(stream,
+-			&mode, &aconnector->base, con_state, NULL);
++			&mode, &aconnector->base, con_state, NULL, requested_bpc);
+ 	else
+ 		fill_stream_properties_from_drm_display_mode(stream,
+-			&mode, &aconnector->base, con_state, old_stream);
++			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
+ 
+ 	stream->timing.flags.DSC = 0;
+ 
+@@ -4800,16 +4799,55 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+ 	create_eml_sink(aconnector);
+ }
+ 
++static struct dc_stream_state *
++create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
++				const struct drm_display_mode *drm_mode,
++				const struct dm_connector_state *dm_state,
++				const struct dc_stream_state *old_stream)
++{
++	struct drm_connector *connector = &aconnector->base;
++	struct amdgpu_device *adev = connector->dev->dev_private;
++	struct dc_stream_state *stream;
++	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
++	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
++	enum dc_status dc_result = DC_OK;
++
++	do {
++		stream = create_stream_for_sink(aconnector, drm_mode,
++						dm_state, old_stream,
++						requested_bpc);
++		if (stream == NULL) {
++			DRM_ERROR("Failed to create stream for sink!\n");
++			break;
++		}
++
++		dc_result = dc_validate_stream(adev->dm.dc, stream);
++
++		if (dc_result != DC_OK) {
++			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
++				      drm_mode->hdisplay,
++				      drm_mode->vdisplay,
++				      drm_mode->clock,
++				      dc_result);
++
++			dc_stream_release(stream);
++			stream = NULL;
++			requested_bpc -= 2; /* lower bpc to retry validation */
++		}
++
++	} while (stream == NULL && requested_bpc >= 6);
++
++	return stream;
++}
++
+ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ 				   struct drm_display_mode *mode)
+ {
+ 	int result = MODE_ERROR;
+ 	struct dc_sink *dc_sink;
+-	struct amdgpu_device *adev = connector->dev->dev_private;
+ 	/* TODO: Unhardcode stream count */
+ 	struct dc_stream_state *stream;
+ 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+-	enum dc_status dc_result = DC_OK;
+ 
+ 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
+@@ -4830,24 +4868,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ 		goto fail;
+ 	}
+ 
+-	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
+-	if (stream == NULL) {
+-		DRM_ERROR("Failed to create stream for sink!\n");
+-		goto fail;
+-	}
+-
+-	dc_result = dc_validate_stream(adev->dm.dc, stream);
+-
+-	if (dc_result == DC_OK)
++	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
++	if (stream) {
++		dc_stream_release(stream);
+ 		result = MODE_OK;
+-	else
+-		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+-			      mode->hdisplay,
+-			      mode->vdisplay,
+-			      mode->clock,
+-			      dc_result);
+-
+-	dc_stream_release(stream);
++	}
+ 
+ fail:
+ 	/* TODO: error handling*/
+@@ -5170,10 +5195,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ 		return 0;
+ 
+ 	if (!state->duplicated) {
++		int max_bpc = conn_state->max_requested_bpc;
+ 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ 				aconnector->force_yuv420_output;
+-		color_depth = convert_color_depth_from_display_info(connector, conn_state,
+-								    is_y420);
++		color_depth = convert_color_depth_from_display_info(connector,
++								    is_y420,
++								    max_bpc);
+ 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ 		clock = adjusted_mode->clock;
+ 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+@@ -7589,10 +7616,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ 			goto skip_modeset;
+ 
+-		new_stream = create_stream_for_sink(aconnector,
+-						     &new_crtc_state->mode,
+-						    dm_new_conn_state,
+-						    dm_old_crtc_state->stream);
++		new_stream = create_validate_stream_for_sink(aconnector,
++							     &new_crtc_state->mode,
++							     dm_new_conn_state,
++							     dm_old_crtc_state->stream);
+ 
+ 		/*
+ 		 * we can have no stream on ACTION_SET if a display
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 4acaf4be8a81..c825d383f0f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2533,10 +2533,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 
+ 	copy_stream_update_to_stream(dc, context, stream, stream_update);
+ 
+-	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+-		DC_ERROR("Mode validation failed for stream update!\n");
+-		dc_release_state(context);
+-		return;
++	if (update_type > UPDATE_TYPE_FAST) {
++		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
++			DC_ERROR("Mode validation failed for stream update!\n");
++			dc_release_state(context);
++			return;
++		}
+ 	}
+ 
+ 	commit_planes_for_stream(
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+index 16aa171971d3..f1e7024c508c 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -508,9 +508,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
+ 	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
+ 	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
+ 
+-	ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+-	if (ret)
+-		goto err4;
++	if (adev->psp.ras.ras) {
++		ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
++		if (ret)
++			goto err4;
++	}
+ 
+ 	return 0;
+ 
+@@ -546,7 +548,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
+ 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 
+-	smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
++	if (adev->psp.ras.ras)
++		smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+ 
+ 	if (priv) {
+ 		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
+index 08b56d7ab4f4..92da746f01c1 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
+ 	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
+ }
+ 
++static void __rcu_cacheline_free(struct rcu_head *rcu)
++{
++	struct intel_timeline_cacheline *cl =
++		container_of(rcu, typeof(*cl), rcu);
++
++	i915_active_fini(&cl->active);
++	kfree(cl);
++}
++
+ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
+ {
+ 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
+ 	i915_vma_put(cl->hwsp->vma);
+ 	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+ 
+-	i915_active_fini(&cl->active);
+-	kfree_rcu(cl, rcu);
++	call_rcu(&cl->rcu, __rcu_cacheline_free);
+ }
+ 
+ __i915_active_call
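
Aside on the intel_timeline hunk above: kfree_rcu() can only free the object
after the grace period, while the cacheline also needs i915_active_fini() run
first, hence the switch to call_rcu() with a custom callback. A toy userspace
model of the callback-plus-container_of shape (the RCU grace-period deferral
itself is elided; not the i915 code):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel's rcu_head / call_rcu(); here the "grace
 * period" is skipped and the callback runs immediately. */
struct rcu_head { void (*func)(struct rcu_head *); };

static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	head->func = func;
	head->func(head);	/* the kernel would defer this */
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cacheline {
	int active;		/* stands in for cl->active */
	struct rcu_head rcu;
};

static void cacheline_free(struct rcu_head *rcu)
{
	struct cacheline *cl = container_of(rcu, struct cacheline, rcu);

	cl->active = 0;		/* the extra teardown kfree_rcu() can't do */
	printf("finished %p, freeing\n", (void *)cl);
	free(cl);
}

int main(void)
{
	struct cacheline *cl = calloc(1, sizeof(*cl));

	if (!cl)
		return 1;
	call_rcu(&cl->rcu, cacheline_free);
	return 0;
}
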
+diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
+new file mode 100644
+index 000000000000..e7e96d7073c7
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/shaders/README
+@@ -0,0 +1,46 @@
++ASM sources for auto generated shaders
++======================================
++
++The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
++pre-compiled batch chunks that will clear any residual render cache during
++context switch.
++
+They are generated from their respective platform ASM files present in the
++i915/gt/shaders/clear_kernel directory.
++
+The generated .c files should never be modified directly. Instead, any modification
+needs to be done on their respective ASM files, and the build instructions below
+need to be followed.
++
++Building
++========
++
++Environment
++-----------
++
+The IGT GPU Tools scripts and Mesa's i965 instruction assembler tool are used
+for building.
++
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from the IGT source root directory.
++
+The instructions below assume:
+    *  IGT GPU Tools source code is located in your home directory (~) as ~/igt
+    *  Mesa source code is located in your home directory (~) as ~/mesa
+       and built under the ~/mesa/build directory
+    *  Linux kernel source code is under your home directory (~) as ~/linux
++
++Instructions
++------------
++
++~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
++       ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
++~ $ cd ~/igt
++igt $ ./scripts/generate_clear_kernel.sh -g ivb \
++      -m ~/mesa/build/src/intel/tools/i965_asm
++
++~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
++    ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
++~ $ cd ~/igt
++igt $ ./scripts/generate_clear_kernel.sh -g hsw \
++      -m ~/mesa/build/src/intel/tools/i965_asm
+\ No newline at end of file
+diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
+new file mode 100644
+index 000000000000..5fdf384bb621
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
+@@ -0,0 +1,119 @@
++// SPDX-License-Identifier: MIT
++/*
++ * Copyright © 2020 Intel Corporation
++ */
++
++/*
++ * Kernel for PAVP buffer clear.
++ *
++ *	1. Clear all 64 GRF registers assigned to the kernel with designated value;
++ *	2. Write 32x16 block of all "0" to render target buffer which indirectly clears
++ *	   512 bytes of Render Cache.
++ */
++
++/* Store designated "clear GRF" value */
++mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
++
++/**
++ * Curbe Format
++ *
++ * DW 1.0 - Block Offset to write Render Cache
++ * DW 1.1 [15:0] - Clear Word
++ * DW 1.2 - Delay iterations
++ * DW 1.3 - Enable Instrumentation (only for debug)
++ * DW 1.4 - Rsvd (intended for context ID)
++ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
++ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
++ *
++ * Binding Table
++ *
++ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
++ * BTI 1: Wait/Instrumentation Buffer
++ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
++ *         Expected to be initialized to 0 by driver/another kernel
++ *  Layout:
++ *          RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
++ *          Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
++ */
++add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
++cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
++(+f0.0) jmpi(1) 352D                                            { align1 WE_all 1N };
++
++/**
++ * State Register has info on where this thread is running
++ *	IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
++ *	HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
++ */
++mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
++shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
++and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
++shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
++and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
++mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
++add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
++shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
++and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
++mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
++add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
++
++mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
++and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
++mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
++
++mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
++mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
++mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
++mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
++and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
++
++/* Media block read to fetch current value at specified location in instrumentation buffer */
++sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
++
++                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
++add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
++
++/* Media block write for updated value at specified location in instrumentation buffer */
++sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
++
++/* Delay thread for specified parameter */
++add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
++(+f0.0) jmpi(1) -32D                                            { align1 WE_all 1N };
++
++/* Store designated "clear GRF" value */
++mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
++
++/* Initialize looping parameters */
++mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
++mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
++
++/* Write 32x16 all "0" block */
++mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
++mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
++mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
++mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
++and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
++mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
++sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
++add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
++sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
++
++/* Now, clear all GRF registers */
++add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
++mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
++add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
++(+f0.0) jmpi(1) -64D                                            { align1 WE_all 1N };
++
+/* Terminate the thread */
++sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
++                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
+diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
+new file mode 100644
+index 000000000000..97c7ac9e3854
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
+@@ -0,0 +1,117 @@
++// SPDX-License-Identifier: MIT
++/*
++ * Copyright © 2020 Intel Corporation
++ */
++
++/*
++ * Kernel for PAVP buffer clear.
++ *
++ *	1. Clear all 64 GRF registers assigned to the kernel with designated value;
++ *	2. Write 32x16 block of all "0" to render target buffer which indirectly clears
++ *	   512 bytes of Render Cache.
++ */
++
++/* Store designated "clear GRF" value */
++mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
++
++/**
++ * Curbe Format
++ *
++ * DW 1.0 - Block Offset to write Render Cache
++ * DW 1.1 [15:0] - Clear Word
++ * DW 1.2 - Delay iterations
++ * DW 1.3 - Enable Instrumentation (only for debug)
++ * DW 1.4 - Rsvd (intended for context ID)
++ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
++ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
++ *
++ * Binding Table
++ *
++ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
++ * BTI 1: Wait/Instrumentation Buffer
++ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
++ *         Expected to be initialized to 0 by driver/another kernel
++ *  Layout :
++ *           RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
++ *           Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
++ */
++add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
++cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
++(+f0.0) jmpi(1) 44D                                             { align1 WE_all 1N };
++
++/**
++ * State Register has info on where this thread is running
++ *	IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
++ *	HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
++ */
++mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
++shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
++and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
++shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
++and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
++mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
++add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
++shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
++and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
++mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
++add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
++
++mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
++and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
++mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
++
++mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
++mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
++mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
++mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
++and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
++
++/* Media block read to fetch current value at specified location in instrumentation buffer */
++sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
++                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
++add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
++
++/* Media block write for updated value at specified location in instrumentation buffer */
++sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
++/* Delay thread for specified parameter */
++add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
++(+f0.0) jmpi(1) -4D                                             { align1 WE_all 1N };
++
++/* Store designated "clear GRF" value */
++mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
++
++/* Initialize looping parameters */
++mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
++mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
++
++/* Write 32x16 all "0" block */
++mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
++mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
++mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
++mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
++and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
++mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
++mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
++sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
++add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
++sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
++                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
++
++/* Now, clear all GRF registers */
++add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
++mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
++add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
++(+f0.0) jmpi(1) -8D                                             { align1 WE_all 1N };
++
+/* Terminate the thread */
++sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
++                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index a1b79ee2bd9d..a2f6b688a976 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2173,7 +2173,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ 
+ 	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ 	if (!dpu_enc)
+-		return ERR_PTR(ENOMEM);
++		return ERR_PTR(-ENOMEM);
+ 
+ 	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ 			drm_enc_mode, NULL);
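
Aside on the dpu_encoder one-liner above: ERR_PTR() encodes a negative errno
into a pointer, and IS_ERR() only recognizes values in the top MAX_ERRNO
bytes of the address space, so ERR_PTR(ENOMEM) with a positive errno looks
like a valid pointer to callers. Simplified, runnable copies of the two
helpers:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ENOMEM_ERR	12

static void *ERR_PTR(long error) { return (void *)error; }

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	printf("ERR_PTR(-ENOMEM) is error: %d\n", IS_ERR(ERR_PTR(-ENOMEM_ERR)));	/* 1 */
	printf("ERR_PTR(ENOMEM)  is error: %d\n", IS_ERR(ERR_PTR(ENOMEM_ERR)));	/* 0: the bug */
	return 0;
}
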
+diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+index 68d4644ac2dc..f07e0c32b93a 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+@@ -262,9 +262,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ 	struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
+ 	unsigned long reg;
+ 
+-	if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
+-			       reg & SUN4I_HDMI_HPD_HIGH,
+-			       0, 500000)) {
++	reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
++	if (reg & SUN4I_HDMI_HPD_HIGH) {
+ 		cec_phys_addr_invalidate(hdmi->cec_adap);
+ 		return connector_status_disconnected;
+ 	}
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index ec173da45b42..7469cfa72518 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1328,7 +1328,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ 	 * Write dump contents to the page. No need to synchronize; panic should
+ 	 * be single-threaded.
+ 	 */
+-	kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
++	kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+ 			     &bytes_written);
+ 	if (bytes_written)
+ 		hyperv_report_panic_msg(panic_pa, bytes_written);
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 0db8ef4fd6e1..a270b975e90b 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
+ 
+ 	res = setup_attrs(resource);
+ 	if (res)
+-		goto exit_free;
++		goto exit_free_capability;
+ 
+ 	resource->hwmon_dev = hwmon_device_register(&device->dev);
+ 	if (IS_ERR(resource->hwmon_dev)) {
+@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
+ 
+ exit_remove:
+ 	remove_attrs(resource);
++exit_free_capability:
++	free_capabilities(resource);
+ exit_free:
+ 	kfree(resource);
+ exit:
+diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
+index 743752a2467a..64122eb38060 100644
+--- a/drivers/hwmon/max6697.c
++++ b/drivers/hwmon/max6697.c
+@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
+  * Map device tree / platform data register bit map to chip bit map.
+  * Applies to alert register and over-temperature register.
+  */
+-#define MAX6697_MAP_BITS(reg)	((((reg) & 0x7e) >> 1) | \
++#define MAX6697_ALERT_MAP_BITS(reg)	((((reg) & 0x7e) >> 1) | \
+ 				 (((reg) & 0x01) << 6) | ((reg) & 0x80))
++#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
+ 
+ #define MAX6697_REG_STAT(n)		(0x44 + (n))
+ 
+@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
+ 		return ret;
+ 
+ 	ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
+-					MAX6697_MAP_BITS(pdata->alert_mask));
++				MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
+-				MAX6697_MAP_BITS(pdata->over_temperature_mask));
++			MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
+ 	if (ret < 0)
+ 		return ret;
+ 
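
The hunk above splits the single MAX6697_MAP_BITS macro in two because the alert-mask and over-temperature-mask registers place the channel bits differently. A minimal user-space sketch of the two remappings (the input value is hypothetical; only the shift/mask logic mirrors the patch):

#include <stdio.h>
#include <stdint.h>

/* Same shift/mask logic as the patch's two macros */
#define ALERT_MAP_BITS(reg)  ((((reg) & 0x7e) >> 1) | \
                              (((reg) & 0x01) << 6) | ((reg) & 0x80))
#define OVERT_MAP_BITS(reg)  (((reg) >> 1) | (((reg) & 0x01) << 7))

int main(void)
{
    uint8_t mask = 0x01;  /* hypothetical: only channel bit 0 set */

    /* The same platform-data bit lands in different register bits */
    printf("alert: 0x%02x\n", ALERT_MAP_BITS(mask));  /* prints 0x40 */
    printf("overt: 0x%02x\n", OVERT_MAP_BITS(mask));  /* prints 0x80 */
    return 0;
}
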
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 8d321bf7d15b..e721a016f3e7 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ 	struct pmbus_sensor *sensor;
+ 
+ 	sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+-				  PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
++				  0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ 				  false, false, true);
+ 
+ 	if (!sensor)
+@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ 		return 0;
+ 
+ 	sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+-				  PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
++				  0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ 				  false, false, true);
+ 
+ 	if (!sensor)
+ 		return -ENOMEM;
+ 
+ 	sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+-				  PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
++				  0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ 				  true, false, false);
+ 
+ 	if (!sensor)
+@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
+ 				continue;
+ 
+ 			if (pmbus_add_sensor(data, "fan", "input", index,
+-					     page, pmbus_fan_registers[f], 0xff,
++					     page, 0xff, pmbus_fan_registers[f],
+ 					     PSC_FAN, true, true, true) == NULL)
+ 				return -ENOMEM;
+ 
+diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
+index 7f10312d1b88..388978775be0 100644
+--- a/drivers/i2c/algos/i2c-algo-pca.c
++++ b/drivers/i2c/algos/i2c-algo-pca.c
+@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
+ 			DEB2("BUS ERROR - SDA Stuck low\n");
+ 			pca_reset(adap);
+ 			goto out;
+-		case 0x90: /* Bus error - SCL stuck low */
++		case 0x78: /* Bus error - SCL stuck low (PCA9665) */
++		case 0x90: /* Bus error - SCL stuck low (PCA9564) */
+ 			DEB2("BUS ERROR - SCL Stuck low\n");
+ 			pca_reset(adap);
+ 			goto out;
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 5536673060cc..3a9c2cfbef97 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -234,6 +234,17 @@ static const u32 supported_speeds[] = {
+ 	I2C_MAX_STANDARD_MODE_FREQ,
+ };
+ 
++static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
++	{
++		.ident = "Qtechnology QT5222",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
++		},
++	},
++	{ } /* terminate list */
++};
++
+ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ {
+ 	struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
+@@ -349,7 +360,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 
+ 	adap = &dev->adapter;
+ 	adap->owner = THIS_MODULE;
+-	adap->class = I2C_CLASS_DEPRECATED;
++	adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
++					I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
+ 	ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ 	adap->dev.of_node = pdev->dev.of_node;
+ 	adap->nr = -1;
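
dmi_check_system() scans a table terminated by an empty entry and returns nonzero when all DMI_MATCH fields of some entry match the running board, which is how the QT5222 ends up with I2C_CLASS_HWMON while every other system keeps I2C_CLASS_DEPRECATED. A rough user-space sketch of that table-scan pattern (the struct and helper are stand-ins, not the kernel's dmi_system_id API):

#include <stdio.h>
#include <string.h>

struct dmi_id { const char *vendor, *product; };

static const struct dmi_id quirks[] = {
    { "Qtechnology", "QT5222" },
    { NULL, NULL },  /* terminate list */
};

/* Return 1 when some quirk entry matches both identification strings */
static int dmi_match(const char *vendor, const char *product)
{
    const struct dmi_id *q;

    for (q = quirks; q->vendor; q++)
        if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
            return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", dmi_match("Qtechnology", "QT5222"));  /* prints 1 */
    return 0;
}
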
+diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
+index 2fd717d8dd30..71d7bae2cbca 100644
+--- a/drivers/i2c/busses/i2c-mlxcpld.c
++++ b/drivers/i2c/busses/i2c-mlxcpld.c
+@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
+ 		if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
+ 			mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
+ 					      &datalen, 1);
+-			if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
++			if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
+ 				dev_err(priv->dev, "Incorrect smbus block read message len\n");
+-				return -E2BIG;
++				return -EPROTO;
+ 			}
+ 		} else {
+ 			datalen = priv->xfer.data_len;
+diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
+index 2257d7f7810f..738d1faf4bba 100644
+--- a/drivers/infiniband/core/counters.c
++++ b/drivers/infiniband/core/counters.c
+@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
+ 	return ret;
+ }
+ 
+-static void counter_history_stat_update(const struct rdma_counter *counter)
++static void counter_history_stat_update(struct rdma_counter *counter)
+ {
+ 	struct ib_device *dev = counter->device;
+ 	struct rdma_port_counter *port_counter;
+@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
+ 	if (!port_counter->hstats)
+ 		return;
+ 
++	rdma_counter_query_stats(counter);
++
+ 	for (i = 0; i < counter->stats->num_counters; i++)
+ 		port_counter->hstats->value[i] += counter->stats->value[i];
+ }
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 124251b0ccba..b3e16a06c13b 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3681,10 +3681,10 @@ static void its_wait_vpt_parse_complete(void)
+ 	if (!gic_rdists->has_vpend_valid_dirty)
+ 		return;
+ 
+-	WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
+-						val,
+-						!(val & GICR_VPENDBASER_Dirty),
+-						10, 500));
++	WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
++						       val,
++						       !(val & GICR_VPENDBASER_Dirty),
++						       10, 500));
+ }
+ 
+ static void its_vpe_schedule(struct its_vpe *vpe)
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 30ab623343d3..882204d1ef4f 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ 			    bool force)
+ {
+-	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+-	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
+-	u32 val, mask, bit;
+-	unsigned long flags;
++	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
++	unsigned int cpu;
+ 
+ 	if (!force)
+ 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ 		return -EINVAL;
+ 
+-	gic_lock_irqsave(flags);
+-	mask = 0xff << shift;
+-	bit = gic_cpu_map[cpu] << shift;
+-	val = readl_relaxed(reg) & ~mask;
+-	writel_relaxed(val | bit, reg);
+-	gic_unlock_irqrestore(flags);
+-
++	writeb_relaxed(gic_cpu_map[cpu], reg);
+ 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ 
+ 	return IRQ_SET_MASK_OK_DONE;
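
GIC_DIST_TARGET holds one 8-bit CPU-target field per interrupt, four fields to a 32-bit word, and the GICv2 distributor permits byte-wide accesses to this register; that is what lets the locked read-modify-write above collapse into a single writeb_relaxed(). A small sketch of the two addressing schemes (the base address is made up):

#include <stdio.h>
#include <stdint.h>

#define GIC_DIST_TARGET 0x800  /* distributor target-register offset */

int main(void)
{
    uintptr_t dist_base = 0x08000000;  /* hypothetical distributor base */
    unsigned int irq = 42;

    /* Old scheme: address of the enclosing 32-bit word plus a shift */
    uintptr_t word = dist_base + GIC_DIST_TARGET + (irq & ~3u);
    unsigned int shift = (irq % 4) * 8;

    /* New scheme: every interrupt owns exactly one byte */
    uintptr_t byte = dist_base + GIC_DIST_TARGET + irq;

    printf("word 0x%lx shift %u -> byte 0x%lx\n",
           (unsigned long)word, shift, (unsigned long)byte);
    return 0;
}
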
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index f4f83d39b3dc..29881fea6acb 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -790,7 +790,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	}
+ 
+ 	/* Set target (no write same support) */
+-	ti->max_io_len = dev->zone_nr_sectors << 9;
++	ti->max_io_len = dev->zone_nr_sectors;
+ 	ti->num_flush_bios = 1;
+ 	ti->num_discard_bios = 1;
+ 	ti->num_write_zeroes_bios = 1;
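
ti->max_io_len is counted in 512-byte sectors and dev->zone_nr_sectors is already a sector count, so the removed << 9 shift inflated the limit by a factor of 512. A tiny sketch of the unit mix-up, with a hypothetical zone size:

#include <stdio.h>

#define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

int main(void)
{
    unsigned long long zone_nr_sectors = 524288;  /* hypothetical 256 MiB zone */

    /* Buggy: shifts a sector count as if converting bytes to sectors */
    unsigned long long wrong = zone_nr_sectors << SECTOR_SHIFT;
    /* Fixed: the max_io_len field already takes sectors */
    unsigned long long right = zone_nr_sectors;

    printf("wrong=%llu right=%llu\n", wrong, right);  /* 512x apart */
    return 0;
}
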
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index 7b9cd69f9844..d8ab8e366818 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ 	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+ 	struct cudbg_buffer temp_buff = { 0 };
+ 	struct cudbg_ch_cntxt *buff;
+-	u64 *dst_off, *src_off;
+ 	u8 *ctx_buf;
+ 	u8 i, k;
+ 	int rc;
+@@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ 		}
+ 
+ 		for (j = 0; j < max_ctx_qid; j++) {
++			__be64 *dst_off;
++			u64 *src_off;
++
+ 			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+-			dst_off = (u64 *)buff->data;
++			dst_off = (__be64 *)buff->data;
+ 
+ 			/* The data is stored in 64-bit cpu order.  Convert it
+ 			 * to big endian before parsing.
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 796555255207..7a7f61a8cdf4 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 			   unsigned int tid, bool dip, bool sip, bool dp,
+ 			   bool sp)
+ {
++	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
++	u8 *nat_fp = (u8 *)&f->fs.nat_fport;
++
+ 	if (dip) {
+ 		if (f->fs.type) {
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
+@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 	}
+ 
+ 	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
+-		      (dp ? f->fs.nat_lport : 0) |
+-		      (sp ? f->fs.nat_fport << 16 : 0), 1);
++		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
++		      (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
++		      1);
+ }
+ 
+ /* Validate filter spec against configuration done on the card. */
+@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 	fwr->fpm = htons(f->fs.mask.fport);
+ 
+ 	if (adapter->params.filter2_wr_support) {
++		u8 *nat_lp = (u8 *)&f->fs.nat_lport;
++		u8 *nat_fp = (u8 *)&f->fs.nat_fport;
++
+ 		fwr->natmode_to_ulp_type =
+ 			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
+ 						 ULP_MODE_TCPDDP :
+@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
+ 		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
+ 		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
+-		fwr->newlport = htons(f->fs.nat_lport);
+-		fwr->newfport = htons(f->fs.nat_fport);
++		fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
++		fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
+ 	}
+ 
+ 	/* Mark the filter as "pending" and ship off the Filter Work Request.
+@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
+ 		struct in_addr *addr;
+ 
+ 		addr = (struct in_addr *)ipmask;
+-		if (addr->s_addr == 0xffffffff)
++		if (ntohl(addr->s_addr) == 0xffffffff)
+ 			return true;
+ 	} else if (family == AF_INET6) {
+ 		struct in6_addr *addr6;
+ 
+ 		addr6 = (struct in6_addr *)ipmask;
+-		if (addr6->s6_addr32[0] == 0xffffffff &&
+-		    addr6->s6_addr32[1] == 0xffffffff &&
+-		    addr6->s6_addr32[2] == 0xffffffff &&
+-		    addr6->s6_addr32[3] == 0xffffffff)
++		if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
++		    ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
++		    ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
++		    ntohl(addr6->s6_addr32[3]) == 0xffffffff)
+ 			return true;
+ 	}
+ 	return false;
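
The filter spec keeps the NAT L4 ports in CPU byte order, so the TCB and work-request hunks above rebuild the layout the hardware expects from individual bytes rather than trusting the in-memory order of a u16, and the all-ones mask checks now compare in host order via ntohl(). A sketch of why the raw byte view of a port is endian-dependent (values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t port = 0x1234;          /* hypothetical CPU-order port */
    uint8_t *p = (uint8_t *)&port;

    /* On little-endian hosts p[0] is 0x34, on big-endian it is 0x12,
     * so code must name the byte it wants instead of assuming layout. */
    printf("p[0]=0x%02x p[1]=0x%02x\n", p[0], p[1]);
    printf("reassembled: 0x%04x\n", (unsigned)(p[1] | p[0] << 8));
    return 0;
}
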
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index a70018f067aa..e8934c48f09b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2604,7 +2604,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ 
+ 	/* Clear out filter specifications */
+ 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+-	f->fs.val.lport = cpu_to_be16(sport);
++	f->fs.val.lport = be16_to_cpu(sport);
+ 	f->fs.mask.lport  = ~0;
+ 	val = (u8 *)&sip;
+ 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 4a5fa9eba0b6..59b65d4db086 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
+ 	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
+ 	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
+ 	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+-	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
+-	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
+-	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
+-	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
+ };
+ 
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+@@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
+ 		struct flow_match_ports match;
+ 
+ 		flow_rule_match_ports(rule, &match);
+-		fs->val.lport = cpu_to_be16(match.key->dst);
+-		fs->mask.lport = cpu_to_be16(match.mask->dst);
+-		fs->val.fport = cpu_to_be16(match.key->src);
+-		fs->mask.fport = cpu_to_be16(match.mask->src);
++		fs->val.lport = be16_to_cpu(match.key->dst);
++		fs->mask.lport = be16_to_cpu(match.mask->dst);
++		fs->val.fport = be16_to_cpu(match.key->src);
++		fs->mask.fport = be16_to_cpu(match.mask->src);
+ 
+ 		/* also initialize nat_lport/fport to same values */
+-		fs->nat_lport = cpu_to_be16(match.key->dst);
+-		fs->nat_fport = cpu_to_be16(match.key->src);
++		fs->nat_lport = fs->val.lport;
++		fs->nat_fport = fs->val.fport;
+ 	}
+ 
+ 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+@@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+ 		switch (offset) {
+ 		case PEDIT_TCP_SPORT_DPORT:
+ 			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+-				offload_pedit(fs, cpu_to_be32(val) >> 16,
+-					      cpu_to_be32(mask) >> 16,
+-					      TCP_SPORT);
++				fs->nat_fport = val;
+ 			else
+-				offload_pedit(fs, cpu_to_be32(val),
+-					      cpu_to_be32(mask), TCP_DPORT);
++				fs->nat_lport = val >> 16;
+ 		}
+ 		fs->nat_mode = NAT_MODE_ALL;
+ 		break;
+@@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+ 		switch (offset) {
+ 		case PEDIT_UDP_SPORT_DPORT:
+ 			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+-				offload_pedit(fs, cpu_to_be32(val) >> 16,
+-					      cpu_to_be32(mask) >> 16,
+-					      UDP_SPORT);
++				fs->nat_fport = val;
+ 			else
+-				offload_pedit(fs, cpu_to_be32(val),
+-					      cpu_to_be32(mask), UDP_DPORT);
++				fs->nat_lport = val >> 16;
+ 		}
+ 		fs->nat_mode = NAT_MODE_ALL;
+ 	}
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+index 3f3c11e54d97..dede02505ceb 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
+ 			     bool next_header)
+ {
+ 	unsigned int i, j;
+-	u32 val, mask;
++	__be32 val, mask;
+ 	int off, err;
+ 	bool found;
+ 
+@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
+ 		const struct cxgb4_next_header *next;
+ 		bool found = false;
+ 		unsigned int i, j;
+-		u32 val, mask;
++		__be32 val, mask;
+ 		int off;
+ 
+ 		if (t->table[link_uhtid - 1].link_handle) {
+@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
+ 
+ 		/* Try to find matches that allow jumps to next header. */
+ 		for (i = 0; next[i].jump; i++) {
+-			if (next[i].offoff != cls->knode.sel->offoff ||
+-			    next[i].shift != cls->knode.sel->offshift ||
+-			    next[i].mask != cls->knode.sel->offmask ||
+-			    next[i].offset != cls->knode.sel->off)
++			if (next[i].sel.offoff != cls->knode.sel->offoff ||
++			    next[i].sel.offshift != cls->knode.sel->offshift ||
++			    next[i].sel.offmask != cls->knode.sel->offmask ||
++			    next[i].sel.off != cls->knode.sel->off)
+ 				continue;
+ 
+ 			/* Found a possible candidate.  Find a key that
+@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
+ 				val = cls->knode.sel->keys[j].val;
+ 				mask = cls->knode.sel->keys[j].mask;
+ 
+-				if (next[i].match_off == off &&
+-				    next[i].match_val == val &&
+-				    next[i].match_mask == mask) {
++				if (next[i].key.off == off &&
++				    next[i].key.val == val &&
++				    next[i].key.mask == mask) {
+ 					found = true;
+ 					break;
+ 				}
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+index 125868c6770a..f59dd4b2ae6f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+@@ -38,12 +38,12 @@
+ struct cxgb4_match_field {
+ 	int off; /* Offset from the beginning of the header to match */
+ 	/* Fill the value/mask pair in the spec if matched */
+-	int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
++	int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
+ };
+ 
+ /* IPv4 match fields */
+ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+-				      u32 val, u32 mask)
++				      __be32 val, __be32 mask)
+ {
+ 	f->val.tos  = (ntohl(val)  >> 16) & 0x000000FF;
+ 	f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
+@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+-				       u32 val, u32 mask)
++				       __be32 val, __be32 mask)
+ {
+ 	u32 mask_val;
+ 	u8 frag_val;
+@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+-					u32 val, u32 mask)
++					__be32 val, __be32 mask)
+ {
+ 	f->val.proto  = (ntohl(val)  >> 16) & 0x000000FF;
+ 	f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
+@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+-					 u32 val, u32 mask)
++					 __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.fip[0],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
+-					 u32 val, u32 mask)
++					 __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.lip[0],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
+ 
+ /* IPv6 match fields */
+ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+-				      u32 val, u32 mask)
++				      __be32 val, __be32 mask)
+ {
+ 	f->val.tos  = (ntohl(val)  >> 20) & 0x000000FF;
+ 	f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
+@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+-					u32 val, u32 mask)
++					__be32 val, __be32 mask)
+ {
+ 	f->val.proto  = (ntohl(val)  >> 8) & 0x000000FF;
+ 	f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
+@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.fip[0],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.fip[4],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.fip[4], &mask, sizeof(u32));
+@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.fip[8],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.fip[8], &mask, sizeof(u32));
+@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.fip[12],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.fip[12], &mask, sizeof(u32));
+@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.lip[0],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.lip[4],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.lip[4], &mask, sizeof(u32));
+@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.lip[8],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.lip[8], &mask, sizeof(u32));
+@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+ }
+ 
+ static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
+-					  u32 val, u32 mask)
++					  __be32 val, __be32 mask)
+ {
+ 	memcpy(&f->val.lip[12],  &val,  sizeof(u32));
+ 	memcpy(&f->mask.lip[12], &mask, sizeof(u32));
+@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
+ 
+ /* TCP/UDP match */
+ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
+-				      u32 val, u32 mask)
++				      __be32 val, __be32 mask)
+ {
+ 	f->val.fport  = ntohl(val)  >> 16;
+ 	f->mask.fport = ntohl(mask) >> 16;
+@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
+ };
+ 
+ struct cxgb4_next_header {
+-	unsigned int offset; /* Offset to next header */
+-	/* offset, shift, and mask added to offset above
++	/* Offset, shift, and mask added to beginning of the header
+ 	 * to get to next header.  Useful when using a header
+ 	 * field's value to jump to next header such as IHL field
+ 	 * in IPv4 header.
+ 	 */
+-	unsigned int offoff;
+-	u32 shift;
+-	u32 mask;
+-	/* match criteria to make this jump */
+-	unsigned int match_off;
+-	u32 match_val;
+-	u32 match_mask;
++	struct tc_u32_sel sel;
++	struct tc_u32_key key;
+ 	/* location of jump to make */
+ 	const struct cxgb4_match_field *jump;
+ };
+@@ -258,26 +252,74 @@ struct cxgb4_next_header {
+  * IPv4 header.
+  */
+ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
+-	{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+-	  .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
+-	  .jump = cxgb4_tcp_fields },
+-	{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+-	  .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
+-	  .jump = cxgb4_udp_fields },
+-	{ .jump = NULL }
++	{
++		/* TCP Jump */
++		.sel = {
++			.off = 0,
++			.offoff = 0,
++			.offshift = 6,
++			.offmask = cpu_to_be16(0x0f00),
++		},
++		.key = {
++			.off = 8,
++			.val = cpu_to_be32(0x00060000),
++			.mask = cpu_to_be32(0x00ff0000),
++		},
++		.jump = cxgb4_tcp_fields,
++	},
++	{
++		/* UDP Jump */
++		.sel = {
++			.off = 0,
++			.offoff = 0,
++			.offshift = 6,
++			.offmask = cpu_to_be16(0x0f00),
++		},
++		.key = {
++			.off = 8,
++			.val = cpu_to_be32(0x00110000),
++			.mask = cpu_to_be32(0x00ff0000),
++		},
++		.jump = cxgb4_udp_fields,
++	},
++	{ .jump = NULL },
+ };
+ 
+ /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
+  * to get to transport layer header.
+  */
+ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
+-	{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+-	  .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
+-	  .jump = cxgb4_tcp_fields },
+-	{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+-	  .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
+-	  .jump = cxgb4_udp_fields },
+-	{ .jump = NULL }
++	{
++		/* TCP Jump */
++		.sel = {
++			.off = 40,
++			.offoff = 0,
++			.offshift = 0,
++			.offmask = 0,
++		},
++		.key = {
++			.off = 4,
++			.val = cpu_to_be32(0x00000600),
++			.mask = cpu_to_be32(0x0000ff00),
++		},
++		.jump = cxgb4_tcp_fields,
++	},
++	{
++		/* UDP Jump */
++		.sel = {
++			.off = 40,
++			.offoff = 0,
++			.offshift = 0,
++			.offmask = 0,
++		},
++		.key = {
++			.off = 4,
++			.val = cpu_to_be32(0x00001100),
++			.mask = cpu_to_be32(0x0000ff00),
++		},
++		.jump = cxgb4_udp_fields,
++	},
++	{ .jump = NULL },
+ };
+ 
+ struct cxgb4_link {
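
Typing the callback arguments as __be32 lets sparse verify that every value handed over by the u32 classifier is converted with ntohl() exactly once before the shift-and-mask, and the jump tables can now express their match values directly in big-endian form. A standalone sketch of the extraction the fill helpers perform (the sample word is fabricated):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    /* Third 32-bit word of an IPv4 header (offset 8):
     * TTL, protocol, then the header checksum. Hypothetical values. */
    uint32_t word_be = htonl(0x40060000);  /* TTL 0x40, protocol 6 = TCP */

    /* Same conversion cxgb4_fill_ipv4_proto() performs */
    uint8_t proto = (ntohl(word_be) >> 16) & 0xff;

    printf("proto=%u\n", proto);  /* prints 6 */
    return 0;
}
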
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index db8106d9d6ed..28ce9856a078 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -3300,7 +3300,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
+ 
+ 	hwtstamps = skb_hwtstamps(skb);
+ 	memset(hwtstamps, 0, sizeof(*hwtstamps));
+-	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
++	hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
+ 
+ 	return RX_PTP_PKT_SUC;
+ }
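
The replaced expression dereferenced data through a u64 pointer, which requires 8-byte alignment on some architectures; get_unaligned_be64() assembles the value byte by byte instead. A portable sketch of the same idea (load_be64 is a stand-in for the kernel helper):

#include <stdio.h>
#include <stdint.h>

/* Byte-wise big-endian load, safe at any alignment -- the guarantee
 * the kernel's get_unaligned_be64() provides. */
static uint64_t load_be64(const void *p)
{
    const uint8_t *b = p;
    uint64_t v = 0;
    int i;

    for (i = 0; i < 8; i++)
        v = (v << 8) | b[i];
    return v;
}

int main(void)
{
    uint8_t buf[9] = { 0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

    /* &buf[1] is deliberately misaligned */
    printf("0x%016llx\n", (unsigned long long)load_be64(&buf[1]));
    return 0;
}
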
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 4486a0db8ef0..a7e4274d3f40 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -756,6 +756,9 @@ void enetc_get_si_caps(struct enetc_si *si)
+ 
+ 	if (val & ENETC_SIPCAPR0_QBV)
+ 		si->hw_features |= ENETC_SI_F_QBV;
++
++	if (val & ENETC_SIPCAPR0_PSFP)
++		si->hw_features |= ENETC_SI_F_PSFP;
+ }
+ 
+ static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+@@ -1567,6 +1570,41 @@ static int enetc_set_rss(struct net_device *ndev, int en)
+ 	return 0;
+ }
+ 
++static int enetc_set_psfp(struct net_device *ndev, int en)
++{
++	struct enetc_ndev_priv *priv = netdev_priv(ndev);
++
++	if (en) {
++		priv->active_offloads |= ENETC_F_QCI;
++		enetc_get_max_cap(priv);
++		enetc_psfp_enable(&priv->si->hw);
++	} else {
++		priv->active_offloads &= ~ENETC_F_QCI;
++		memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));
++		enetc_psfp_disable(&priv->si->hw);
++	}
++
++	return 0;
++}
++
++static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
++{
++	struct enetc_ndev_priv *priv = netdev_priv(ndev);
++	int i;
++
++	for (i = 0; i < priv->num_rx_rings; i++)
++		enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
++}
++
++static void enetc_enable_txvlan(struct net_device *ndev, bool en)
++{
++	struct enetc_ndev_priv *priv = netdev_priv(ndev);
++	int i;
++
++	for (i = 0; i < priv->num_tx_rings; i++)
++		enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
++}
++
+ int enetc_set_features(struct net_device *ndev,
+ 		       netdev_features_t features)
+ {
+@@ -1575,6 +1613,17 @@ int enetc_set_features(struct net_device *ndev,
+ 	if (changed & NETIF_F_RXHASH)
+ 		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+ 
++	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
++		enetc_enable_rxvlan(ndev,
++				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
++
++	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
++		enetc_enable_txvlan(ndev,
++				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
++
++	if (changed & NETIF_F_HW_TC)
++		enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index 56c43f35b633..2cfe877c3778 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -151,6 +151,7 @@ enum enetc_errata {
+ };
+ 
+ #define ENETC_SI_F_QBV BIT(0)
++#define ENETC_SI_F_PSFP BIT(1)
+ 
+ /* PCI IEP device data */
+ struct enetc_si {
+@@ -203,12 +204,20 @@ struct enetc_cls_rule {
+ };
+ 
+ #define ENETC_MAX_BDR_INT	2 /* fixed to max # of available cpus */
++struct psfp_cap {
++	u32 max_streamid;
++	u32 max_psfp_filter;
++	u32 max_psfp_gate;
++	u32 max_psfp_gatelist;
++	u32 max_psfp_meter;
++};
+ 
+ /* TODO: more hardware offloads */
+ enum enetc_active_offloads {
+ 	ENETC_F_RX_TSTAMP	= BIT(0),
+ 	ENETC_F_TX_TSTAMP	= BIT(1),
+ 	ENETC_F_QBV             = BIT(2),
++	ENETC_F_QCI		= BIT(3),
+ };
+ 
+ struct enetc_ndev_priv {
+@@ -231,6 +240,8 @@ struct enetc_ndev_priv {
+ 
+ 	struct enetc_cls_rule *cls_rules;
+ 
++	struct psfp_cap psfp_cap;
++
+ 	struct device_node *phy_node;
+ 	phy_interface_t if_mode;
+ };
+@@ -289,9 +300,46 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
+ void enetc_sched_speed_set(struct net_device *ndev);
+ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
++
++static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
++{
++	u32 reg;
++
++	reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
++	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
++	/* Port stream filter capability */
++	reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
++	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
++	/* Port stream gate capability */
++	reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
++	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
++	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
++	/* Port flow meter capability */
++	reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
++	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
++}
++
++static inline void enetc_psfp_enable(struct enetc_hw *hw)
++{
++	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
++		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
++		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
++}
++
++static inline void enetc_psfp_disable(struct enetc_hw *hw)
++{
++	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
++		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
++		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
++}
+ #else
+ #define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
+ #define enetc_sched_speed_set(ndev) (void)0
+ #define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+ #define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
++#define enetc_get_max_cap(p)		\
++	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
++
++#define enetc_psfp_enable(hw) (void)0
++#define enetc_psfp_disable(hw) (void)0
+ #endif
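
Each PSFP capability register packs one or two counts into bit fields, which enetc_get_max_cap() above pulls apart with mask-and-shift. A small sketch of that extraction for the gate-capability layout (the register value and the plain masks are stand-ins for a real ENETC_PSGCAPR read and its GENMASK() definitions):

#include <stdio.h>
#include <stdint.h>

#define SGIT_MASK 0x0000ffffu  /* gate count, bits [15:0] */
#define GCL_MASK  0x00070000u  /* gate-control-list size, bits [18:16] */

int main(void)
{
    uint32_t reg = 0x00040100;  /* hypothetical register read */

    unsigned int max_gate     = reg & SGIT_MASK;
    unsigned int max_gatelist = (reg & GCL_MASK) >> 16;

    printf("gates=%u gcl=%u\n", max_gate, max_gatelist);  /* 256, 4 */
    return 0;
}
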
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index 2a6523136947..02efda266c46 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -19,6 +19,7 @@
+ #define ENETC_SICTR1	0x1c
+ #define ENETC_SIPCAPR0	0x20
+ #define ENETC_SIPCAPR0_QBV	BIT(4)
++#define ENETC_SIPCAPR0_PSFP	BIT(9)
+ #define ENETC_SIPCAPR0_RSS	BIT(8)
+ #define ENETC_SIPCAPR1	0x24
+ #define ENETC_SITGTGR	0x30
+@@ -228,6 +229,15 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PM0_IFM_RLP	(BIT(5) | BIT(11))
+ #define ENETC_PM0_IFM_RGAUTO	(BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
+ #define ENETC_PM0_IFM_XGMII	BIT(12)
++#define ENETC_PSIDCAPR		0x1b08
++#define ENETC_PSIDCAPR_MSK	GENMASK(15, 0)
++#define ENETC_PSFCAPR		0x1b18
++#define ENETC_PSFCAPR_MSK	GENMASK(15, 0)
++#define ENETC_PSGCAPR		0x1b28
++#define ENETC_PSGCAPR_GCL_MSK	GENMASK(18, 16)
++#define ENETC_PSGCAPR_SGIT_MSK	GENMASK(15, 0)
++#define ENETC_PFMCAPR		0x1b38
++#define ENETC_PFMCAPR_MSK	GENMASK(15, 0)
+ 
+ /* MAC counters */
+ #define ENETC_PM0_REOCT		0x8100
+@@ -521,22 +531,22 @@ struct enetc_msg_cmd_header {
+ 
+ /* Common H/W utility functions */
+ 
+-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
+-				       bool en)
++static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
++					   bool en)
+ {
+-	u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
++	u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+ 
+ 	val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+-	enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
++	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
+ }
+ 
+-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
+-				       bool en)
++static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
++					   bool en)
+ {
+-	u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
++	u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+ 
+ 	val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+-	enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
++	enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
+ }
+ 
+ static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+@@ -621,3 +631,10 @@ struct enetc_cbd {
+ /* Port time specific departure */
+ #define ENETC_PTCTSDR(n)	(0x1210 + 4 * (n))
+ #define ENETC_TSDE		BIT(31)
++
++/* PSFP setting */
++#define ENETC_PPSFPMR 0x11b00
++#define ENETC_PPSFPMR_PSFPEN BIT(0)
++#define ENETC_PPSFPMR_VS BIT(1)
++#define ENETC_PPSFPMR_PVC BIT(2)
++#define ENETC_PPSFPMR_PVZC BIT(3)
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 85e2b741df41..438648a06f2a 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -667,15 +667,6 @@ static int enetc_pf_set_features(struct net_device *ndev,
+ 				 netdev_features_t features)
+ {
+ 	netdev_features_t changed = ndev->features ^ features;
+-	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+-
+-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+-		enetc_enable_rxvlan(&priv->si->hw, 0,
+-				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+-
+-	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+-		enetc_enable_txvlan(&priv->si->hw, 0,
+-				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+ 
+ 	if (changed & NETIF_F_LOOPBACK)
+ 		enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+@@ -739,6 +730,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ 	if (si->hw_features & ENETC_SI_F_QBV)
+ 		priv->active_offloads |= ENETC_F_QBV;
+ 
++	if (si->hw_features & ENETC_SI_F_PSFP) {
++		priv->active_offloads |= ENETC_F_QCI;
++		ndev->features |= NETIF_F_HW_TC;
++		ndev->hw_features |= NETIF_F_HW_TC;
++		enetc_get_max_cap(priv);
++		enetc_psfp_enable(&si->hw);
++	}
++
+ 	/* pick up primary MAC address from SI */
+ 	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ }
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 355be77f4241..3cf4dc3433f9 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1324,7 +1324,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ 
+ 	if (pdata) {
+-		cancel_delayed_work(&pdata->carrier_check);
++		cancel_delayed_work_sync(&pdata->carrier_check);
+ 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ 		kfree(pdata);
+ 		pdata = NULL;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7b4cbe2c6954..71d63ed62071 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1120,10 +1120,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
+ 		dev_warn(ctrl->device,
+ 			"Identify Descriptors failed (%d)\n", status);
+ 		 /*
+-		  * Don't treat an error as fatal, as we potentially already
+-		  * have a NGUID or EUI-64.
++		  * Don't treat non-retryable errors as fatal, as we potentially
++		  * already have a NGUID or EUI-64.  If we failed with DNR set,
++		  * we want to silently ignore the error as we can still
++		  * identify the device, but if the status does not have DNR
++		  * set, we want to propagate the error back specifically for
++		  * the disk revalidation flow to make sure we don't abandon
++		  * the device just because of a temporary retryable error
++		  * (such as path or transport errors).
+ 		  */
+-		if (status > 0 && !(status & NVME_SC_DNR))
++		if (status > 0 && (status & NVME_SC_DNR))
+ 			status = 0;
+ 		goto free_data;
+ 	}
+@@ -1910,14 +1916,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 	if (ns->head->disk) {
+ 		nvme_update_disk_info(ns->head->disk, ns, id);
+ 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+-		if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+-			struct backing_dev_info *info =
+-				ns->head->disk->queue->backing_dev_info;
+-
+-                        info->capabilities |= BDI_CAP_STABLE_WRITES;
+-		}
+-
+-		revalidate_disk(ns->head->disk);
+ 	}
+ #endif
+ }
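
NVME_SC_DNR ("do not retry") marks a failure as permanent, so the corrected check swallows only those and lets transient path or transport errors reach the revalidation caller. A compact sketch of that filtering policy (the status values are made up):

#include <stdio.h>

#define NVME_SC_DNR 0x4000  /* Do Not Retry bit, as in the NVMe spec */

/* Mirror of the patched policy: ignore non-retryable failures since
 * a NGUID/EUI-64 may already identify the namespace; report the rest. */
static int filter_identify_status(int status)
{
    if (status > 0 && (status & NVME_SC_DNR))
        return 0;       /* permanent: keep going with existing IDs */
    return status;      /* transient: let revalidation see it */
}

int main(void)
{
    printf("%d %d\n", filter_identify_status(0x4002),
           filter_identify_status(0x0002));  /* prints: 0 2 */
    return 0;
}
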
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 17f172cf456a..36db7d2e6a89 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -3,6 +3,7 @@
+  * Copyright (c) 2017-2018 Christoph Hellwig.
+  */
+ 
++#include <linux/backing-dev.h>
+ #include <linux/moduleparam.h>
+ #include <trace/events/block.h>
+ #include "nvme.h"
+@@ -412,11 +413,11 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ 	if (!head->disk)
+ 		return;
+ 
+-	mutex_lock(&head->lock);
+-	if (!(head->disk->flags & GENHD_FL_UP))
++	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ 		device_add_disk(&head->subsys->dev, head->disk,
+ 				nvme_ns_id_attr_groups);
+ 
++	mutex_lock(&head->lock);
+ 	if (nvme_path_is_optimized(ns)) {
+ 		int node, srcu_idx;
+ 
+@@ -638,30 +639,46 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
+ }
+ DEVICE_ATTR_RO(ana_state);
+ 
+-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
++static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
+ 		struct nvme_ana_group_desc *desc, void *data)
+ {
+-	struct nvme_ns *ns = data;
++	struct nvme_ana_group_desc *dst = data;
+ 
+-	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
+-		nvme_update_ns_ana_state(desc, ns);
+-		return -ENXIO; /* just break out of the loop */
+-	}
++	if (desc->grpid != dst->grpid)
++		return 0;
+ 
+-	return 0;
++	*dst = *desc;
++	return -ENXIO; /* just break out of the loop */
+ }
+ 
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+ 	if (nvme_ctrl_use_ana(ns->ctrl)) {
++		struct nvme_ana_group_desc desc = {
++			.grpid = id->anagrpid,
++			.state = 0,
++		};
++
+ 		mutex_lock(&ns->ctrl->ana_lock);
+ 		ns->ana_grpid = le32_to_cpu(id->anagrpid);
+-		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
++		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
+ 		mutex_unlock(&ns->ctrl->ana_lock);
++		if (desc.state) {
++			/* found the group desc: update */
++			nvme_update_ns_ana_state(&desc, ns);
++		}
+ 	} else {
+ 		ns->ana_state = NVME_ANA_OPTIMIZED; 
+ 		nvme_mpath_set_live(ns);
+ 	}
++
++	if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
++		struct gendisk *disk = ns->head->disk;
++
++		if (disk)
++			disk->queue->backing_dev_info->capabilities |=
++					BDI_CAP_STABLE_WRITES;
++	}
+ }
+ 
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+@@ -675,6 +692,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ 	kblockd_schedule_work(&head->requeue_work);
+ 	flush_work(&head->requeue_work);
+ 	blk_cleanup_queue(head->disk->queue);
++	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
++		/*
++		 * if device_add_disk wasn't called, prevent the
++		 * disk release path from putting a bogus reference
++		 * on the request queue
++		 */
++		head->disk->queue = NULL;
++	}
+ 	put_disk(head->disk);
+ }
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 2e04a36296d9..719342600be6 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -359,6 +359,8 @@ struct nvme_ns_head {
+ 	spinlock_t		requeue_lock;
+ 	struct work_struct	requeue_work;
+ 	struct mutex		lock;
++	unsigned long		flags;
++#define NVME_NSHEAD_DISK_LIVE	0
+ 	struct nvme_ns __rcu	*current_path[];
+ #endif
+ };
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index caa6b840e459..cfbb4294fb8b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -5933,7 +5933,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
+ 			break;
+ 		}
+ 
+-		if (NVME_TARGET(vha->hw, fcport)) {
++		if (found && NVME_TARGET(vha->hw, fcport)) {
+ 			if (fcport->disc_state == DSC_DELETE_PEND) {
+ 				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
+ 				vha->fcport_count--;
+diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
+index 96c6f777519c..c9b3f9ebf0bb 100644
+--- a/drivers/soc/ti/omap_prm.c
++++ b/drivers/soc/ti/omap_prm.c
+@@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
+ 		goto exit;
+ 
+ 	/* wait for the status to be set */
+-	ret = readl_relaxed_poll_timeout(reset->prm->base +
+-					 reset->prm->data->rstst,
+-					 v, v & BIT(st_bit), 1,
+-					 OMAP_RESET_MAX_WAIT);
++	ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
++						 reset->prm->data->rstst,
++						 v, v & BIT(st_bit), 1,
++						 OMAP_RESET_MAX_WAIT);
+ 	if (ret)
+ 		pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ 		       reset->prm->data->name, id);
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 88176eaca448..856a4a0edcc7 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1105,6 +1105,8 @@ static int dspi_suspend(struct device *dev)
+ 	struct spi_controller *ctlr = dev_get_drvdata(dev);
+ 	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ 
++	if (dspi->irq)
++		disable_irq(dspi->irq);
+ 	spi_controller_suspend(ctlr);
+ 	clk_disable_unprepare(dspi->clk);
+ 
+@@ -1125,6 +1127,8 @@ static int dspi_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 	spi_controller_resume(ctlr);
++	if (dspi->irq)
++		enable_irq(dspi->irq);
+ 
+ 	return 0;
+ }
+@@ -1381,8 +1385,8 @@ static int dspi_probe(struct platform_device *pdev)
+ 		goto poll_mode;
+ 	}
+ 
+-	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
+-			       IRQF_SHARED, pdev->name, dspi);
++	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
++				   IRQF_SHARED, pdev->name, dspi);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ 		goto out_clk_put;
+@@ -1396,7 +1400,7 @@ poll_mode:
+ 		ret = dspi_request_dma(dspi, res->start);
+ 		if (ret < 0) {
+ 			dev_err(&pdev->dev, "can't get dma channels\n");
+-			goto out_clk_put;
++			goto out_free_irq;
+ 		}
+ 	}
+ 
+@@ -1411,11 +1415,14 @@ poll_mode:
+ 	ret = spi_register_controller(ctlr);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
+-		goto out_clk_put;
++		goto out_free_irq;
+ 	}
+ 
+ 	return ret;
+ 
++out_free_irq:
++	if (dspi->irq)
++		free_irq(dspi->irq, dspi);
+ out_clk_put:
+ 	clk_disable_unprepare(dspi->clk);
+ out_ctlr_put:
+@@ -1431,6 +1438,8 @@ static int dspi_remove(struct platform_device *pdev)
+ 
+ 	/* Disconnect from the SPI framework */
+ 	dspi_release_dma(dspi);
++	if (dspi->irq)
++		free_irq(dspi->irq, dspi);
+ 	clk_disable_unprepare(dspi->clk);
+ 	spi_unregister_controller(dspi->ctlr);
+ 
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index e297e135c031..bdc887cb4b63 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+ {
+ 	int i;
+ 
+-	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+-		if (power > cpufreq_cdev->em->table[i].power)
++	for (i = cpufreq_cdev->max_level; i >= 0; i--) {
++		if (power >= cpufreq_cdev->em->table[i].power)
+ 			break;
+ 	}
+ 
+-	return cpufreq_cdev->em->table[i + 1].frequency;
++	return cpufreq_cdev->em->table[i].frequency;
+ }
+ 
+ /**
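
The energy-model table is sorted by ascending power, and the corrected loop returns the highest performance state whose power does not exceed the request instead of indexing one slot past the match. A sketch with a made-up three-entry table (the kernel caller is assumed to clamp the request to at least the lowest state's power, so the index cannot underflow):

#include <stdio.h>

struct em_entry { unsigned long power, frequency; };

/* Ascending by power; index max_level is the highest OPP (made-up data) */
static const struct em_entry table[] = {
    { 100,  500000 }, { 250, 1000000 }, { 600, 1500000 },
};
static const int max_level = 2;

static unsigned long power_to_freq(unsigned long power)
{
    int i;

    /* Patched scan: first (highest) state fitting the power budget */
    for (i = max_level; i >= 0; i--)
        if (power >= table[i].power)
            break;
    return table[i].frequency;
}

int main(void)
{
    /* 240 mW: the old loop picked the 250 mW state, over budget */
    printf("%lu\n", power_to_freq(240));  /* prints 500000 */
    printf("%lu\n", power_to_freq(600));  /* prints 1500000 */
    return 0;
}
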
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 76e30603d4d5..6b7ef1993d7e 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -211,6 +211,9 @@ enum {
+ /* The total number of temperature sensors in the MT8183 */
+ #define MT8183_NUM_SENSORS	6
+ 
++/* The number of banks in the MT8183 */
++#define MT8183_NUM_ZONES               1
++
+ /* The number of sensing points per bank */
+ #define MT8183_NUM_SENSORS_PER_ZONE	 6
+ 
+@@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
+  */
+ static const struct mtk_thermal_data mt8183_thermal_data = {
+ 	.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
+-	.num_banks = MT8183_NUM_SENSORS_PER_ZONE,
++	.num_banks = MT8183_NUM_ZONES,
+ 	.num_sensors = MT8183_NUM_SENSORS,
+ 	.vts_index = mt8183_vts_index,
+ 	.cali_val = MT8183_CALIBRATION,
+diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
+index 58fe7c1ef00b..c48c5e9b8f20 100644
+--- a/drivers/thermal/rcar_gen3_thermal.c
++++ b/drivers/thermal/rcar_gen3_thermal.c
+@@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
+ {
+ 	struct rcar_gen3_thermal_tsc *tsc = devdata;
+ 	int mcelsius, val;
+-	u32 reg;
++	int reg;
+ 
+ 	/* Read register and convert to millidegrees Celsius */
+ 	reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
+diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
+index a340374e8c51..4cde70dcf655 100644
+--- a/drivers/thermal/sprd_thermal.c
++++ b/drivers/thermal/sprd_thermal.c
+@@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
+ 
+ 	thm->var_data = pdata;
+ 	thm->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (!thm->base)
+-		return -ENOMEM;
++	if (IS_ERR(thm->base))
++		return PTR_ERR(thm->base);
+ 
+ 	thm->nr_sensors = of_get_child_count(np);
+ 	if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 98ada1a3425c..bae88893ee8e 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf)
+ 
+ 	usb_set_intfdata(intf, NULL);
+ 	dev_dbg(&intf->dev, "disconnect\n");
++	kfree(dev->buf);
+ 	kfree(dev);
+ }
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 0c17f18b4794..1b1c86953008 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -863,11 +863,34 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
+ 	}
+ }
+ 
++static int remove_block_group_item(struct btrfs_trans_handle *trans,
++				   struct btrfs_path *path,
++				   struct btrfs_block_group *block_group)
++{
++	struct btrfs_fs_info *fs_info = trans->fs_info;
++	struct btrfs_root *root;
++	struct btrfs_key key;
++	int ret;
++
++	root = fs_info->extent_root;
++	key.objectid = block_group->start;
++	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
++	key.offset = block_group->length;
++
++	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
++	if (ret > 0)
++		ret = -ENOENT;
++	if (ret < 0)
++		return ret;
++
++	ret = btrfs_del_item(trans, root, path);
++	return ret;
++}
++
+ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 			     u64 group_start, struct extent_map *em)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+-	struct btrfs_root *root = fs_info->extent_root;
+ 	struct btrfs_path *path;
+ 	struct btrfs_block_group *block_group;
+ 	struct btrfs_free_cluster *cluster;
+@@ -1068,9 +1091,24 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 
+ 	spin_unlock(&block_group->space_info->lock);
+ 
+-	key.objectid = block_group->start;
+-	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+-	key.offset = block_group->length;
++	/*
++	 * Remove the free space for the block group from the free space tree
++	 * and the block group's item from the extent tree before marking the
++	 * block group as removed. This is to prevent races between a task
++	 * that freezes and unfreezes a block group, this task, and another
++	 * task allocating a new block group: the unfreeze task ends up removing
++	 * the block group's extent map before the task calling this function
++	 * deletes the block group item from the extent tree, allowing for
++	 * another task to attempt to create another block group with the same
++	 * item key (and failing with -EEXIST and a transaction abort).
++	 */
++	ret = remove_block_group_free_space(trans, block_group);
++	if (ret)
++		goto out;
++
++	ret = remove_block_group_item(trans, path, block_group);
++	if (ret < 0)
++		goto out;
+ 
+ 	mutex_lock(&fs_info->chunk_mutex);
+ 	spin_lock(&block_group->lock);
+@@ -1103,20 +1141,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 
+ 	mutex_unlock(&fs_info->chunk_mutex);
+ 
+-	ret = remove_block_group_free_space(trans, block_group);
+-	if (ret)
+-		goto out;
+-
+-	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+-	if (ret > 0)
+-		ret = -EIO;
+-	if (ret < 0)
+-		goto out;
+-
+-	ret = btrfs_del_item(trans, root, path);
+-	if (ret)
+-		goto out;
+-
+ 	if (remove_em) {
+ 		struct extent_map_tree *em_tree;
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 52d565ff66e2..93244934d4f9 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1541,7 +1541,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
+ }
+ 
+ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
+-				    size_t *write_bytes)
++				    size_t *write_bytes, bool nowait)
+ {
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	struct btrfs_root *root = inode->root;
+@@ -1549,27 +1549,43 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
+ 	u64 num_bytes;
+ 	int ret;
+ 
+-	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
++	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
+ 		return -EAGAIN;
+ 
+ 	lockstart = round_down(pos, fs_info->sectorsize);
+ 	lockend = round_up(pos + *write_bytes,
+ 			   fs_info->sectorsize) - 1;
++	num_bytes = lockend - lockstart + 1;
+ 
+-	btrfs_lock_and_flush_ordered_range(inode, lockstart,
+-					   lockend, NULL);
++	if (nowait) {
++		struct btrfs_ordered_extent *ordered;
++
++		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
++			return -EAGAIN;
++
++		ordered = btrfs_lookup_ordered_range(inode, lockstart,
++						     num_bytes);
++		if (ordered) {
++			btrfs_put_ordered_extent(ordered);
++			ret = -EAGAIN;
++			goto out_unlock;
++		}
++	} else {
++		btrfs_lock_and_flush_ordered_range(inode, lockstart,
++						   lockend, NULL);
++	}
+ 
+-	num_bytes = lockend - lockstart + 1;
+ 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+ 			NULL, NULL, NULL);
+ 	if (ret <= 0) {
+ 		ret = 0;
+-		btrfs_drew_write_unlock(&root->snapshot_lock);
++		if (!nowait)
++			btrfs_drew_write_unlock(&root->snapshot_lock);
+ 	} else {
+ 		*write_bytes = min_t(size_t, *write_bytes ,
+ 				     num_bytes - pos + lockstart);
+ 	}
+-
++out_unlock:
+ 	unlock_extent(&inode->io_tree, lockstart, lockend);
+ 
+ 	return ret;
+@@ -1641,7 +1657,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
+ 			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ 						      BTRFS_INODE_PREALLOC)) &&
+ 			    check_can_nocow(BTRFS_I(inode), pos,
+-					&write_bytes) > 0) {
++					    &write_bytes, false) > 0) {
+ 				/*
+ 				 * For nodata cow case, no need to reserve
+ 				 * data space.
+@@ -1920,12 +1936,11 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ 		 */
+ 		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ 					      BTRFS_INODE_PREALLOC)) ||
+-		    check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes) <= 0) {
++		    check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
++				    true) <= 0) {
+ 			inode_unlock(inode);
+ 			return -EAGAIN;
+ 		}
+-		/* check_can_nocow() locks the snapshot lock on success */
+-		btrfs_drew_write_unlock(&root->snapshot_lock);
+ 		/*
+ 		 * There are holes in the range or parts of the range that must
+ 		 * be COWed (shared extents, RO block groups, etc), so just bail
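
The new nowait flag turns every blocking step of check_can_nocow() into a try variant: try_lock_extent() instead of waiting out the ordered range, and -EAGAIN back to the caller so the write can be retried from a context that is allowed to sleep. A generic user-space sketch of that try-lock fast path (pthreads standing in for the extent lock; link with -lpthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch of the nowait pattern: never block, return -EAGAIN so the
 * caller can fall back to a sleepable retry path. */
static int do_checks(int nowait)
{
    if (nowait) {
        if (pthread_mutex_trylock(&lock))
            return -EAGAIN;
    } else {
        pthread_mutex_lock(&lock);
    }

    /* ... perform the nocow checks under the lock ... */

    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    printf("%d\n", do_checks(1));  /* prints 0: the lock was free */
    return 0;
}
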
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 47b9fbb70bf5..bda8615f8c33 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -5306,9 +5306,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 	vol_info->nocase = master_tcon->nocase;
+ 	vol_info->nohandlecache = master_tcon->nohandlecache;
+ 	vol_info->local_lease = master_tcon->local_lease;
++	vol_info->no_lease = master_tcon->no_lease;
++	vol_info->resilient = master_tcon->use_resilient;
++	vol_info->persistent = master_tcon->use_persistent;
++	vol_info->handle_timeout = master_tcon->handle_timeout;
+ 	vol_info->no_linux_ext = !master_tcon->unix_ext;
++	vol_info->linux_ext = master_tcon->posix_extensions;
+ 	vol_info->sectype = master_tcon->ses->sectype;
+ 	vol_info->sign = master_tcon->ses->sign;
++	vol_info->seal = master_tcon->seal;
+ 
+ 	rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
+ 	if (rc) {
+@@ -5334,10 +5340,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 		goto out;
+ 	}
+ 
+-	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
+-	if (tcon->posix_extensions)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
+-
+ 	if (cap_unix(ses))
+ 		reset_cifs_unix_caps(0, tcon, NULL, vol_info);
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 5d2965a23730..430b0b125654 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1855,6 +1855,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
+ 	FILE_UNIX_BASIC_INFO *info_buf_target;
+ 	unsigned int xid;
+ 	int rc, tmprc;
++	bool new_target = d_really_is_negative(target_dentry);
+ 
+ 	if (flags & ~RENAME_NOREPLACE)
+ 		return -EINVAL;
+@@ -1931,8 +1932,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
+ 	 */
+ 
+ unlink_target:
+-	/* Try unlinking the target dentry if it's not negative */
+-	if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
++	/*
++	 * If the target dentry was created during the rename, try
++	 * unlinking it if it's not negative
++	 */
++	if (new_target &&
++	    d_really_is_positive(target_dentry) &&
++	    (rc == -EACCES || rc == -EEXIST)) {
+ 		if (d_is_dir(target_dentry))
+ 			tmprc = cifs_rmdir(target_dir, target_dentry);
+ 		else
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 4b91afb0f051..6db302d76d4c 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -314,7 +314,7 @@ const struct file_operations exfat_dir_operations = {
+ 	.llseek		= generic_file_llseek,
+ 	.read		= generic_read_dir,
+ 	.iterate	= exfat_iterate,
+-	.fsync		= generic_file_fsync,
++	.fsync		= exfat_file_fsync,
+ };
+ 
+ int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
+@@ -430,10 +430,12 @@ static void exfat_init_name_entry(struct exfat_dentry *ep,
+ 	ep->dentry.name.flags = 0x0;
+ 
+ 	for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
+-		ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+-		if (*uniname == 0x0)
+-			break;
+-		uniname++;
++		if (*uniname != 0x0) {
++			ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
++			uniname++;
++		} else {
++			ep->dentry.name.unicode_0_14[i] = 0x0;
++		}
+ 	}
+ }
+ 
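
The old loop broke out at the terminating NUL and left whatever bytes happened to be in the buffer after it, leaking stale data into the on-disk name entry; the rewritten loop zero-fills the tail instead. A user-space sketch of the padded copy (NAME_LEN mirrors EXFAT_FILE_NAME_LEN and the cpu_to_le16() step is elided):

#include <stdio.h>
#include <stdint.h>

#define NAME_LEN 15  /* UTF-16 code units per exFAT name entry */

/* Copy the name and zero the remainder, as the patched loop does */
static void init_name_entry(uint16_t out[NAME_LEN], const uint16_t *uni)
{
    int i;

    for (i = 0; i < NAME_LEN; i++) {
        if (*uni != 0)
            out[i] = *uni++;   /* cpu_to_le16() on the real disk entry */
        else
            out[i] = 0;        /* pad instead of breaking early */
    }
}

int main(void)
{
    uint16_t name[] = { 'a', 'b', 0 };
    uint16_t entry[NAME_LEN];

    init_name_entry(entry, name);
    printf("%c%c pad=%u\n", entry[0], entry[1], entry[14]);  /* ab pad=0 */
    return 0;
}
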
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index d67fb8a6f770..d865050fa6cd 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -424,6 +424,7 @@ void exfat_truncate(struct inode *inode, loff_t size);
+ int exfat_setattr(struct dentry *dentry, struct iattr *attr);
+ int exfat_getattr(const struct path *path, struct kstat *stat,
+ 		unsigned int request_mask, unsigned int query_flags);
++int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
+ 
+ /* namei.c */
+ extern const struct dentry_operations exfat_dentry_ops;
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index 5b4ddff18731..b93aa9e6cb16 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -6,6 +6,7 @@
+ #include <linux/slab.h>
+ #include <linux/cred.h>
+ #include <linux/buffer_head.h>
++#include <linux/blkdev.h>
+ 
+ #include "exfat_raw.h"
+ #include "exfat_fs.h"
+@@ -347,12 +348,28 @@ out:
+ 	return error;
+ }
+ 
++int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
++{
++	struct inode *inode = filp->f_mapping->host;
++	int err;
++
++	err = __generic_file_fsync(filp, start, end, datasync);
++	if (err)
++		return err;
++
++	err = sync_blockdev(inode->i_sb->s_bdev);
++	if (err)
++		return err;
++
++	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
++}
++
+ const struct file_operations exfat_file_operations = {
+ 	.llseek		= generic_file_llseek,
+ 	.read_iter	= generic_file_read_iter,
+ 	.write_iter	= generic_file_write_iter,
+ 	.mmap		= generic_file_mmap,
+-	.fsync		= generic_file_fsync,
++	.fsync		= exfat_file_fsync,
+ 	.splice_read	= generic_file_splice_read,
+ 	.splice_write	= iter_file_splice_write,
+ };
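generic_file_fsync() left exFAT's metadata buffers (FAT, allocation bitmap) on the block device unflushed; the new exfat_file_fsync() syncs the block device and issues a cache flush on top. A rough userspace analogue of that file-first-then-device ordering, assuming Linux's fsync()/syncfs() semantics — the kernel helpers above have no direct userspace equivalents:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.txt", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0 || write(fd, "data\n", 5) != 5) {
		perror("open/write");
		return 1;
	}
	if (fsync(fd) < 0) {		/* the file's own data and metadata */
		perror("fsync");
		return 1;
	}
	if (syncfs(fd) < 0) {		/* the rest of the filesystem, akin to sync_blockdev() */
		perror("syncfs");
		return 1;
	}
	close(fd);
	return 0;
}
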
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index a2659a8a68a1..2c9c78317721 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -984,7 +984,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+ 		goto unlock;
+ 	}
+ 
+-	exfat_set_vol_flags(sb, VOL_DIRTY);
+ 	exfat_chain_set(&clu_to_free, ei->start_clu,
+ 		EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
+ 
+@@ -1012,6 +1011,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+ 	num_entries++;
+ 	brelse(bh);
+ 
++	exfat_set_vol_flags(sb, VOL_DIRTY);
+ 	err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
+ 	if (err) {
+ 		exfat_msg(sb, KERN_ERR,
+@@ -1089,10 +1089,14 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+ 
+ 		epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
+ 			&sector_old);
++		if (!epold)
++			return -EIO;
+ 		epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
+ 			&sector_new);
+-		if (!epold || !epnew)
++		if (!epnew) {
++			brelse(old_bh);
+ 			return -EIO;
++		}
+ 
+ 		memcpy(epnew, epold, DENTRY_SIZE);
+ 		exfat_update_bh(sb, new_bh, sync);
+@@ -1173,10 +1177,14 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+ 
+ 	epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
+ 		&sector_mov);
++	if (!epmov)
++		return -EIO;
+ 	epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
+ 		&sector_new);
+-	if (!epmov || !epnew)
++	if (!epnew) {
++		brelse(mov_bh);
+ 		return -EIO;
++	}
+ 
+ 	memcpy(epnew, epmov, DENTRY_SIZE);
+ 	exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
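Both rename hunks fix the same leak: the old code checked the two exfat_get_dentry() results together, so a failed second lookup leaked the buffer_head from the first. The shape of the fix — acquire two resources, release the first when the second acquisition fails — in plain C:

#include <stdio.h>

static int use_two_files(const char *a, const char *b)
{
	FILE *fa = fopen(a, "r");
	FILE *fb;

	if (!fa)
		return -1;
	fb = fopen(b, "w");
	if (!fb) {
		fclose(fa);	/* mirrors the added brelse(old_bh)/brelse(mov_bh) */
		return -1;
	}
	/* ... work with both files ... */
	fclose(fb);
	fclose(fa);
	return 0;
}

int main(void)
{
	return use_two_files("in.txt", "out.txt") ? 1 : 0;
}
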
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index c1b1ed306a48..e87980153398 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -637,10 +637,20 @@ static void exfat_free(struct fs_context *fc)
+ 	}
+ }
+ 
++static int exfat_reconfigure(struct fs_context *fc)
++{
++	fc->sb_flags |= SB_NODIRATIME;
++
++	/* volume flag will be updated in exfat_sync_fs */
++	sync_filesystem(fc->root->d_sb);
++	return 0;
++}
++
+ static const struct fs_context_operations exfat_context_ops = {
+ 	.parse_param	= exfat_parse_param,
+ 	.get_tree	= exfat_get_tree,
+ 	.free		= exfat_free,
++	.reconfigure	= exfat_reconfigure,
+ };
+ 
+ static int exfat_init_fs_context(struct fs_context *fc)
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index b7a5221bea7d..04882712cd66 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -987,6 +987,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 
+ out:
+ 	if (gfs2_withdrawn(sdp)) {
++		/*
++		 * If the tr_list is empty, we're withdrawing during a log
++		 * flush that targets a transaction, but the transaction was
++		 * never queued onto any of the ail lists. Here we add it to
++		 * ail1 just so that ail_drain() will find and free it.
++		 */
++		spin_lock(&sdp->sd_ail_lock);
++		if (tr && list_empty(&tr->tr_list))
++			list_add(&tr->tr_list, &sdp->sd_ail1_list);
++		spin_unlock(&sdp->sd_ail_lock);
+ 		ail_drain(sdp); /* frees all transactions */
+ 		tr = NULL;
+ 	}
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4ab1728de247..2be6ea010340 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -858,6 +858,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ 				 struct io_uring_files_update *ip,
+ 				 unsigned nr_args);
+ static int io_grab_files(struct io_kiocb *req);
++static void io_complete_rw_common(struct kiocb *kiocb, long res);
+ static void io_cleanup_req(struct io_kiocb *req);
+ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
+ 		       int fd, struct file **out_file, bool fixed);
+@@ -1697,6 +1698,14 @@ static void io_iopoll_queue(struct list_head *again)
+ 	do {
+ 		req = list_first_entry(again, struct io_kiocb, list);
+ 		list_del(&req->list);
++
++		/* shouldn't happen unless io_uring is dying, cancel reqs */
++		if (unlikely(!current->mm)) {
++			io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
++			io_put_req(req);
++			continue;
++		}
++
+ 		refcount_inc(&req->refs);
+ 		io_queue_async_work(req);
+ 	} while (!list_empty(again));
+@@ -2748,6 +2757,8 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+ 	if (req->flags & REQ_F_NEED_CLEANUP)
+ 		return 0;
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 
+ 	sp->file_in = NULL;
+ 	sp->off_in = READ_ONCE(sqe->splice_off_in);
+@@ -2910,6 +2921,8 @@ static int io_fallocate_prep(struct io_kiocb *req,
+ {
+ 	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+ 		return -EINVAL;
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 
+ 	req->sync.off = READ_ONCE(sqe->off);
+ 	req->sync.len = READ_ONCE(sqe->addr);
+@@ -2935,6 +2948,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	const char __user *fname;
+ 	int ret;
+ 
++	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
++		return -EINVAL;
+ 	if (sqe->ioprio || sqe->buf_index)
+ 		return -EINVAL;
+ 	if (req->flags & REQ_F_FIXED_FILE)
+@@ -2968,6 +2983,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	size_t len;
+ 	int ret;
+ 
++	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
++		return -EINVAL;
+ 	if (sqe->ioprio || sqe->buf_index)
+ 		return -EINVAL;
+ 	if (req->flags & REQ_F_FIXED_FILE)
+@@ -3207,6 +3224,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
+ #if defined(CONFIG_EPOLL)
+ 	if (sqe->ioprio || sqe->buf_index)
+ 		return -EINVAL;
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 
+ 	req->epoll.epfd = READ_ONCE(sqe->fd);
+ 	req->epoll.op = READ_ONCE(sqe->len);
+@@ -3251,6 +3270,8 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ 	if (sqe->ioprio || sqe->buf_index || sqe->off)
+ 		return -EINVAL;
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 
+ 	req->madvise.addr = READ_ONCE(sqe->addr);
+ 	req->madvise.len = READ_ONCE(sqe->len);
+@@ -3285,6 +3306,8 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	if (sqe->ioprio || sqe->buf_index || sqe->addr)
+ 		return -EINVAL;
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 
+ 	req->fadvise.offset = READ_ONCE(sqe->off);
+ 	req->fadvise.len = READ_ONCE(sqe->len);
+@@ -3322,6 +3345,8 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	unsigned lookup_flags;
+ 	int ret;
+ 
++	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
++		return -EINVAL;
+ 	if (sqe->ioprio || sqe->buf_index)
+ 		return -EINVAL;
+ 	if (req->flags & REQ_F_FIXED_FILE)
+@@ -3402,6 +3427,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	 */
+ 	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+ 
++	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
++		return -EINVAL;
+ 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+ 	    sqe->rw_flags || sqe->buf_index)
+ 		return -EINVAL;
+@@ -4109,6 +4136,29 @@ struct io_poll_table {
+ 	int error;
+ };
+ 
++static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
++{
++	struct task_struct *tsk = req->task;
++	struct io_ring_ctx *ctx = req->ctx;
++	int ret, notify = TWA_RESUME;
++
++	/*
++	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
++	 * If we're not using an eventfd, then TWA_RESUME is always fine,
++	 * as we won't have dependencies between request completions for
++	 * other kernel wait conditions.
++	 */
++	if (ctx->flags & IORING_SETUP_SQPOLL)
++		notify = 0;
++	else if (ctx->cq_ev_fd)
++		notify = TWA_SIGNAL;
++
++	ret = task_work_add(tsk, cb, notify);
++	if (!ret)
++		wake_up_process(tsk);
++	return ret;
++}
++
+ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ 			   __poll_t mask, task_work_func_t func)
+ {
+@@ -4132,13 +4182,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ 	 * of executing it. We can't safely execute it anyway, as we may not
+ 	 * have the state needed for it.
+ 	 */
+-	ret = task_work_add(tsk, &req->task_work, true);
++	ret = io_req_task_work_add(req, &req->task_work);
+ 	if (unlikely(ret)) {
+ 		WRITE_ONCE(poll->canceled, true);
+ 		tsk = io_wq_get_task(req->ctx->io_wq);
+-		task_work_add(tsk, &req->task_work, true);
++		task_work_add(tsk, &req->task_work, 0);
++		wake_up_process(tsk);
+ 	}
+-	wake_up_process(tsk);
+ 	return 1;
+ }
+ 
+@@ -6066,7 +6116,7 @@ static int io_sq_thread(void *data)
+ 		 * If submit got -EBUSY, flag us as needing the application
+ 		 * to enter the kernel to reap and flush events.
+ 		 */
+-		if (!to_submit || ret == -EBUSY) {
++		if (!to_submit || ret == -EBUSY || need_resched()) {
+ 			/*
+ 			 * Drop cur_mm before scheduling, we can't hold it for
+ 			 * long periods (or over schedule()). Do this before
+@@ -6082,7 +6132,7 @@ static int io_sq_thread(void *data)
+ 			 * more IO, we should wait for the application to
+ 			 * reap events and wake us up.
+ 			 */
+-			if (!list_empty(&ctx->poll_list) ||
++			if (!list_empty(&ctx->poll_list) || need_resched() ||
+ 			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
+ 			    !percpu_ref_is_dying(&ctx->refs))) {
+ 				if (current->task_works)
+@@ -6233,15 +6283,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 	do {
+ 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+ 						TASK_INTERRUPTIBLE);
++		/* make sure we run task_work before checking for signals */
+ 		if (current->task_works)
+ 			task_work_run();
+-		if (io_should_wake(&iowq, false))
+-			break;
+-		schedule();
+ 		if (signal_pending(current)) {
++			if (current->jobctl & JOBCTL_TASK_WORK) {
++				spin_lock_irq(&current->sighand->siglock);
++				current->jobctl &= ~JOBCTL_TASK_WORK;
++				recalc_sigpending();
++				spin_unlock_irq(&current->sighand->siglock);
++				continue;
++			}
+ 			ret = -EINTR;
+ 			break;
+ 		}
++		if (io_should_wake(&iowq, false))
++			break;
++		schedule();
+ 	} while (1);
+ 	finish_wait(&ctx->wait, &iowq.wq);
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c107caa56525..bdfae3ba3953 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7859,9 +7859,14 @@ nfs4_state_start_net(struct net *net)
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	int ret;
+ 
+-	ret = nfs4_state_create_net(net);
++	ret = get_nfsdfs(net);
+ 	if (ret)
+ 		return ret;
++	ret = nfs4_state_create_net(net);
++	if (ret) {
++		mntput(nn->nfsd_mnt);
++		return ret;
++	}
+ 	locks_start_grace(net, &nn->nfsd4_manager);
+ 	nfsd4_client_tracking_init(net);
+ 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
+@@ -7930,6 +7935,7 @@ nfs4_state_shutdown_net(struct net *net)
+ 
+ 	nfsd4_client_tracking_exit(net);
+ 	nfs4_state_destroy_net(net);
++	mntput(nn->nfsd_mnt);
+ }
+ 
+ void
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 71687d99b090..f298aad41070 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
+ 	WARN_ON_ONCE(ret);
+ 	fsnotify_rmdir(dir, dentry);
+ 	d_delete(dentry);
++	dput(dentry);
+ 	inode_unlock(dir);
+ }
+ 
+@@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = {
+ };
+ MODULE_ALIAS_FS("nfsd");
+ 
++int get_nfsdfs(struct net *net)
++{
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct vfsmount *mnt;
++
++	mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
++	if (IS_ERR(mnt))
++		return PTR_ERR(mnt);
++	nn->nfsd_mnt = mnt;
++	return 0;
++}
++
+ #ifdef CONFIG_PROC_FS
+ static int create_proc_exports_entry(void)
+ {
+@@ -1451,7 +1464,6 @@ unsigned int nfsd_net_id;
+ static __net_init int nfsd_init_net(struct net *net)
+ {
+ 	int retval;
+-	struct vfsmount *mnt;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+ 	retval = nfsd_export_init(net);
+@@ -1478,16 +1490,8 @@ static __net_init int nfsd_init_net(struct net *net)
+ 	init_waitqueue_head(&nn->ntf_wq);
+ 	seqlock_init(&nn->boot_lock);
+ 
+-	mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+-	if (IS_ERR(mnt)) {
+-		retval = PTR_ERR(mnt);
+-		goto out_mount_err;
+-	}
+-	nn->nfsd_mnt = mnt;
+ 	return 0;
+ 
+-out_mount_err:
+-	nfsd_reply_cache_shutdown(nn);
+ out_drc_error:
+ 	nfsd_idmap_shutdown(net);
+ out_idmap_error:
+@@ -1500,7 +1504,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	mntput(nn->nfsd_mnt);
+ 	nfsd_reply_cache_shutdown(nn);
+ 	nfsd_idmap_shutdown(net);
+ 	nfsd_export_shutdown(net);
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 2ab5569126b8..b61de3cd69b7 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -88,6 +88,8 @@ int		nfsd_pool_stats_release(struct inode *, struct file *);
+ 
+ void		nfsd_destroy(struct net *net);
+ 
++int get_nfsdfs(struct net *);
++
+ struct nfsdfs_client {
+ 	struct kref cl_ref;
+ 	void (*cl_release)(struct kref *kref);
+@@ -98,6 +100,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
+ 		struct nfsdfs_client *ncl, u32 id, const struct tree_descr *);
+ void nfsd_client_rmdir(struct dentry *dentry);
+ 
++
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ #ifdef CONFIG_NFSD_V2_ACL
+ extern const struct svc_version nfsd_acl_version2;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 0aa02eb18bd3..8fa3e0ff3671 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1225,6 +1225,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		iap->ia_mode = 0;
+ 	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
+ 
++	if (!IS_POSIXACL(dirp))
++		iap->ia_mode &= ~current_umask();
++
+ 	err = 0;
+ 	host_err = 0;
+ 	switch (type) {
+@@ -1457,6 +1460,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		goto out;
+ 	}
+ 
++	if (!IS_POSIXACL(dirp))
++		iap->ia_mode &= ~current_umask();
++
+ 	host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
+ 	if (host_err < 0) {
+ 		fh_drop_write(fhp);
+diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
+index b43f0e8f43f2..9ed90368ab31 100644
+--- a/fs/xfs/xfs_log_cil.c
++++ b/fs/xfs/xfs_log_cil.c
+@@ -671,7 +671,8 @@ xlog_cil_push_work(
+ 	/*
+ 	 * Wake up any background push waiters now this context is being pushed.
+ 	 */
+-	wake_up_all(&ctx->push_wait);
++	if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
++		wake_up_all(&cil->xc_push_wait);
+ 
+ 	/*
+ 	 * Check if we've anything to push. If there is nothing, then we don't
+@@ -743,13 +744,12 @@ xlog_cil_push_work(
+ 
+ 	/*
+ 	 * initialise the new context and attach it to the CIL. Then attach
+-	 * the current context to the CIL committing lsit so it can be found
++	 * the current context to the CIL committing list so it can be found
+ 	 * during log forces to extract the commit lsn of the sequence that
+ 	 * needs to be forced.
+ 	 */
+ 	INIT_LIST_HEAD(&new_ctx->committing);
+ 	INIT_LIST_HEAD(&new_ctx->busy_extents);
+-	init_waitqueue_head(&new_ctx->push_wait);
+ 	new_ctx->sequence = ctx->sequence + 1;
+ 	new_ctx->cil = cil;
+ 	cil->xc_ctx = new_ctx;
+@@ -937,7 +937,7 @@ xlog_cil_push_background(
+ 	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
+ 		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
+ 		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
+-		xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
++		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
+ 		return;
+ 	}
+ 
+@@ -1216,12 +1216,12 @@ xlog_cil_init(
+ 	INIT_LIST_HEAD(&cil->xc_committing);
+ 	spin_lock_init(&cil->xc_cil_lock);
+ 	spin_lock_init(&cil->xc_push_lock);
++	init_waitqueue_head(&cil->xc_push_wait);
+ 	init_rwsem(&cil->xc_ctx_lock);
+ 	init_waitqueue_head(&cil->xc_commit_wait);
+ 
+ 	INIT_LIST_HEAD(&ctx->committing);
+ 	INIT_LIST_HEAD(&ctx->busy_extents);
+-	init_waitqueue_head(&ctx->push_wait);
+ 	ctx->sequence = 1;
+ 	ctx->cil = cil;
+ 	cil->xc_ctx = ctx;
+diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
+index ec22c7a3867f..75a62870b63a 100644
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -240,7 +240,6 @@ struct xfs_cil_ctx {
+ 	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
+ 	struct list_head	iclog_entry;
+ 	struct list_head	committing;	/* ctx committing list */
+-	wait_queue_head_t	push_wait;	/* background push throttle */
+ 	struct work_struct	discard_endio_work;
+ };
+ 
+@@ -274,6 +273,7 @@ struct xfs_cil {
+ 	wait_queue_head_t	xc_commit_wait;
+ 	xfs_lsn_t		xc_current_sequence;
+ 	struct work_struct	xc_push_work;
++	wait_queue_head_t	xc_push_wait;	/* background push throttle */
+ } ____cacheline_aligned_in_smp;
+ 
+ /*
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 56527c85d122..088c1ded2714 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -29,8 +29,8 @@ struct alg_sock {
+ 
+ 	struct sock *parent;
+ 
+-	unsigned int refcnt;
+-	unsigned int nokey_refcnt;
++	atomic_t refcnt;
++	atomic_t nokey_refcnt;
+ 
+ 	const struct af_alg_type *type;
+ 	void *private;
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 5616b2567aa7..c2d073c49bf8 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -149,7 +149,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
+ 	 size_t buffer_size)
+ LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+ LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
+-LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
++LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
+ LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
+ 	 struct kernfs_node *kn)
+ LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
+diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
+index fa067de9f1a9..d2b4204ba4d3 100644
+--- a/include/linux/sched/jobctl.h
++++ b/include/linux/sched/jobctl.h
+@@ -19,6 +19,7 @@ struct task_struct;
+ #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
+ #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
+ #define JOBCTL_TRAP_FREEZE_BIT	23	/* trap for cgroup freezer */
++#define JOBCTL_TASK_WORK_BIT	24	/* set by TWA_SIGNAL */
+ 
+ #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
+ #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
+@@ -28,9 +29,10 @@ struct task_struct;
+ #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
+ #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
+ #define JOBCTL_TRAP_FREEZE	(1UL << JOBCTL_TRAP_FREEZE_BIT)
++#define JOBCTL_TASK_WORK	(1UL << JOBCTL_TASK_WORK_BIT)
+ 
+ #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+-#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
++#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
+ 
+ extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
+ extern void task_clear_jobctl_trapping(struct task_struct *task);
+diff --git a/include/linux/task_work.h b/include/linux/task_work.h
+index bd9a6a91c097..0fb93aafa478 100644
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
+ 	twork->func = func;
+ }
+ 
+-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
++#define TWA_RESUME	1
++#define TWA_SIGNAL	2
++int task_work_add(struct task_struct *task, struct callback_head *twork, int);
++
+ struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
+ void task_work_run(void);
+ 
+diff --git a/include/net/seg6.h b/include/net/seg6.h
+index 640724b35273..9d19c15e8545 100644
+--- a/include/net/seg6.h
++++ b/include/net/seg6.h
+@@ -57,7 +57,7 @@ extern void seg6_iptunnel_exit(void);
+ extern int seg6_local_init(void);
+ extern void seg6_local_exit(void);
+ 
+-extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
++extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+ extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ 			     int proto);
+ extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index d47c7d6656cd..9be6accf8fe3 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -577,6 +577,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
+ 		arch_kgdb_ops.disable_hw_break(regs);
+ 
+ acquirelock:
++	rcu_read_lock();
+ 	/*
+ 	 * Interrupts will be restored by the 'trap return' code, except when
+ 	 * single stepping.
+@@ -636,6 +637,7 @@ return_normal:
+ 			atomic_dec(&slaves_in_kgdb);
+ 			dbg_touch_watchdogs();
+ 			local_irq_restore(flags);
++			rcu_read_unlock();
+ 			return 0;
+ 		}
+ 		cpu_relax();
+@@ -654,6 +656,7 @@ return_normal:
+ 		raw_spin_unlock(&dbg_master_lock);
+ 		dbg_touch_watchdogs();
+ 		local_irq_restore(flags);
++		rcu_read_unlock();
+ 
+ 		goto acquirelock;
+ 	}
+@@ -777,6 +780,7 @@ kgdb_restore:
+ 	raw_spin_unlock(&dbg_master_lock);
+ 	dbg_touch_watchdogs();
+ 	local_irq_restore(flags);
++	rcu_read_unlock();
+ 
+ 	return kgdb_info[cpu].ret_state;
+ }
+diff --git a/kernel/padata.c b/kernel/padata.c
+index aae789896616..859c77d22aa7 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -260,7 +260,7 @@ static void padata_reorder(struct parallel_data *pd)
+ 	 *
+ 	 * Ensure reorder queue is read after pd->lock is dropped so we see
+ 	 * new objects from another task in padata_do_serial.  Pairs with
+-	 * smp_mb__after_atomic in padata_do_serial.
++	 * smp_mb in padata_do_serial.
+ 	 */
+ 	smp_mb();
+ 
+@@ -342,7 +342,7 @@ void padata_do_serial(struct padata_priv *padata)
+ 	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
+ 	 * in padata_reorder.
+ 	 */
+-	smp_mb__after_atomic();
++	smp_mb();
+ 
+ 	padata_reorder(pd);
+ }
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 239970b991c0..0f4aaad236a9 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -258,7 +258,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ 	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
+ 	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
+ 	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
+-	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0644, proc_dointvec_minmax);
++	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, proc_dointvec_minmax);
+ 	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+ 	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
+ 	/* &table[8] is terminator */
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 284fc1600063..d5feb34b5e15 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2529,9 +2529,6 @@ bool get_signal(struct ksignal *ksig)
+ 	struct signal_struct *signal = current->signal;
+ 	int signr;
+ 
+-	if (unlikely(current->task_works))
+-		task_work_run();
+-
+ 	if (unlikely(uprobe_deny_signal()))
+ 		return false;
+ 
+@@ -2544,6 +2541,13 @@ bool get_signal(struct ksignal *ksig)
+ 
+ relock:
+ 	spin_lock_irq(&sighand->siglock);
++	current->jobctl &= ~JOBCTL_TASK_WORK;
++	if (unlikely(current->task_works)) {
++		spin_unlock_irq(&sighand->siglock);
++		task_work_run();
++		goto relock;
++	}
++
+ 	/*
+ 	 * Every stopped thread goes here after wakeup. Check to see if
+ 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
+diff --git a/kernel/task_work.c b/kernel/task_work.c
+index 825f28259a19..5c0848ca1287 100644
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -25,9 +25,10 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
+  * 0 if succeeds or -ESRCH.
+  */
+ int
+-task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
++task_work_add(struct task_struct *task, struct callback_head *work, int notify)
+ {
+ 	struct callback_head *head;
++	unsigned long flags;
+ 
+ 	do {
+ 		head = READ_ONCE(task->task_works);
+@@ -36,8 +37,19 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
+ 		work->next = head;
+ 	} while (cmpxchg(&task->task_works, head, work) != head);
+ 
+-	if (notify)
++	switch (notify) {
++	case TWA_RESUME:
+ 		set_notify_resume(task);
++		break;
++	case TWA_SIGNAL:
++		if (lock_task_sighand(task, &flags)) {
++			task->jobctl |= JOBCTL_TASK_WORK;
++			signal_wake_up(task, 0);
++			unlock_task_sighand(task, &flags);
++		}
++		break;
++	}
++
+ 	return 0;
+ }
+ 
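task_work_add() keeps its lock-free prepend loop and only changes how the target task is poked afterwards (TWA_RESUME vs the new TWA_SIGNAL). A userspace sketch of that cmpxchg-style push, using C11 atomics in place of the kernel's cmpxchg() (single-threaded driver, illustration only):

#include <stdatomic.h>
#include <stdio.h>

struct callback_head {
	struct callback_head *next;
	int id;
};

static _Atomic(struct callback_head *) task_works;

static void push_work(struct callback_head *work)
{
	struct callback_head *head = atomic_load(&task_works);

	do {
		work->next = head;	/* link to the current head */
	} while (!atomic_compare_exchange_weak(&task_works, &head, work));
}

int main(void)
{
	struct callback_head a = { .id = 1 }, b = { .id = 2 };

	push_work(&a);
	push_work(&b);
	for (struct callback_head *p = atomic_load(&task_works); p; p = p->next)
		printf("work %d\n", p->id);	/* prints 2 then 1: LIFO order */
	return 0;
}
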
+diff --git a/mm/cma.c b/mm/cma.c
+index 0463ad2ce06b..26ecff818881 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
+ 		 */
+ 		if (base < highmem_start && limit > highmem_start) {
+ 			addr = memblock_alloc_range_nid(size, alignment,
+-					highmem_start, limit, nid, false);
++					highmem_start, limit, nid, true);
+ 			limit = highmem_start;
+ 		}
+ 
+ 		if (!addr) {
+ 			addr = memblock_alloc_range_nid(size, alignment, base,
+-					limit, nid, false);
++					limit, nid, true);
+ 			if (!addr) {
+ 				ret = -ENOMEM;
+ 				goto err;
+diff --git a/mm/debug.c b/mm/debug.c
+index 2189357f0987..f2ede2df585a 100644
+--- a/mm/debug.c
++++ b/mm/debug.c
+@@ -110,13 +110,57 @@ void __dump_page(struct page *page, const char *reason)
+ 	else if (PageAnon(page))
+ 		type = "anon ";
+ 	else if (mapping) {
+-		if (mapping->host && mapping->host->i_dentry.first) {
+-			struct dentry *dentry;
+-			dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
+-			pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
+-		} else
+-			pr_warn("%ps\n", mapping->a_ops);
++		const struct inode *host;
++		const struct address_space_operations *a_ops;
++		const struct hlist_node *dentry_first;
++		const struct dentry *dentry_ptr;
++		struct dentry dentry;
++
++		/*
++		 * mapping can be an invalid pointer and we don't want to crash
++		 * accessing it, so carefully probe everything that depends on it
++		 */
++		if (probe_kernel_read_strict(&host, &mapping->host,
++						sizeof(struct inode *)) ||
++		    probe_kernel_read_strict(&a_ops, &mapping->a_ops,
++				sizeof(struct address_space_operations *))) {
++			pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n");
++			goto out_mapping;
++		}
++
++		if (!host) {
++			pr_warn("mapping->a_ops:%ps\n", a_ops);
++			goto out_mapping;
++		}
++
++		if (probe_kernel_read_strict(&dentry_first,
++			&host->i_dentry.first, sizeof(struct hlist_node *))) {
++			pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n",
++				a_ops, host);
++			goto out_mapping;
++		}
++
++		if (!dentry_first) {
++			pr_warn("mapping->a_ops:%ps\n", a_ops);
++			goto out_mapping;
++		}
++
++		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
++		if (probe_kernel_read_strict(&dentry, dentry_ptr,
++							sizeof(struct dentry))) {
++			pr_warn("mapping->a_ops:%ps with invalid mapping->host->i_dentry.first %px\n",
++				a_ops, dentry_ptr);
++		} else {
++			/*
++			 * if dentry is corrupted, the %pd handler may still
++			 * crash, but it's unlikely that we reach here with a
++			 * corrupted struct page
++			 */
++			pr_warn("mapping->a_ops:%ps dentry name:\"%pd\"\n",
++								a_ops, &dentry);
++		}
+ 	}
++out_mapping:
+ 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
+ 
+ 	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index bcabbe02192b..4f7cdc55fbe4 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1594,7 +1594,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
+ 
+ 	/* Use first found vma */
+ 	pgoff_start = page_to_pgoff(hpage);
+-	pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
++	pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
+ 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+ 					pgoff_start, pgoff_end) {
+ 		struct vm_area_struct *vma = avc->vma;
+diff --git a/mm/slub.c b/mm/slub.c
+index 63bd39c47643..660f4324c097 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -679,6 +679,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+ 	va_end(args);
+ }
+ 
++static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
++			       void *freelist, void *nextfree)
++{
++	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
++	    !check_valid_pointer(s, page, nextfree)) {
++		object_err(s, page, freelist, "Freechain corrupt");
++		freelist = NULL;
++		slab_fix(s, "Isolate corrupted freechain");
++		return true;
++	}
++
++	return false;
++}
++
+ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ {
+ 	unsigned int off;	/* Offset of last byte */
+@@ -1410,6 +1424,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
+ static inline void dec_slabs_node(struct kmem_cache *s, int node,
+ 							int objects) {}
+ 
++static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
++			       void *freelist, void *nextfree)
++{
++	return false;
++}
+ #endif /* CONFIG_SLUB_DEBUG */
+ 
+ /*
+@@ -2093,6 +2112,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ 		void *prior;
+ 		unsigned long counters;
+ 
++		/*
++		 * If 'nextfree' is invalid, it is possible that the object at
++		 * 'freelist' is already corrupted.  So isolate all objects
++		 * starting at 'freelist'.
++		 */
++		if (freelist_corrupted(s, page, freelist, nextfree))
++			break;
++
+ 		do {
+ 			prior = page->freelist;
+ 			counters = page->counters;
+@@ -5654,7 +5681,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
+ 		 */
+ 		if (buffer)
+ 			buf = buffer;
+-		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
++		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
++			 !IS_ENABLED(CONFIG_SLUB_STATS))
+ 			buf = mbuf;
+ 		else {
+ 			buffer = (char *) get_zeroed_page(GFP_KERNEL);
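freelist_corrupted() above refuses to follow a freelist pointer that does not point back into the slab page, isolating the rest of the chain instead of chasing a wild pointer. The same guard applied to a toy arena allocator's free list (a sketch, not SLUB's code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARENA_OBJS 8

struct obj { struct obj *next; };

static struct obj arena[ARENA_OBJS];

static bool pointer_valid(const struct obj *p)
{
	return p == NULL || (p >= arena && p < arena + ARENA_OBJS);
}

static size_t walk_freelist(struct obj *head)
{
	size_t n = 0;

	for (struct obj *p = head; p; p = p->next) {
		n++;	/* this object is reachable and inside the arena */
		if (!pointer_valid(p->next)) {
			fprintf(stderr, "freechain corrupt, isolating the rest\n");
			break;	/* like the added break in deactivate_slab() */
		}
	}
	return n;
}

int main(void)
{
	arena[0].next = &arena[1];
	arena[1].next = (struct obj *)(uintptr_t)0xdeadbeef;	/* simulated corruption */
	printf("walked %zu objects\n", walk_freelist(&arena[0]));
	return 0;
}
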
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index ebed37bbf7a3..e3d36776c08b 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -23,6 +23,7 @@
+ #include <linux/huge_mm.h>
+ 
+ #include <asm/pgtable.h>
++#include "internal.h"
+ 
+ /*
+  * swapper_space is a fiction, retained to simplify the path through
+@@ -418,7 +419,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ 		/* May fail (-ENOMEM) if XArray node allocation failed. */
+ 		__SetPageLocked(new_page);
+ 		__SetPageSwapBacked(new_page);
+-		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
++		err = add_to_swap_cache(new_page, entry,
++					gfp_mask & GFP_RECLAIM_MASK);
+ 		if (likely(!err)) {
+ 			/* Initiate read into locked page */
+ 			SetPageWorkingset(new_page);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9512a9772d69..45fa65a28983 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4920,7 +4920,7 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
+ 	int err;
+ 	struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
+ 
+-	if (!seg6_validate_srh(srh, len))
++	if (!seg6_validate_srh(srh, len, false))
+ 		return -EINVAL;
+ 
+ 	switch (type) {
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index fc7027314ad8..ef100cfd2ac1 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -341,7 +341,7 @@ static void hsr_announce(struct timer_list *t)
+ 	rcu_read_unlock();
+ }
+ 
+-static void hsr_del_ports(struct hsr_priv *hsr)
++void hsr_del_ports(struct hsr_priv *hsr)
+ {
+ 	struct hsr_port *port;
+ 
+@@ -358,31 +358,12 @@ static void hsr_del_ports(struct hsr_priv *hsr)
+ 		hsr_del_port(port);
+ }
+ 
+-/* This has to be called after all the readers are gone.
+- * Otherwise we would have to check the return value of
+- * hsr_port_get_hsr().
+- */
+-static void hsr_dev_destroy(struct net_device *hsr_dev)
+-{
+-	struct hsr_priv *hsr = netdev_priv(hsr_dev);
+-
+-	hsr_debugfs_term(hsr);
+-	hsr_del_ports(hsr);
+-
+-	del_timer_sync(&hsr->prune_timer);
+-	del_timer_sync(&hsr->announce_timer);
+-
+-	hsr_del_self_node(hsr);
+-	hsr_del_nodes(&hsr->node_db);
+-}
+-
+ static const struct net_device_ops hsr_device_ops = {
+ 	.ndo_change_mtu = hsr_dev_change_mtu,
+ 	.ndo_open = hsr_dev_open,
+ 	.ndo_stop = hsr_dev_close,
+ 	.ndo_start_xmit = hsr_dev_xmit,
+ 	.ndo_fix_features = hsr_fix_features,
+-	.ndo_uninit = hsr_dev_destroy,
+ };
+ 
+ static struct device_type hsr_type = {
+diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
+index a099d7de7e79..b8f9262ed101 100644
+--- a/net/hsr/hsr_device.h
++++ b/net/hsr/hsr_device.h
+@@ -11,6 +11,7 @@
+ #include <linux/netdevice.h>
+ #include "hsr_main.h"
+ 
++void hsr_del_ports(struct hsr_priv *hsr);
+ void hsr_dev_setup(struct net_device *dev);
+ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ 		     unsigned char multicast_spec, u8 protocol_version,
+@@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
+ bool is_hsr_master(struct net_device *dev);
+ int hsr_get_max_mtu(struct hsr_priv *hsr);
+-
+ #endif /* __HSR_DEVICE_H */
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 26d6c39f24e1..144da15f0a81 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/netdevice.h>
++#include <net/rtnetlink.h>
+ #include <linux/rculist.h>
+ #include <linux/timer.h>
+ #include <linux/etherdevice.h>
+@@ -15,12 +16,23 @@
+ #include "hsr_framereg.h"
+ #include "hsr_slave.h"
+ 
++static bool hsr_slave_empty(struct hsr_priv *hsr)
++{
++	struct hsr_port *port;
++
++	hsr_for_each_port(hsr, port)
++		if (port->type != HSR_PT_MASTER)
++			return false;
++	return true;
++}
++
+ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+ 			     void *ptr)
+ {
+-	struct net_device *dev;
+ 	struct hsr_port *port, *master;
++	struct net_device *dev;
+ 	struct hsr_priv *hsr;
++	LIST_HEAD(list_kill);
+ 	int mtu_max;
+ 	int res;
+ 
+@@ -85,8 +97,17 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+ 		master->dev->mtu = mtu_max;
+ 		break;
+ 	case NETDEV_UNREGISTER:
+-		if (!is_hsr_master(dev))
++		if (!is_hsr_master(dev)) {
++			master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
+ 			hsr_del_port(port);
++			if (hsr_slave_empty(master->hsr)) {
++				const struct rtnl_link_ops *ops;
++
++				ops = master->dev->rtnl_link_ops;
++				ops->dellink(master->dev, &list_kill);
++				unregister_netdevice_many(&list_kill);
++			}
++		}
+ 		break;
+ 	case NETDEV_PRE_TYPE_CHANGE:
+ 		/* HSR works only on Ethernet devices. Refuse slave to change
+@@ -126,9 +147,9 @@ static int __init hsr_init(void)
+ 
+ static void __exit hsr_exit(void)
+ {
+-	unregister_netdevice_notifier(&hsr_nb);
+ 	hsr_netlink_exit();
+ 	hsr_debugfs_remove_root();
++	unregister_netdevice_notifier(&hsr_nb);
+ }
+ 
+ module_init(hsr_init);
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index 1decb25f6764..6e14b7d22639 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ 	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
+ }
+ 
++static void hsr_dellink(struct net_device *dev, struct list_head *head)
++{
++	struct hsr_priv *hsr = netdev_priv(dev);
++
++	del_timer_sync(&hsr->prune_timer);
++	del_timer_sync(&hsr->announce_timer);
++
++	hsr_debugfs_term(hsr);
++	hsr_del_ports(hsr);
++
++	hsr_del_self_node(hsr);
++	hsr_del_nodes(&hsr->node_db);
++
++	unregister_netdevice_queue(dev, head);
++}
++
+ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ {
+ 	struct hsr_priv *hsr = netdev_priv(dev);
+@@ -118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
+ 	.priv_size	= sizeof(struct hsr_priv),
+ 	.setup		= hsr_dev_setup,
+ 	.newlink	= hsr_newlink,
++	.dellink	= hsr_dellink,
+ 	.fill_info	= hsr_fill_info,
+ };
+ 
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 5af97b4f5df3..ff187fd2083f 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -458,7 +458,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 				struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)
+ 							  opt->srcrt;
+ 
+-				if (!seg6_validate_srh(srh, optlen))
++				if (!seg6_validate_srh(srh, optlen, false))
+ 					goto sticky_done;
+ 				break;
+ 			}
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 37b434293bda..d2f8138e5a73 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -25,7 +25,7 @@
+ #include <net/seg6_hmac.h>
+ #endif
+ 
+-bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
++bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
+ {
+ 	unsigned int tlv_offset;
+ 	int max_last_entry;
+@@ -37,13 +37,17 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
+ 	if (((srh->hdrlen + 1) << 3) != len)
+ 		return false;
+ 
+-	max_last_entry = (srh->hdrlen / 2) - 1;
+-
+-	if (srh->first_segment > max_last_entry)
++	if (!reduced && srh->segments_left > srh->first_segment) {
+ 		return false;
++	} else {
++		max_last_entry = (srh->hdrlen / 2) - 1;
+ 
+-	if (srh->segments_left > srh->first_segment + 1)
+-		return false;
++		if (srh->first_segment > max_last_entry)
++			return false;
++
++		if (srh->segments_left > srh->first_segment + 1)
++			return false;
++	}
+ 
+ 	tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
+ 
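seg6_validate_srh() grows a 'reduced' flag: SRHs parsed off the wire (get_srh() and the BPF path), which may use the reduced encapsulation that omits the first segment, skip the new segments_left <= first_segment requirement that now applies to locally supplied SRHs. A cut-down userspace version of the arithmetic, with a simplified header struct (the real ipv6_sr_hdr carries more fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct srh {
	uint8_t hdrlen;		/* in 8-octet units, not counting the first 8 */
	uint8_t segments_left;
	uint8_t first_segment;
};

static bool validate_srh(const struct srh *h, int len, bool reduced)
{
	if (((h->hdrlen + 1) << 3) != len)
		return false;	/* declared length must match the buffer */

	if (!reduced && h->segments_left > h->first_segment) {
		return false;
	} else {
		int max_last_entry = (h->hdrlen / 2) - 1;

		if (h->first_segment > max_last_entry)
			return false;
		if (h->segments_left > h->first_segment + 1)
			return false;
	}
	return true;
}

int main(void)
{
	/* a reduced-encapsulation SRH: segments_left exceeds first_segment */
	struct srh h = { .hdrlen = 2, .segments_left = 1, .first_segment = 0 };

	printf("strict:  %d\n", validate_srh(&h, 24, false));	/* rejected */
	printf("reduced: %d\n", validate_srh(&h, 24, true));	/* accepted */
	return 0;
}
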
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index c7cbfeae94f5..e0e9f48ab14f 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -426,7 +426,7 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
+ 	}
+ 
+ 	/* verify that SRH is consistent */
+-	if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo)))
++	if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo), false))
+ 		return -EINVAL;
+ 
+ 	newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 52493423f329..eba23279912d 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -87,7 +87,7 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
+ 	 */
+ 	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+ 
+-	if (!seg6_validate_srh(srh, len))
++	if (!seg6_validate_srh(srh, len, true))
+ 		return NULL;
+ 
+ 	return srh;
+@@ -495,7 +495,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
+ 			return false;
+ 
+ 		srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
+-		if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
++		if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3, true))
+ 			return false;
+ 
+ 		srh_state->valid = true;
+@@ -670,7 +670,7 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
+ 	if (len < sizeof(*srh) + sizeof(struct in6_addr))
+ 		return -EINVAL;
+ 
+-	if (!seg6_validate_srh(srh, len))
++	if (!seg6_validate_srh(srh, len, false))
+ 		return -EINVAL;
+ 
+ 	slwt->srh = kmemdup(srh, len, GFP_KERNEL);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index db3e4e74e785..0112ead58fd8 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -424,22 +424,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
+ 	struct mptcp_subflow_request_sock *subflow_req;
+ 	struct mptcp_options_received mp_opt;
+-	bool fallback_is_fatal = false;
++	bool fallback, fallback_is_fatal;
+ 	struct sock *new_msk = NULL;
+-	bool fallback = false;
+ 	struct sock *child;
+ 
+ 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+ 
+-	/* we need later a valid 'mp_capable' value even when options are not
+-	 * parsed
++	/* After child creation we must look for 'mp_capable' even when options
++	 * are not parsed
+ 	 */
+ 	mp_opt.mp_capable = 0;
+-	if (tcp_rsk(req)->is_mptcp == 0)
++
++	/* hopefully temporary handling for MP_JOIN+syncookie */
++	subflow_req = mptcp_subflow_rsk(req);
++	fallback_is_fatal = subflow_req->mp_join;
++	fallback = !tcp_rsk(req)->is_mptcp;
++	if (fallback)
+ 		goto create_child;
+ 
+ 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
+-	subflow_req = mptcp_subflow_rsk(req);
+ 	if (subflow_req->mp_capable) {
+ 		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
+ 			/* here we can receive and accept an in-window,
+@@ -460,12 +463,11 @@ create_msk:
+ 		if (!new_msk)
+ 			fallback = true;
+ 	} else if (subflow_req->mp_join) {
+-		fallback_is_fatal = true;
+ 		mptcp_get_options(skb, &mp_opt);
+ 		if (!mp_opt.mp_join ||
+ 		    !subflow_hmac_valid(req, &mp_opt)) {
+ 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+-			return NULL;
++			fallback = true;
+ 		}
+ 	}
+ 
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index 2a65ac41055f..9ff85ee8337c 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -248,7 +248,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ 		if (anno_type != RXRPC_TX_ANNO_RETRANS)
+ 			continue;
+ 
++		/* We need to reset the retransmission state, but must do so
++		 * before we drop the lock, as a new ACK/NAK may come in and
++		 * confuse things.
++		 */
++		annotation &= ~RXRPC_TX_ANNO_MASK;
++		annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
++		call->rxtx_annotations[ix] = annotation;
++
+ 		skb = call->rxtx_buffer[ix];
++		if (!skb)
++			continue;
++
+ 		rxrpc_get_skb(skb, rxrpc_skb_got);
+ 		spin_unlock_bh(&call->lock);
+ 
+@@ -262,24 +273,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ 
+ 		rxrpc_free_skb(skb, rxrpc_skb_freed);
+ 		spin_lock_bh(&call->lock);
+-
+-		/* We need to clear the retransmit state, but there are two
+-		 * things we need to be aware of: A new ACK/NAK might have been
+-		 * received and the packet might have been hard-ACK'd (in which
+-		 * case it will no longer be in the buffer).
+-		 */
+-		if (after(seq, call->tx_hard_ack)) {
+-			annotation = call->rxtx_annotations[ix];
+-			anno_type = annotation & RXRPC_TX_ANNO_MASK;
+-			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
+-			    anno_type == RXRPC_TX_ANNO_NAK) {
+-				annotation &= ~RXRPC_TX_ANNO_MASK;
+-				annotation |= RXRPC_TX_ANNO_UNACK;
+-			}
+-			annotation |= RXRPC_TX_ANNO_RESENT;
+-			call->rxtx_annotations[ix] = annotation;
+-		}
+-
+ 		if (after(call->tx_hard_ack, seq))
+ 			seq = call->tx_hard_ack;
+ 	}
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 3ad411884e6c..560d7a4c0fff 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -235,21 +235,18 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ 			msg_set_size(hdr, MIN_H_SIZE);
+ 			__skb_queue_tail(txq, skb);
+ 			total += 1;
+-			if (prev)
+-				msg_set_ack_required(buf_msg(prev), 0);
+-			msg_set_ack_required(hdr, 1);
+ 		}
+ 		hdr = buf_msg(skb);
+ 		curr = msg_blocks(hdr);
+ 		mlen = msg_size(hdr);
+-		cpy = min_t(int, rem, mss - mlen);
++		cpy = min_t(size_t, rem, mss - mlen);
+ 		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
+ 			return -EFAULT;
+ 		msg_set_size(hdr, mlen + cpy);
+ 		skb_put(skb, cpy);
+ 		rem -= cpy;
+ 		total += msg_blocks(hdr) - curr;
+-	} while (rem);
++	} while (rem > 0);
+ 	return total - accounted;
+ }
+ 
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index 871feadbbc19..a4e2029170b1 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -321,9 +321,19 @@ static inline int msg_ack_required(struct tipc_msg *m)
+ 	return msg_bits(m, 0, 18, 1);
+ }
+ 
+-static inline void msg_set_ack_required(struct tipc_msg *m, u32 d)
++static inline void msg_set_ack_required(struct tipc_msg *m)
+ {
+-	msg_set_bits(m, 0, 18, 1, d);
++	msg_set_bits(m, 0, 18, 1, 1);
++}
++
++static inline int msg_nagle_ack(struct tipc_msg *m)
++{
++	return msg_bits(m, 0, 18, 1);
++}
++
++static inline void msg_set_nagle_ack(struct tipc_msg *m)
++{
++	msg_set_bits(m, 0, 18, 1, 1);
+ }
+ 
+ static inline bool msg_is_rcast(struct tipc_msg *m)
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index e370ad0edd76..f02f2abf6e3c 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -48,6 +48,8 @@
+ #include "group.h"
+ #include "trace.h"
+ 
++#define NAGLE_START_INIT	4
++#define NAGLE_START_MAX		1024
+ #define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
+ #define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
+ #define TIPC_FWD_MSG		1
+@@ -119,7 +121,10 @@ struct tipc_sock {
+ 	struct rcu_head rcu;
+ 	struct tipc_group *group;
+ 	u32 oneway;
++	u32 nagle_start;
+ 	u16 snd_backlog;
++	u16 msg_acc;
++	u16 pkt_cnt;
+ 	bool expect_ack;
+ 	bool nodelay;
+ 	bool group_is_open;
+@@ -143,7 +148,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk);
+ static void tipc_sk_remove(struct tipc_sock *tsk);
+ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
+ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
+-static void tipc_sk_push_backlog(struct tipc_sock *tsk);
++static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+ 
+ static const struct proto_ops packet_ops;
+ static const struct proto_ops stream_ops;
+@@ -474,6 +479,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
+ 	tsk = tipc_sk(sk);
+ 	tsk->max_pkt = MAX_PKT_DEFAULT;
+ 	tsk->maxnagle = 0;
++	tsk->nagle_start = NAGLE_START_INIT;
+ 	INIT_LIST_HEAD(&tsk->publications);
+ 	INIT_LIST_HEAD(&tsk->cong_links);
+ 	msg = &tsk->phdr;
+@@ -541,7 +547,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
+ 					    !tsk_conn_cong(tsk)));
+ 
+ 	/* Push out delayed messages if in Nagle mode */
+-	tipc_sk_push_backlog(tsk);
++	tipc_sk_push_backlog(tsk, false);
+ 	/* Remove pending SYN */
+ 	__skb_queue_purge(&sk->sk_write_queue);
+ 
+@@ -1252,14 +1258,37 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ /* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
+  *                         when socket is in Nagle mode
+  */
+-static void tipc_sk_push_backlog(struct tipc_sock *tsk)
++static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
+ {
+ 	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
++	struct sk_buff *skb = skb_peek_tail(txq);
+ 	struct net *net = sock_net(&tsk->sk);
+ 	u32 dnode = tsk_peer_node(tsk);
+-	struct sk_buff *skb = skb_peek(txq);
+ 	int rc;
+ 
++	if (nagle_ack) {
++		tsk->pkt_cnt += skb_queue_len(txq);
++		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
++			tsk->oneway = 0;
++			if (tsk->nagle_start < NAGLE_START_MAX)
++				tsk->nagle_start *= 2;
++			tsk->expect_ack = false;
++			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
++				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
++				 tsk->nagle_start);
++		} else {
++			tsk->nagle_start = NAGLE_START_INIT;
++			if (skb) {
++				msg_set_ack_required(buf_msg(skb));
++				tsk->expect_ack = true;
++			} else {
++				tsk->expect_ack = false;
++			}
++		}
++		tsk->msg_acc = 0;
++		tsk->pkt_cnt = 0;
++	}
++
+ 	if (!skb || tsk->cong_link_cnt)
+ 		return;
+ 
+@@ -1267,9 +1296,10 @@ static void tipc_sk_push_backlog(struct tipc_sock *tsk)
+ 	if (msg_is_syn(buf_msg(skb)))
+ 		return;
+ 
++	if (tsk->msg_acc)
++		tsk->pkt_cnt += skb_queue_len(txq);
+ 	tsk->snt_unacked += tsk->snd_backlog;
+ 	tsk->snd_backlog = 0;
+-	tsk->expect_ack = true;
+ 	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+ 	if (rc == -ELINKCONG)
+ 		tsk->cong_link_cnt = 1;
+@@ -1322,8 +1352,7 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ 		return;
+ 	} else if (mtyp == CONN_ACK) {
+ 		was_cong = tsk_conn_cong(tsk);
+-		tsk->expect_ack = false;
+-		tipc_sk_push_backlog(tsk);
++		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
+ 		tsk->snt_unacked -= msg_conn_ack(hdr);
+ 		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+ 			tsk->snd_win = msg_adv_win(hdr);
+@@ -1516,6 +1545,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
+ 	struct tipc_sock *tsk = tipc_sk(sk);
+ 	struct tipc_msg *hdr = &tsk->phdr;
+ 	struct net *net = sock_net(sk);
++	struct sk_buff *skb;
+ 	u32 dnode = tsk_peer_node(tsk);
+ 	int maxnagle = tsk->maxnagle;
+ 	int maxpkt = tsk->max_pkt;
+@@ -1544,17 +1574,30 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
+ 			break;
+ 		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
+ 		blocks = tsk->snd_backlog;
+-		if (tsk->oneway++ >= 4 && send <= maxnagle) {
++		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
++		    send <= maxnagle) {
+ 			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
+ 			if (unlikely(rc < 0))
+ 				break;
+ 			blocks += rc;
++			tsk->msg_acc++;
+ 			if (blocks <= 64 && tsk->expect_ack) {
+ 				tsk->snd_backlog = blocks;
+ 				sent += send;
+ 				break;
++			} else if (blocks > 64) {
++				tsk->pkt_cnt += skb_queue_len(txq);
++			} else {
++				skb = skb_peek_tail(txq);
++				if (skb) {
++					msg_set_ack_required(buf_msg(skb));
++					tsk->expect_ack = true;
++				} else {
++					tsk->expect_ack = false;
++				}
++				tsk->msg_acc = 0;
++				tsk->pkt_cnt = 0;
+ 			}
+-			tsk->expect_ack = true;
+ 		} else {
+ 			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
+ 			if (unlikely(rc != send))
+@@ -2091,7 +2134,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
+ 		smp_wmb();
+ 		tsk->cong_link_cnt--;
+ 		wakeup = true;
+-		tipc_sk_push_backlog(tsk);
++		tipc_sk_push_backlog(tsk, false);
+ 		break;
+ 	case GROUP_PROTOCOL:
+ 		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
+@@ -2180,7 +2223,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
+ 		return false;
+ 	case TIPC_ESTABLISHED:
+ 		if (!skb_queue_empty(&sk->sk_write_queue))
+-			tipc_sk_push_backlog(tsk);
++			tipc_sk_push_backlog(tsk, false);
+ 		/* Accept only connection-based messages sent by peer */
+ 		if (likely(con_msg && !err && pport == oport &&
+ 			   pnode == onode)) {
+@@ -2188,8 +2231,10 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
+ 				struct sk_buff *skb;
+ 
+ 				skb = tipc_sk_build_ack(tsk);
+-				if (skb)
++				if (skb) {
++					msg_set_nagle_ack(buf_msg(skb));
+ 					__skb_queue_tail(xmitq, skb);
++				}
+ 			}
+ 			return true;
+ 		}
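The TIPC hunks make the Nagle threshold adaptive: batching only starts after nagle_start consecutive one-way sends, and the threshold doubles whenever a nagle ACK shows the backlog averaged fewer than two messages per packet. A toy model of just that adaptation (field names borrowed from the patch, everything else simplified):

#include <stdio.h>

#define NAGLE_START_INIT 4
#define NAGLE_START_MAX  1024

struct tsock {
	unsigned int oneway;		/* consecutive sends with no reverse traffic */
	unsigned int nagle_start;	/* sends required before batching starts */
};

static int nagle_active(struct tsock *t)
{
	return t->oneway++ >= t->nagle_start;
}

static void on_nagle_ack(struct tsock *t, unsigned int msg_acc, unsigned int pkt_cnt)
{
	if (!pkt_cnt || msg_acc / pkt_cnt < 2) {
		/* batching averaged < 2 messages per packet: back off */
		t->oneway = 0;
		if (t->nagle_start < NAGLE_START_MAX)
			t->nagle_start *= 2;
	} else {
		t->nagle_start = NAGLE_START_INIT;	/* batching pays off, stay eager */
	}
}

int main(void)
{
	struct tsock t = { 0, NAGLE_START_INIT };

	for (int i = 0; i < 6; i++)
		printf("send %d: nagle %s\n", i, nagle_active(&t) ? "on" : "off");
	on_nagle_ack(&t, 3, 3);		/* 1 msg/pkt on average */
	printf("after a bad ack: nagle_start=%u\n", t.nagle_start);
	return 0;
}
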
+diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c
+index a3d68159fb51..507f09c38b49 100644
+--- a/samples/vfs/test-statx.c
++++ b/samples/vfs/test-statx.c
+@@ -23,6 +23,8 @@
+ #include <linux/fcntl.h>
+ #define statx foo
+ #define statx_timestamp foo_timestamp
++struct statx;
++struct statx_timestamp;
+ #include <sys/stat.h>
+ #undef statx
+ #undef statx_timestamp
+diff --git a/security/security.c b/security/security.c
+index 51de970fbb1e..8b4d342ade5e 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1409,7 +1409,22 @@ EXPORT_SYMBOL(security_inode_copy_up);
+ 
+ int security_inode_copy_up_xattr(const char *name)
+ {
+-	return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
++	struct security_hook_list *hp;
++	int rc;
++
++	/*
++	 * The implementation can return 0 (accept the xattr), 1 (discard the
++	 * xattr), -EOPNOTSUPP if it does not know anything about the xattr,
++	 * or any other error code in case of an error.
++	 */
++	hlist_for_each_entry(hp,
++		&security_hook_heads.inode_copy_up_xattr, list) {
++		rc = hp->hook.inode_copy_up_xattr(name);
++		if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
++			return rc;
++	}
++
++	return LSM_RET_DEFAULT(inode_copy_up_xattr);
+ }
+ EXPORT_SYMBOL(security_inode_copy_up_xattr);
+ 
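security_inode_copy_up_xattr() can no longer use call_int_hook(), because a -EOPNOTSUPP default would short-circuit the walk; the open-coded loop keeps asking until some LSM returns a verdict that differs from the default. The same pattern with a plain function-pointer table (hypothetical hooks, sketch only):

#include <errno.h>
#include <stdio.h>

#define DEFAULT_RC (-EOPNOTSUPP)	/* "I don't know this xattr" */

static int lsm_a(const char *name) { (void)name; return DEFAULT_RC; }
static int lsm_b(const char *name) { (void)name; return 1; /* discard it */ }

static int (*hooks[])(const char *) = { lsm_a, lsm_b };

static int copy_up_xattr(const char *name)
{
	for (size_t i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++) {
		int rc = hooks[i](name);

		if (rc != DEFAULT_RC)
			return rc;	/* first real verdict wins */
	}
	return DEFAULT_RC;
}

int main(void)
{
	printf("verdict: %d\n", copy_up_xattr("user.demo"));	/* 1: lsm_b decides */
	return 0;
}
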
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index d6219fba9699..f39f23e3525d 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -84,10 +84,6 @@ struct snd_usb_endpoint {
+ 	dma_addr_t sync_dma;		/* DMA address of syncbuf */
+ 
+ 	unsigned int pipe;		/* the data i/o pipe */
+-	unsigned int framesize[2];	/* small/large frame sizes in samples */
+-	unsigned int sample_rem;	/* remainder from division fs/fps */
+-	unsigned int sample_accum;	/* sample accumulator */
+-	unsigned int fps;		/* frames per second */
+ 	unsigned int freqn;		/* nominal sampling rate in fs/fps in Q16.16 format */
+ 	unsigned int freqm;		/* momentary sampling rate in fs/fps in Q16.16 format */
+ 	int	   freqshift;		/* how much to shift the feedback value to get Q16.16 */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 9bea7d3f99f8..87cc249a31b9 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -124,12 +124,12 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
+ 
+ /*
+  * For streaming based on information derived from sync endpoints,
+- * prepare_outbound_urb_sizes() will call slave_next_packet_size() to
++ * prepare_outbound_urb_sizes() will call next_packet_size() to
+  * determine the number of samples to be sent in the next packet.
+  *
+- * For implicit feedback, slave_next_packet_size() is unused.
++ * For implicit feedback, next_packet_size() is unused.
+  */
+-int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep)
++int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
+ {
+ 	unsigned long flags;
+ 	int ret;
+@@ -146,29 +146,6 @@ int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep)
+ 	return ret;
+ }
+ 
+-/*
+- * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes()
+- * will call next_packet_size() to determine the number of samples to be
+- * sent in the next packet.
+- */
+-int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
+-{
+-	int ret;
+-
+-	if (ep->fill_max)
+-		return ep->maxframesize;
+-
+-	ep->sample_accum += ep->sample_rem;
+-	if (ep->sample_accum >= ep->fps) {
+-		ep->sample_accum -= ep->fps;
+-		ret = ep->framesize[1];
+-	} else {
+-		ret = ep->framesize[0];
+-	}
+-
+-	return ret;
+-}
+-
+ static void retire_outbound_urb(struct snd_usb_endpoint *ep,
+ 				struct snd_urb_ctx *urb_ctx)
+ {
+@@ -213,8 +190,6 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep,
+ 
+ 		if (ctx->packet_size[i])
+ 			counts = ctx->packet_size[i];
+-		else if (ep->sync_master)
+-			counts = snd_usb_endpoint_slave_next_packet_size(ep);
+ 		else
+ 			counts = snd_usb_endpoint_next_packet_size(ep);
+ 
+@@ -1086,17 +1061,10 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
+ 	ep->maxpacksize = fmt->maxpacksize;
+ 	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);
+ 
+-	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
++	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
+ 		ep->freqn = get_usb_full_speed_rate(rate);
+-		ep->fps = 1000;
+-	} else {
++	else
+ 		ep->freqn = get_usb_high_speed_rate(rate);
+-		ep->fps = 8000;
+-	}
+-
+-	ep->sample_rem = rate % ep->fps;
+-	ep->framesize[0] = rate / ep->fps;
+-	ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
+ 
+ 	/* calculate the frequency in 16.16 format */
+ 	ep->freqm = ep->freqn;
+@@ -1155,7 +1123,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+ 	ep->active_mask = 0;
+ 	ep->unlink_mask = 0;
+ 	ep->phase = 0;
+-	ep->sample_accum = 0;
+ 
+ 	snd_usb_endpoint_start_quirk(ep);
+ 
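
Context for the hunks above: with the accumulator fields gone, packet
sizing is again driven purely by the Q16.16 fixed-point rate (freqn and
freqm), where the integer part of a running 16.16 phase gives the number
of samples for the next packet. The standalone sketch below recomputes
the full-speed nominal rate the same way the kernel's
get_usb_full_speed_rate() helper does (a rounded (rate << 13) / 125) and
accumulates it over one second of 1 ms frames; the demo harness itself
is illustrative, not kernel code:

#include <stdio.h>

/* Samples per 1 ms full-speed frame in 16.16 fixed point:
 * (rate / 1000) << 16 == (rate << 13) / 125, rounded. */
static unsigned int full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

int main(void)
{
	unsigned int freqn = full_speed_rate(44100);	/* nominal rate */
	unsigned int phase = 0;
	unsigned long total = 0;
	int i;

	/* One second of full-speed frames: take the integer part of the
	 * running 16.16 phase as the packet size in samples. */
	for (i = 0; i < 1000; i++) {
		phase = (phase & 0xffff) + freqn;
		total += phase >> 16;
	}
	printf("freqn = %#x, samples in 1 s = %lu (expect ~44100)\n",
	       freqn, total);
	return 0;
}
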
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index d23fa0a8c11b..63a39d4fa8d8 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -28,7 +28,6 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
+ void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
+ 
+ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
+-int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
+ 
+ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 39aec83f8aca..c73efdf7545e 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1585,8 +1585,6 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
+ 	for (i = 0; i < ctx->packets; i++) {
+ 		if (ctx->packet_size[i])
+ 			counts = ctx->packet_size[i];
+-		else if (ep->sync_master)
+-			counts = snd_usb_endpoint_slave_next_packet_size(ep);
+ 		else
+ 			counts = snd_usb_endpoint_next_packet_size(ep);
+ 
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index e1bd2a93c6db..010e60d5a081 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name)
+ 	return 0;
+ }
+ 
++static int append(char **buf, const char *delim, const char *str)
++{
++	char *new_buf;
++
++	new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1);
++	if (!new_buf)
++		return -1;
++	strcat(new_buf, delim);
++	strcat(new_buf, str);
++	*buf = new_buf;
++	return 0;
++}
++
+ static int event_read_fields(struct tep_event *event, struct tep_format_field **fields)
+ {
+ 	struct tep_format_field *field = NULL;
+ 	enum tep_event_type type;
+ 	char *token;
+ 	char *last_token;
++	char *delim = " ";
+ 	int count = 0;
++	int ret;
+ 
+ 	do {
+ 		unsigned int size_dynamic = 0;
+@@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
+ 					field->flags |= TEP_FIELD_IS_POINTER;
+ 
+ 				if (field->type) {
+-					char *new_type;
+-					new_type = realloc(field->type,
+-							   strlen(field->type) +
+-							   strlen(last_token) + 2);
+-					if (!new_type) {
+-						free(last_token);
+-						goto fail;
+-					}
+-					field->type = new_type;
+-					strcat(field->type, " ");
+-					strcat(field->type, last_token);
++					ret = append(&field->type, delim, last_token);
+ 					free(last_token);
++					if (ret < 0)
++						goto fail;
+ 				} else
+ 					field->type = last_token;
+ 				last_token = token;
++				delim = " ";
+ 				continue;
+ 			}
+ 
++			/* Handle __attribute__((user)) */
++			if ((type == TEP_EVENT_DELIM) &&
++			    strcmp("__attribute__", last_token) == 0 &&
++			    token[0] == '(') {
++				int depth = 1;
++				int ret;
++
++				ret = append(&field->type, " ", last_token);
++				ret |= append(&field->type, "", "(");
++				if (ret < 0)
++					goto fail;
++
++				delim = " ";
++				while ((type = read_token(&token)) != TEP_EVENT_NONE) {
++					if (type == TEP_EVENT_DELIM) {
++						if (token[0] == '(')
++							depth++;
++						else if (token[0] == ')')
++							depth--;
++						if (!depth)
++							break;
++						ret = append(&field->type, "", token);
++						delim = "";
++					} else {
++						ret = append(&field->type, delim, token);
++						delim = " ";
++					}
++					if (ret < 0)
++						goto fail;
++					free(last_token);
++					last_token = token;
++				}
++				continue;
++			}
+ 			break;
+ 		}
+ 
+@@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
+ 		if (strcmp(token, "[") == 0) {
+ 			enum tep_event_type last_type = type;
+ 			char *brackets = token;
+-			char *new_brackets;
+-			int len;
+ 
+ 			field->flags |= TEP_FIELD_IS_ARRAY;
+ 
+@@ -1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
+ 				field->arraylen = 0;
+ 
+ 		        while (strcmp(token, "]") != 0) {
++				const char *delim;
++
+ 				if (last_type == TEP_EVENT_ITEM &&
+ 				    type == TEP_EVENT_ITEM)
+-					len = 2;
++					delim = " ";
+ 				else
+-					len = 1;
++					delim = "";
++
+ 				last_type = type;
+ 
+-				new_brackets = realloc(brackets,
+-						       strlen(brackets) +
+-						       strlen(token) + len);
+-				if (!new_brackets) {
++				ret = append(&brackets, delim, token);
++				if (ret < 0) {
+ 					free(brackets);
+ 					goto fail;
+ 				}
+-				brackets = new_brackets;
+-				if (len == 2)
+-					strcat(brackets, " ");
+-				strcat(brackets, token);
+ 				/* We only care about the last token */
+ 				field->arraylen = strtoul(token, NULL, 0);
+ 				free_token(token);
+ 				type = read_token(&token);
+ 				if (type == TEP_EVENT_NONE) {
++					free(brackets);
+ 					do_warning_event(event, "failed to find token");
+ 					goto fail;
+ 				}
+@@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
+ 
+ 			free_token(token);
+ 
+-			new_brackets = realloc(brackets, strlen(brackets) + 2);
+-			if (!new_brackets) {
++			ret = append(&brackets, "", "]");
++			if (ret < 0) {
+ 				free(brackets);
+ 				goto fail;
+ 			}
+-			brackets = new_brackets;
+-			strcat(brackets, "]");
+ 
+ 			/* add brackets to type */
+ 
+@@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
+ 			 * the format: type [] item;
+ 			 */
+ 			if (type == TEP_EVENT_ITEM) {
+-				char *new_type;
+-				new_type = realloc(field->type,
+-						   strlen(field->type) +
+-						   strlen(field->name) +
+-						   strlen(brackets) + 2);
+-				if (!new_type) {
++				ret = append(&field->type, " ", field->name);
++				if (ret < 0) {
+ 					free(brackets);
+ 					goto fail;
+ 				}
+-				field->type = new_type;
+-				strcat(field->type, " ");
+-				strcat(field->type, field->name);
++				ret = append(&field->type, "", brackets);
++
+ 				size_dynamic = type_size(field->name);
+ 				free_token(field->name);
+-				strcat(field->type, brackets);
+ 				field->name = field->alias = token;
+ 				type = read_token(&token);
+ 			} else {
+-				char *new_type;
+-				new_type = realloc(field->type,
+-						   strlen(field->type) +
+-						   strlen(brackets) + 1);
+-				if (!new_type) {
++				ret = append(&field->type, "", brackets);
++				if (ret < 0) {
+ 					free(brackets);
+ 					goto fail;
+ 				}
+-				field->type = new_type;
+-				strcat(field->type, brackets);
+ 			}
+ 			free(brackets);
+ 		}
+@@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok)
+ 		/* could just be a type pointer */
+ 		if ((strcmp(arg->op.op, "*") == 0) &&
+ 		    type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) {
+-			char *new_atom;
++			int ret;
+ 
+ 			if (left->type != TEP_PRINT_ATOM) {
+ 				do_warning_event(event, "bad pointer type");
+ 				goto out_free;
+ 			}
+-			new_atom = realloc(left->atom.atom,
+-					    strlen(left->atom.atom) + 3);
+-			if (!new_atom)
++			ret = append(&left->atom.atom, " ", "*");
++			if (ret < 0)
+ 				goto out_warn_free;
+ 
+-			left->atom.atom = new_atom;
+-			strcat(left->atom.atom, " *");
+ 			free(arg->op.op);
+ 			*arg = *left;
+ 			free(left);
+@@ -3151,18 +3173,15 @@ process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
+ 		}
+ 		/* atoms can be more than one token long */
+ 		while (type == TEP_EVENT_ITEM) {
+-			char *new_atom;
+-			new_atom = realloc(atom,
+-					   strlen(atom) + strlen(token) + 2);
+-			if (!new_atom) {
++			int ret;
++
++			ret = append(&atom, " ", token);
++			if (ret < 0) {
+ 				free(atom);
+ 				*tok = NULL;
+ 				free_token(token);
+ 				return TEP_EVENT_ERROR;
+ 			}
+-			atom = new_atom;
+-			strcat(atom, " ");
+-			strcat(atom, token);
+ 			free_token(token);
+ 			type = read_token_item(&token);
+ 		}
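
The append() helper introduced above folds each of the open-coded
realloc()-plus-strcat() sequences into one call. A self-contained
illustration of the calling convention it assumes (the buffer must
already be an allocated, NUL-terminated string, since the helper takes
strlen(*buf) unconditionally; the main() harness is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same shape as the append() added to event-parse.c above. */
static int append(char **buf, const char *delim, const char *str)
{
	char *new_buf;

	new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1);
	if (!new_buf)
		return -1;
	strcat(new_buf, delim);
	strcat(new_buf, str);
	*buf = new_buf;
	return 0;
}

int main(void)
{
	char *type = strdup("unsigned");	/* must start non-NULL */

	if (!type || append(&type, " ", "long") < 0 ||
	    append(&type, " ", "int") < 0) {
		free(type);
		return 1;
	}
	printf("%s\n", type);	/* prints "unsigned long int" */
	free(type);
	return 0;
}
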
+diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
+index 8155c2ea7ccb..a5e994a68d88 100755
+--- a/tools/testing/selftests/tpm2/test_smoke.sh
++++ b/tools/testing/selftests/tpm2/test_smoke.sh
+@@ -1,10 +1,5 @@
+-#!/bin/bash
++#!/bin/sh
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+ 
+ python -m unittest -v tpm2_tests.SmokeTest
+ python -m unittest -v tpm2_tests.AsyncTest
+-
+-CLEAR_CMD=$(which tpm2_clear)
+-if [ -n $CLEAR_CMD ]; then
+-	tpm2_clear -T device
+-fi
+diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
+index a6f5e346635e..3ded3011b642 100755
+--- a/tools/testing/selftests/tpm2/test_space.sh
++++ b/tools/testing/selftests/tpm2/test_space.sh
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+ 
+ python -m unittest -v tpm2_tests.SpaceTest



Thread overview: 25+ messages
2020-07-09 12:15 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2020-08-27 13:19 [gentoo-commits] proj/linux-patches:5.7 commit in: / Mike Pagano
2020-08-26 11:17 Mike Pagano
2020-08-21 11:43 Alice Ferrazzi
2020-08-19 14:55 Mike Pagano
2020-08-19  9:31 Alice Ferrazzi
2020-08-12 23:32 Alice Ferrazzi
2020-08-07 12:13 Alice Ferrazzi
2020-08-05 14:36 Thomas Deutschmann
2020-07-31 18:07 Mike Pagano
2020-07-29 12:43 Mike Pagano
2020-07-22 12:59 Mike Pagano
2020-07-16 11:22 Mike Pagano
2020-07-01 12:24 Mike Pagano
2020-06-29 17:37 Mike Pagano
2020-06-29 17:32 Mike Pagano
2020-06-24 16:49 Mike Pagano
2020-06-22 11:25 Mike Pagano
2020-06-18 17:33 Mike Pagano
2020-06-17 16:41 Mike Pagano
2020-06-10 19:41 Mike Pagano
2020-06-07 21:57 Mike Pagano
2020-05-26 17:59 Mike Pagano
2020-05-26 17:49 Mike Pagano
2020-05-04 20:59 Mike Pagano
