From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Fri, 28 Feb 2020 18:31:31 +0000 (UTC)
Message-ID: <1582914678.73ca5fd154594c0936d64b6e648d3083d1826fe2.mpagano@gentoo>

commit:     73ca5fd154594c0936d64b6e648d3083d1826fe2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 28 18:31:18 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 28 18:31:18 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73ca5fd1

Linux patch 5.5.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-5.5.7.patch | 6813 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6817 insertions(+)

diff --git a/0000_README b/0000_README
index ff99e11..7611ed2 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-5.5.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.5.6
 
+Patch:  1006_linux-5.5.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.5.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.5.7.patch b/1006_linux-5.5.7.patch
new file mode 100644
index 0000000..345bb5c
--- /dev/null
+++ b/1006_linux-5.5.7.patch
@@ -0,0 +1,6813 @@
+diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
+index d4a85d535bf9..4a9d9c794ee5 100644
+--- a/Documentation/arm64/tagged-address-abi.rst
++++ b/Documentation/arm64/tagged-address-abi.rst
+@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
+ how the user addresses are used by the kernel:
+ 
+ 1. User addresses not accessed by the kernel but used for address space
+-   management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
+-   of valid tagged pointers in this context is always allowed.
++   management (e.g. ``mprotect()``, ``madvise()``). The use of valid
++   tagged pointers in this context is allowed with the exception of
++   ``brk()``, ``mmap()`` and the ``new_address`` argument to
++   ``mremap()`` as these have the potential to alias with existing
++   user addresses.
++
++   NOTE: This behaviour changed in v5.6 and so some earlier kernels may
++   incorrectly accept valid tagged pointers for the ``brk()``,
++   ``mmap()`` and ``mremap()`` system calls.
+ 
+ 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
+    relaxation is disabled by default and the application thread needs to
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 44bc9d7f04a4..e73a47a881b0 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8302,7 +8302,7 @@ M:	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+ M:	Rodrigo Vivi <rodrigo.vivi@intel.com>
+ L:	intel-gfx@lists.freedesktop.org
+ W:	https://01.org/linuxgraphics/
+-B:	https://01.org/linuxgraphics/documentation/how-report-bugs
++B:	https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ C:	irc://chat.freenode.net/intel-gfx
+ Q:	http://patchwork.freedesktop.org/project/intel-gfx/
+ T:	git git://anongit.freedesktop.org/drm-intel
+diff --git a/Makefile b/Makefile
+index 7fb236f30926..0f64b92fa39a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
+index 73834996c4b6..5de132100b6d 100644
+--- a/arch/arm64/include/asm/lse.h
++++ b/arch/arm64/include/asm/lse.h
+@@ -6,7 +6,7 @@
+ 
+ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+ 
+-#define __LSE_PREAMBLE	".arch armv8-a+lse\n"
++#define __LSE_PREAMBLE	".arch_extension lse\n"
+ 
+ #include <linux/compiler_types.h>
+ #include <linux/export.h>
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index a4f9ca5479b0..4d94676e5a8b 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void)
+ 	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
+ 
+ #define untagged_addr(addr)	({					\
+-	u64 __addr = (__force u64)addr;					\
++	u64 __addr = (__force u64)(addr);					\
+ 	__addr &= __untagged_addr(__addr);				\
+ 	(__force __typeof__(addr))__addr;				\
+ })
+diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
+index 5accda2767be..a3301bab9231 100644
+--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
++++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <dt-bindings/clock/jz4740-cgu.h>
++#include <dt-bindings/clock/ingenic,tcu.h>
+ 
+ / {
+ 	#address-cells = <1>;
+@@ -45,14 +46,6 @@
+ 		#clock-cells = <1>;
+ 	};
+ 
+-	watchdog: watchdog@10002000 {
+-		compatible = "ingenic,jz4740-watchdog";
+-		reg = <0x10002000 0x10>;
+-
+-		clocks = <&cgu JZ4740_CLK_RTC>;
+-		clock-names = "rtc";
+-	};
+-
+ 	tcu: timer@10002000 {
+ 		compatible = "ingenic,jz4740-tcu", "simple-mfd";
+ 		reg = <0x10002000 0x1000>;
+@@ -73,6 +66,14 @@
+ 
+ 		interrupt-parent = <&intc>;
+ 		interrupts = <23 22 21>;
++
++		watchdog: watchdog@0 {
++			compatible = "ingenic,jz4740-watchdog";
++			reg = <0x0 0xc>;
++
++			clocks = <&tcu TCU_CLK_WDT>;
++			clock-names = "wdt";
++		};
+ 	};
+ 
+ 	rtc_dev: rtc@10003000 {
+diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
+index f928329b034b..bb89653d16a3 100644
+--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
++++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <dt-bindings/clock/jz4780-cgu.h>
++#include <dt-bindings/clock/ingenic,tcu.h>
+ #include <dt-bindings/dma/jz4780-dma.h>
+ 
+ / {
+@@ -67,6 +68,14 @@
+ 
+ 		interrupt-parent = <&intc>;
+ 		interrupts = <27 26 25>;
++
++		watchdog: watchdog@0 {
++			compatible = "ingenic,jz4780-watchdog";
++			reg = <0x0 0xc>;
++
++			clocks = <&tcu TCU_CLK_WDT>;
++			clock-names = "wdt";
++		};
+ 	};
+ 
+ 	rtc_dev: rtc@10003000 {
+@@ -348,14 +357,6 @@
+ 		status = "disabled";
+ 	};
+ 
+-	watchdog: watchdog@10002000 {
+-		compatible = "ingenic,jz4780-watchdog";
+-		reg = <0x10002000 0x10>;
+-
+-		clocks = <&cgu JZ4780_CLK_RTCLK>;
+-		clock-names = "rtc";
+-	};
+-
+ 	nemc: nemc@13410000 {
+ 		compatible = "ingenic,jz4780-nemc";
+ 		reg = <0x13410000 0x10000>;
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 7f1fd41e3065..9b97c6091c5c 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
+ /*
+  * Some number of bits at the level of the page table that points to
+  * a hugepte are used to encode the size.  This masks those bits.
++ * On 8xx, HW assistance requires 4k alignment for the hugepte.
+  */
++#ifdef CONFIG_PPC_8xx
++#define HUGEPD_SHIFT_MASK     0xfff
++#else
+ #define HUGEPD_SHIFT_MASK     0x3f
++#endif
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index a1eaffe868de..7b048cee767c 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
+ 			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ 			eeh_handle_normal_event(pe);
+ 		} else {
++			eeh_for_each_pe(pe, tmp_pe)
++				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
++					edev->mode &= ~EEH_DEV_NO_HANDLER;
++
++			/* Notify all devices to be down */
++			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
++			eeh_set_channel_state(pe, pci_channel_io_perm_failure);
++			eeh_pe_report(
++				"error_detected(permanent failure)", pe,
++				eeh_report_failure, NULL);
++
+ 			pci_lock_rescan_remove();
+ 			list_for_each_entry(hose, &hose_list, list_node) {
+ 				phb_pe = eeh_phb_pe_get(hose);
+@@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
+ 				    (phb_pe->state & EEH_PE_RECOVERING))
+ 					continue;
+ 
+-				eeh_for_each_pe(pe, tmp_pe)
+-					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+-						edev->mode &= ~EEH_DEV_NO_HANDLER;
+-
+-				/* Notify all devices to be down */
+-				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+-				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+-				eeh_pe_report(
+-					"error_detected(permanent failure)", pe,
+-					eeh_report_failure, NULL);
+ 				bus = eeh_pe_bus_get(phb_pe);
+ 				if (!bus) {
+ 					pr_err("%s: Cannot find PCI bus for "
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 59bb4f4ae316..13f699256258 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -778,7 +778,7 @@ fast_exception_return:
+ 1:	lis	r3,exc_exit_restart_end@ha
+ 	addi	r3,r3,exc_exit_restart_end@l
+ 	cmplw	r12,r3
+-#if CONFIG_PPC_BOOK3S_601
++#ifdef CONFIG_PPC_BOOK3S_601
+ 	bge	2b
+ #else
+ 	bge	3f
+@@ -786,7 +786,7 @@ fast_exception_return:
+ 	lis	r4,exc_exit_restart@ha
+ 	addi	r4,r4,exc_exit_restart@l
+ 	cmplw	r12,r4
+-#if CONFIG_PPC_BOOK3S_601
++#ifdef CONFIG_PPC_BOOK3S_601
+ 	blt	2b
+ #else
+ 	blt	3f
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 19f583e18402..98d8b6832fcb 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -289,7 +289,7 @@ InstructionTLBMiss:
+ 	 * set.  All other Linux PTE bits control the behavior
+ 	 * of the MMU.
+ 	 */
+-	rlwimi	r10, r10, 0, 0x0f00	/* Clear bits 20-23 */
++	rlwinm	r10, r10, 0, ~0x0f00	/* Clear bits 20-23 */
+ 	rlwimi	r10, r10, 4, 0x0400	/* Copy _PAGE_EXEC into bit 21 */
+ 	ori	r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
+ 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index e6c30cee6abf..d215f9554553 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
+ 	 * normal/non-checkpointed stack pointer.
+ 	 */
+ 
++	unsigned long ret = tsk->thread.regs->gpr[1];
++
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	BUG_ON(tsk != current);
+ 
+ 	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
++		preempt_disable();
+ 		tm_reclaim_current(TM_CAUSE_SIGNAL);
+ 		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+-			return tsk->thread.ckpt_regs.gpr[1];
++			ret = tsk->thread.ckpt_regs.gpr[1];
++
++		/*
++		 * If we treclaim, we must clear the current thread's TM bits
++		 * before re-enabling preemption. Otherwise we might be
++		 * preempted and have the live MSR[TS] changed behind our back
++		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
++		 * enter the signal handler in non-transactional state.
++		 */
++		tsk->thread.regs->msr &= ~MSR_TS_MASK;
++		preempt_enable();
+ 	}
+ #endif
+-	return tsk->thread.regs->gpr[1];
++	return ret;
+ }
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 98600b276f76..1b090a76b444 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+  */
+ static int save_tm_user_regs(struct pt_regs *regs,
+ 			     struct mcontext __user *frame,
+-			     struct mcontext __user *tm_frame, int sigret)
++			     struct mcontext __user *tm_frame, int sigret,
++			     unsigned long msr)
+ {
+-	unsigned long msr = regs->msr;
+-
+ 	WARN_ON(tm_suspend_disabled);
+ 
+-	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+-	 * just indicates to userland that we were doing a transaction, but we
+-	 * don't want to return in transactional state.  This also ensures
+-	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+-	 */
+-	regs->msr &= ~MSR_TS_MASK;
+-
+ 	/* Save both sets of general registers */
+ 	if (save_general_regs(&current->thread.ckpt_regs, frame)
+ 	    || save_general_regs(regs, tm_frame))
+@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	int sigret;
+ 	unsigned long tramp;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		if (__put_user((unsigned long)&rt_sf->uc_transact,
+ 			       &rt_sf->uc.uc_link) ||
+ 		    __put_user((unsigned long)tm_frame,
+ 			       &rt_sf->uc_transact.uc_regs))
+ 			goto badframe;
+-		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
++		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
+ 			goto badframe;
+ 	}
+ 	else
+@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	int sigret;
+ 	unsigned long tramp;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	tm_mctx = &frame->mctx_transact;
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+-				      sigret))
++				      sigret, msr))
+ 			goto badframe;
+ 	}
+ 	else
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 117515564ec7..84ed2e77ef9c 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
+ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 				 struct sigcontext __user *tm_sc,
+ 				 struct task_struct *tsk,
+-				 int signr, sigset_t *set, unsigned long handler)
++				 int signr, sigset_t *set, unsigned long handler,
++				 unsigned long msr)
+ {
+ 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ 	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
+@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
+ #endif
+ 	struct pt_regs *regs = tsk->thread.regs;
+-	unsigned long msr = tsk->thread.regs->msr;
+ 	long err = 0;
+ 
+ 	BUG_ON(tsk != current);
+ 
+-	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
++	BUG_ON(!MSR_TM_ACTIVE(msr));
+ 
+ 	WARN_ON(tm_suspend_disabled);
+ 
+@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 	 */
+ 	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
+ 
+-	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+-	 * just indicates to userland that we were doing a transaction, but we
+-	 * don't want to return in transactional state.  This also ensures
+-	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+-	 */
+-	regs->msr &= ~MSR_TS_MASK;
+-
+ #ifdef CONFIG_ALTIVEC
+ 	err |= __put_user(v_regs, &sc->v_regs);
+ 	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 	unsigned long newsp = 0;
+ 	long err = 0;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		/* The ucontext_t passed to userland points to the second
+ 		 * ucontext_t (for transactional state) with its uc_link ptr.
+ 		 */
+@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+ 					    &frame->uc_transact.uc_mcontext,
+ 					    tsk, ksig->sig, NULL,
+-					    (unsigned long)ksig->ka.sa.sa_handler);
++					    (unsigned long)ksig->ka.sa.sa_handler,
++					    msr);
+ 	} else
+ #endif
+ 	{
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 73d4873fc7f8..33b3461d91e8 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+ 	if (pshift >= pdshift) {
+ 		cachep = PGT_CACHE(PTE_T_ORDER);
+ 		num_hugepd = 1 << (pshift - pdshift);
++		new = NULL;
+ 	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+-		cachep = PGT_CACHE(PTE_INDEX_SIZE);
++		cachep = NULL;
+ 		num_hugepd = 1;
++		new = pte_alloc_one(mm);
+ 	} else {
+ 		cachep = PGT_CACHE(pdshift - pshift);
+ 		num_hugepd = 1;
++		new = NULL;
+ 	}
+ 
+-	if (!cachep) {
++	if (!cachep && !new) {
+ 		WARN_ONCE(1, "No page table cache created for hugetlb tables");
+ 		return -ENOMEM;
+ 	}
+ 
+-	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
++	if (cachep)
++		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ 
+ 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
+ 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
+@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+ 	if (i < num_hugepd) {
+ 		for (i = i - 1 ; i >= 0; i--, hpdp--)
+ 			*hpdp = __hugepd(0);
+-		kmem_cache_free(cachep, new);
++		if (cachep)
++			kmem_cache_free(cachep, new);
++		else
++			pte_free(mm, new);
+ 	} else {
+ 		kmemleak_ignore(new);
+ 	}
+@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
+ 	if (shift >= pdshift)
+ 		hugepd_free(tlb, hugepte);
+ 	else if (IS_ENABLED(CONFIG_PPC_8xx))
+-		pgtable_free_tlb(tlb, hugepte,
+-				 get_hugepd_cache_index(PTE_INDEX_SIZE));
++		pgtable_free_tlb(tlb, hugepte, 0);
+ 	else
+ 		pgtable_free_tlb(tlb, hugepte,
+ 				 get_hugepd_cache_index(pdshift - shift));
+@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
+ 		 * if we have pdshift and shift value same, we don't
+ 		 * use pgt cache for hugepd.
+ 		 */
+-		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
+-			pgtable_cache_add(PTE_INDEX_SIZE);
+-		else if (pdshift > shift)
+-			pgtable_cache_add(pdshift - shift);
+-		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
++		if (pdshift > shift) {
++			if (!IS_ENABLED(CONFIG_PPC_8xx))
++				pgtable_cache_add(pdshift - shift);
++		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
++			   IS_ENABLED(CONFIG_PPC_8xx)) {
+ 			pgtable_cache_add(PTE_T_ORDER);
++		}
+ 
+ 		configured = true;
+ 	}
+diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
+index 5d12352545c5..5591243d673e 100644
+--- a/arch/s390/boot/kaslr.c
++++ b/arch/s390/boot/kaslr.c
+@@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
+ 		*(unsigned long *) prng.parm_block ^= seed;
+ 		for (i = 0; i < 16; i++) {
+ 			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
+-				  (char *) entropy, (char *) entropy,
++				  (u8 *) entropy, (u8 *) entropy,
+ 				  sizeof(entropy));
+ 			memcpy(prng.parm_block, entropy, sizeof(entropy));
+ 		}
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index 85e944f04c70..1019efd85b9d 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
+ 
+ static inline void storage_key_init_range(unsigned long start, unsigned long end)
+ {
+-	if (PAGE_DEFAULT_KEY)
++	if (PAGE_DEFAULT_KEY != 0)
+ 		__storage_key_init_range(start, end);
+ }
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 178e4e1a47f5..7425c83fd343 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1115,7 +1115,7 @@ struct kvm_x86_ops {
+ 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+-	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
++	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 084e98da04a7..717660f82f8f 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -512,6 +512,8 @@
+ #define MSR_K7_HWCR			0xc0010015
+ #define MSR_K7_HWCR_SMMLOCK_BIT		0
+ #define MSR_K7_HWCR_SMMLOCK		BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
++#define MSR_K7_HWCR_IRPERF_EN_BIT	30
++#define MSR_K7_HWCR_IRPERF_EN		BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+ #define MSR_K7_FID_VID_CTL		0xc0010041
+ #define MSR_K7_FID_VID_STATUS		0xc0010042
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 62c30279be77..c3f4dd4ae155 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -28,6 +28,7 @@
+ 
+ static const int amd_erratum_383[];
+ static const int amd_erratum_400[];
++static const int amd_erratum_1054[];
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+ 
+ /*
+@@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ 	if (!cpu_has(c, X86_FEATURE_XENPV))
+ 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++	/*
++	 * Turn on the Instructions Retired free counter on machines not
++	 * susceptible to erratum #1054 "Instructions Retired Performance
++	 * Counter May Be Inaccurate".
++	 */
++	if (cpu_has(c, X86_FEATURE_IRPERF) &&
++	    !cpu_has_amd_erratum(c, amd_erratum_1054))
++		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ }
+ 
+ #ifdef CONFIG_X86_32
+@@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] =
+ static const int amd_erratum_383[] =
+ 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+ 
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++	AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
+ 
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index d6cf5c18a7e0..f031c651dd32 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = {
+ 	.store			= store,
+ };
+ 
++static void threshold_block_release(struct kobject *kobj);
++
+ static struct kobj_type threshold_ktype = {
+ 	.sysfs_ops		= &threshold_ops,
+ 	.default_attrs		= default_attrs,
++	.release		= threshold_block_release,
+ };
+ 
+ static const char *get_name(unsigned int bank, struct threshold_block *b)
+@@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ 	return buf_mcatype;
+ }
+ 
+-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+-				     unsigned int block, u32 address)
++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
++				     unsigned int bank, unsigned int block,
++				     u32 address)
+ {
+ 	struct threshold_block *b = NULL;
+ 	u32 low, high;
+@@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+ 
+ 	INIT_LIST_HEAD(&b->miscj);
+ 
+-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
+-		list_add(&b->miscj,
+-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+-	} else {
+-		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+-	}
++	if (tb->blocks)
++		list_add(&b->miscj, &tb->blocks->miscj);
++	else
++		tb->blocks = b;
+ 
+-	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
+-				   per_cpu(threshold_banks, cpu)[bank]->kobj,
+-				   get_name(bank, b));
++	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
+ 	if (err)
+ 		goto out_free;
+ recurse:
+@@ -1258,7 +1258,7 @@ recurse:
+ 	if (!address)
+ 		return 0;
+ 
+-	err = allocate_threshold_blocks(cpu, bank, block, address);
++	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
+ 	if (err)
+ 		goto out_free;
+ 
+@@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		goto out_free;
+ 	}
+ 
+-	per_cpu(threshold_banks, cpu)[bank] = b;
+-
+ 	if (is_shared_bank(bank)) {
+ 		refcount_set(&b->cpus, 1);
+ 
+@@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		}
+ 	}
+ 
+-	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
+-	if (!err)
+-		goto out;
++	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
++	if (err)
++		goto out_free;
++
++	per_cpu(threshold_banks, cpu)[bank] = b;
++
++	return 0;
+ 
+  out_free:
+ 	kfree(b);
+@@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 	return err;
+ }
+ 
+-static void deallocate_threshold_block(unsigned int cpu,
+-						 unsigned int bank)
++static void threshold_block_release(struct kobject *kobj)
++{
++	kfree(to_block(kobj));
++}
++
++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+ {
+ 	struct threshold_block *pos = NULL;
+ 	struct threshold_block *tmp = NULL;
+@@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu,
+ 		return;
+ 
+ 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+-		kobject_put(&pos->kobj);
+ 		list_del(&pos->miscj);
+-		kfree(pos);
++		kobject_put(&pos->kobj);
+ 	}
+ 
+-	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+-	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
++	kobject_put(&head->blocks->kobj);
+ }
+ 
+ static void __threshold_remove_blocks(struct threshold_bank *b)
+diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
+index 4d4f5d9faac3..23054909c8dd 100644
+--- a/arch/x86/kernel/ima_arch.c
++++ b/arch/x86/kernel/ima_arch.c
+@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
+ 
+ static enum efi_secureboot_mode get_sb_mode(void)
+ {
+-	efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
+-	efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
+ 	efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+ 	efi_status_t status;
+ 	unsigned long size;
+@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
+ 	}
+ 
+ 	/* Get variable contents into buffer */
+-	status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
++	status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
+ 				  NULL, &size, &secboot);
+ 	if (status == EFI_NOT_FOUND) {
+ 		pr_info("ima: secureboot mode disabled\n");
+@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
+ 	}
+ 
+ 	size = sizeof(setupmode);
+-	status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
++	status = efi.get_variable(L"SetupMode", &efi_variable_guid,
+ 				  NULL, &size, &setupmode);
+ 
+ 	if (status != EFI_SUCCESS)	/* ignore unknown SetupMode */
+diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
+index 8ecd48d31800..5ddcaacef291 100644
+--- a/arch/x86/kvm/irq_comm.c
++++ b/arch/x86/kvm/irq_comm.c
+@@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
+ 
+ 			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
+ 
+-			if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
++			if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
+ 						irq.dest_id, irq.dest_mode))
+ 				__set_bit(irq.vector, ioapic_handled_vectors);
+ 		}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 3323115f52d5..f05123acaa64 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -630,9 +630,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+ 	u8 val;
+-	if (pv_eoi_get_user(vcpu, &val) < 0)
++	if (pv_eoi_get_user(vcpu, &val) < 0) {
+ 		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
+ 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++		return false;
++	}
+ 	return val & 0x1;
+ }
+ 
+@@ -1049,11 +1051,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+ 						       apic->regs + APIC_TMR);
+ 		}
+ 
+-		if (vcpu->arch.apicv_active)
+-			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+-		else {
++		if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ 			kvm_lapic_set_irr(vector, apic);
+-
+ 			kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 			kvm_vcpu_kick(vcpu);
+ 		}
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8b0620f3aed6..aace3b6ca2f7 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5160,8 +5160,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ 	return;
+ }
+ 
+-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ {
++	if (!vcpu->arch.apicv_active)
++		return -1;
++
+ 	kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ 	smp_mb__after_atomic();
+ 
+@@ -5173,6 +5176,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ 		put_cpu();
+ 	} else
+ 		kvm_vcpu_wake_up(vcpu);
++
++	return 0;
+ }
+ 
+ static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
+index 283bdb7071af..f486e2606247 100644
+--- a/arch/x86/kvm/vmx/capabilities.h
++++ b/arch/x86/kvm/vmx/capabilities.h
+@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
+ extern bool __read_mostly enable_unrestricted_guest;
+ extern bool __read_mostly enable_ept_ad_bits;
+ extern bool __read_mostly enable_pml;
++extern bool __read_mostly enable_apicv;
+ extern int __read_mostly pt_mode;
+ 
+ #define PT_MODE_SYSTEM		0
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 3babe5e29429..af5a36dfc88a 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5304,24 +5304,17 @@ fail:
+ 	return 1;
+ }
+ 
+-
+-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+-				       struct vmcs12 *vmcs12)
++/*
++ * Return true if an IO instruction with the specified port and size should cause
++ * a VM-exit into L1.
++ */
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++				 int size)
+ {
+-	unsigned long exit_qualification;
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	gpa_t bitmap, last_bitmap;
+-	unsigned int port;
+-	int size;
+ 	u8 b;
+ 
+-	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+-		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+-
+-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-
+-	port = exit_qualification >> 16;
+-	size = (exit_qualification & 7) + 1;
+-
+ 	last_bitmap = (gpa_t)-1;
+ 	b = -1;
+ 
+@@ -5348,6 +5341,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ 	return false;
+ }
+ 
++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
++				       struct vmcs12 *vmcs12)
++{
++	unsigned long exit_qualification;
++	unsigned short port;
++	int size;
++
++	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
++
++	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++
++	port = exit_qualification >> 16;
++	size = (exit_qualification & 7) + 1;
++
++	return nested_vmx_check_io_bitmaps(vcpu, port, size);
++}
++
+ /*
+  * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
+  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
+@@ -5968,8 +5979,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
+  * bit in the high half is on if the corresponding bit in the control field
+  * may be on. See also vmx_control_verify().
+  */
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+-				bool apicv)
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+ {
+ 	/*
+ 	 * Note that as a general rule, the high half of the MSRs (bits in
+@@ -5996,7 +6006,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+ 		PIN_BASED_EXT_INTR_MASK |
+ 		PIN_BASED_NMI_EXITING |
+ 		PIN_BASED_VIRTUAL_NMIS |
+-		(apicv ? PIN_BASED_POSTED_INTR : 0);
++		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
+ 	msrs->pinbased_ctls_high |=
+ 		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ 		PIN_BASED_VMX_PREEMPTION_TIMER;
+diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
+index fc874d4ead0f..e1c7faed7df4 100644
+--- a/arch/x86/kvm/vmx/nested.h
++++ b/arch/x86/kvm/vmx/nested.h
+@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
+ };
+ 
+ void vmx_leave_nested(struct kvm_vcpu *vcpu);
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+-				bool apicv);
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
+ void nested_vmx_hardware_unsetup(void);
+ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+ void nested_vmx_set_vmcs_shadowing_bitmap(void);
+@@ -34,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ 			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++				 int size);
+ 
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index c0d837c37f34..be438bc7cfa3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+ static bool __read_mostly fasteoi = 1;
+ module_param(fasteoi, bool, S_IRUGO);
+ 
+-static bool __read_mostly enable_apicv = 1;
++bool __read_mostly enable_apicv = 1;
+ module_param(enable_apicv, bool, S_IRUGO);
+ 
+ /*
+@@ -3848,24 +3848,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+  * 2. If target vcpu isn't running(root mode), kick it to pick up the
+  * interrupt from PIR in next vmentry.
+  */
+-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	int r;
+ 
+ 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ 	if (!r)
+-		return;
++		return 0;
++
++	if (!vcpu->arch.apicv_active)
++		return -1;
+ 
+ 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+-		return;
++		return 0;
+ 
+ 	/* If a previous notification has sent the IPI, nothing to do.  */
+ 	if (pi_test_and_set_on(&vmx->pi_desc))
+-		return;
++		return 0;
+ 
+ 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ 		kvm_vcpu_kick(vcpu);
++
++	return 0;
+ }
+ 
+ /*
+@@ -6803,8 +6808,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+ 
+ 	if (nested)
+ 		nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
+-					   vmx_capability.ept,
+-					   kvm_vcpu_apicv_active(&vmx->vcpu));
++					   vmx_capability.ept);
+ 	else
+ 		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
+ 
+@@ -6884,8 +6888,7 @@ static int __init vmx_check_processor_compat(void)
+ 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
+ 		return -EIO;
+ 	if (nested)
+-		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
+-					   enable_apicv);
++		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
+ 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ 		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ 				smp_processor_id());
+@@ -7146,6 +7149,39 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+ 	to_vmx(vcpu)->req_immediate_exit = true;
+ }
+ 
++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
++				  struct x86_instruction_info *info)
++{
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++	unsigned short port;
++	bool intercept;
++	int size;
++
++	if (info->intercept == x86_intercept_in ||
++	    info->intercept == x86_intercept_ins) {
++		port = info->src_val;
++		size = info->dst_bytes;
++	} else {
++		port = info->dst_val;
++		size = info->src_bytes;
++	}
++
++	/*
++	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
++	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
++	 * control.
++	 *
++	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
++	 */
++	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++		intercept = nested_cpu_has(vmcs12,
++					   CPU_BASED_UNCOND_IO_EXITING);
++	else
++		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
++
++	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
++}
++
+ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 			       struct x86_instruction_info *info,
+ 			       enum x86_intercept_stage stage)
+@@ -7153,19 +7189,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ 
++	switch (info->intercept) {
+ 	/*
+ 	 * RDPID causes #UD if disabled through secondary execution controls.
+ 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
+ 	 */
+-	if (info->intercept == x86_intercept_rdtscp &&
+-	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+-		ctxt->exception.vector = UD_VECTOR;
+-		ctxt->exception.error_code_valid = false;
+-		return X86EMUL_PROPAGATE_FAULT;
+-	}
++	case x86_intercept_rdtscp:
++		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
++			ctxt->exception.vector = UD_VECTOR;
++			ctxt->exception.error_code_valid = false;
++			return X86EMUL_PROPAGATE_FAULT;
++		}
++		break;
++
++	case x86_intercept_in:
++	case x86_intercept_ins:
++	case x86_intercept_out:
++	case x86_intercept_outs:
++		return vmx_check_intercept_io(vcpu, info);
+ 
+ 	/* TODO: check more intercepts... */
+-	return X86EMUL_CONTINUE;
++	default:
++		break;
++	}
++
++	return X86EMUL_UNHANDLEABLE;
+ }
+ 
+ #ifdef CONFIG_X86_64
+@@ -7747,7 +7795,7 @@ static __init int hardware_setup(void)
+ 
+ 	if (nested) {
+ 		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
+-					   vmx_capability.ept, enable_apicv);
++					   vmx_capability.ept);
+ 
+ 		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+ 		if (r)
+diff --git a/crypto/hash_info.c b/crypto/hash_info.c
+index c754cb75dd1a..a49ff96bde77 100644
+--- a/crypto/hash_info.c
++++ b/crypto/hash_info.c
+@@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
+ 	[HASH_ALGO_TGR_128]	= "tgr128",
+ 	[HASH_ALGO_TGR_160]	= "tgr160",
+ 	[HASH_ALGO_TGR_192]	= "tgr192",
+-	[HASH_ALGO_SM3_256]	= "sm3-256",
++	[HASH_ALGO_SM3_256]	= "sm3",
+ 	[HASH_ALGO_STREEBOG_256] = "streebog256",
+ 	[HASH_ALGO_STREEBOG_512] = "streebog512",
+ };
+diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
+index 9e2f5a05c066..bad2257356fe 100644
+--- a/drivers/acpi/acpica/evevent.c
++++ b/drivers/acpi/acpica/evevent.c
+@@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
+ 		 handler) (acpi_gbl_fixed_event_handlers[event].context));
+ }
+ 
++/*******************************************************************************
++ *
++ * FUNCTION:    acpi_any_fixed_event_status_set
++ *
++ * PARAMETERS:  None
++ *
++ * RETURN:      TRUE or FALSE
++ *
++ * DESCRIPTION: Checks the PM status register for active fixed events
++ *
++ ******************************************************************************/
++
++u32 acpi_any_fixed_event_status_set(void)
++{
++	acpi_status status;
++	u32 in_status;
++	u32 in_enable;
++	u32 i;
++
++	status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
++	if (ACPI_FAILURE(status)) {
++		return (FALSE);
++	}
++
++	status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
++	if (ACPI_FAILURE(status)) {
++		return (FALSE);
++	}
++
++	/*
++	 * Check for all possible Fixed Events and dispatch those that are active
++	 */
++	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
++
++		/* Both the status and enable bits must be on for this event */
++
++		if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
++		    (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
++			return (TRUE);
++		}
++	}
++
++	return (FALSE);
++}
++
+ #endif				/* !ACPI_REDUCED_HARDWARE */
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 5672fa8cb300..ce59a3f32eac 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1002,6 +1002,13 @@ static bool acpi_s2idle_wake(void)
+ 		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ 			return true;
+ 
++		/*
++		 * If the status bit of any enabled fixed event is set, the
++		 * wakeup is regarded as valid.
++		 */
++		if (acpi_any_fixed_event_status_set())
++			return true;
++
+ 		/*
+ 		 * If there are no EC events to process and at least one of the
+ 		 * other enabled GPEs is active, the wakeup is regarded as a
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 4bfd1b14b390..11ea1aff40db 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -81,6 +81,7 @@ enum board_ids {
+ 
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
++static void ahci_shutdown_one(struct pci_dev *dev);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 				 unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -606,6 +607,7 @@ static struct pci_driver ahci_pci_driver = {
+ 	.id_table		= ahci_pci_tbl,
+ 	.probe			= ahci_init_one,
+ 	.remove			= ahci_remove_one,
++	.shutdown		= ahci_shutdown_one,
+ 	.driver = {
+ 		.pm		= &ahci_pci_pm_ops,
+ 	},
+@@ -1877,6 +1879,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ }
+ 
++static void ahci_shutdown_one(struct pci_dev *pdev)
++{
++	ata_pci_shutdown_one(pdev);
++}
++
+ static void ahci_remove_one(struct pci_dev *pdev)
+ {
+ 	pm_runtime_get_noresume(&pdev->dev);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6f4ab5c5b52d..42c8728f6117 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6767,6 +6767,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
+ 	ata_host_detach(host);
+ }
+ 
++void ata_pci_shutdown_one(struct pci_dev *pdev)
++{
++	struct ata_host *host = pci_get_drvdata(pdev);
++	int i;
++
++	for (i = 0; i < host->n_ports; i++) {
++		struct ata_port *ap = host->ports[i];
++
++		ap->pflags |= ATA_PFLAG_FROZEN;
++
++		/* Disable port interrupts */
++		if (ap->ops->freeze)
++			ap->ops->freeze(ap);
++
++		/* Stop the port DMA engines */
++		if (ap->ops->port_stop)
++			ap->ops->port_stop(ap);
++	}
++}
++
+ /* move to PCI subsystem */
+ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+ {
+@@ -7387,6 +7407,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+ 
+ #ifdef CONFIG_PCI
+ EXPORT_SYMBOL_GPL(pci_test_config_bits);
++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
+ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+ #ifdef CONFIG_PM
+ EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 485865fd0412..f19a03b62365 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
+ /* selects the fdc and drive, and enables the fdc's input/dma. */
+ static void set_fdc(int drive)
+ {
++	unsigned int new_fdc = fdc;
++
+ 	if (drive >= 0 && drive < N_DRIVE) {
+-		fdc = FDC(drive);
++		new_fdc = FDC(drive);
+ 		current_drive = drive;
+ 	}
+-	if (fdc != 1 && fdc != 0) {
++	if (new_fdc >= N_FDC) {
+ 		pr_info("bad fdc value\n");
+ 		return;
+ 	}
++	fdc = new_fdc;
+ 	set_dor(fdc, ~0, 8);
+ #if N_FDC > 1
+ 	set_dor(1 - fdc, ~8, 0);
+diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
+index 5a0d99d4fec0..9567e5197f74 100644
+--- a/drivers/char/tpm/Makefile
++++ b/drivers/char/tpm/Makefile
+@@ -21,9 +21,11 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
+ tpm-$(CONFIG_OF) += eventlog/of.o
+ obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
+ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
+-tpm_tis_spi_mod-y := tpm_tis_spi.o
+-tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
++
++obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
++tpm_tis_spi-y := tpm_tis_spi_main.o
++tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
++
+ obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
+ obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+ obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 13696deceae8..760329598b99 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -525,6 +525,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
+ 		return 0;
+ 	}
+ 
++	bank->crypto_id = HASH_ALGO__LAST;
++
+ 	return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
+ }
+ 
+diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
+deleted file mode 100644
+index d1754fd6c573..000000000000
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ /dev/null
+@@ -1,298 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (C) 2015 Infineon Technologies AG
+- * Copyright (C) 2016 STMicroelectronics SAS
+- *
+- * Authors:
+- * Peter Huewe <peter.huewe@infineon.com>
+- * Christophe Ricard <christophe-h.ricard@st.com>
+- *
+- * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+- *
+- * Device driver for TCG/TCPA TPM (trusted platform module).
+- * Specifications at www.trustedcomputinggroup.org
+- *
+- * This device driver implements the TPM interface as defined in
+- * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
+- * SPI access_.
+- *
+- * It is based on the original tpm_tis device driver from Leendert van
+- * Dorn and Kyleen Hall and Jarko Sakkinnen.
+- */
+-
+-#include <linux/acpi.h>
+-#include <linux/completion.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-
+-#include <linux/of_device.h>
+-#include <linux/spi/spi.h>
+-#include <linux/tpm.h>
+-
+-#include "tpm.h"
+-#include "tpm_tis_core.h"
+-#include "tpm_tis_spi.h"
+-
+-#define MAX_SPI_FRAMESIZE 64
+-
+-/*
+- * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+- * keep trying to read from the device until MISO goes high indicating the
+- * wait state has ended.
+- *
+- * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+- */
+-static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+-				    struct spi_transfer *spi_xfer)
+-{
+-	struct spi_message m;
+-	int ret, i;
+-
+-	if ((phy->iobuf[3] & 0x01) == 0) {
+-		// handle SPI wait states
+-		phy->iobuf[0] = 0;
+-
+-		for (i = 0; i < TPM_RETRY; i++) {
+-			spi_xfer->len = 1;
+-			spi_message_init(&m);
+-			spi_message_add_tail(spi_xfer, &m);
+-			ret = spi_sync_locked(phy->spi_device, &m);
+-			if (ret < 0)
+-				return ret;
+-			if (phy->iobuf[0] & 0x01)
+-				break;
+-		}
+-
+-		if (i == TPM_RETRY)
+-			return -ETIMEDOUT;
+-	}
+-
+-	return 0;
+-}
+-
+-int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+-			 u8 *in, const u8 *out)
+-{
+-	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+-	int ret = 0;
+-	struct spi_message m;
+-	struct spi_transfer spi_xfer;
+-	u8 transfer_len;
+-
+-	spi_bus_lock(phy->spi_device->master);
+-
+-	while (len) {
+-		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+-
+-		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+-		phy->iobuf[1] = 0xd4;
+-		phy->iobuf[2] = addr >> 8;
+-		phy->iobuf[3] = addr;
+-
+-		memset(&spi_xfer, 0, sizeof(spi_xfer));
+-		spi_xfer.tx_buf = phy->iobuf;
+-		spi_xfer.rx_buf = phy->iobuf;
+-		spi_xfer.len = 4;
+-		spi_xfer.cs_change = 1;
+-
+-		spi_message_init(&m);
+-		spi_message_add_tail(&spi_xfer, &m);
+-		ret = spi_sync_locked(phy->spi_device, &m);
+-		if (ret < 0)
+-			goto exit;
+-
+-		ret = phy->flow_control(phy, &spi_xfer);
+-		if (ret < 0)
+-			goto exit;
+-
+-		spi_xfer.cs_change = 0;
+-		spi_xfer.len = transfer_len;
+-		spi_xfer.delay_usecs = 5;
+-
+-		if (in) {
+-			spi_xfer.tx_buf = NULL;
+-		} else if (out) {
+-			spi_xfer.rx_buf = NULL;
+-			memcpy(phy->iobuf, out, transfer_len);
+-			out += transfer_len;
+-		}
+-
+-		spi_message_init(&m);
+-		spi_message_add_tail(&spi_xfer, &m);
+-		reinit_completion(&phy->ready);
+-		ret = spi_sync_locked(phy->spi_device, &m);
+-		if (ret < 0)
+-			goto exit;
+-
+-		if (in) {
+-			memcpy(in, phy->iobuf, transfer_len);
+-			in += transfer_len;
+-		}
+-
+-		len -= transfer_len;
+-	}
+-
+-exit:
+-	spi_bus_unlock(phy->spi_device->master);
+-	return ret;
+-}
+-
+-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+-				  u16 len, u8 *result)
+-{
+-	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
+-}
+-
+-static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+-				   u16 len, const u8 *value)
+-{
+-	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
+-}
+-
+-int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+-{
+-	__le16 result_le;
+-	int rc;
+-
+-	rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+-				       (u8 *)&result_le);
+-	if (!rc)
+-		*result = le16_to_cpu(result_le);
+-
+-	return rc;
+-}
+-
+-int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+-{
+-	__le32 result_le;
+-	int rc;
+-
+-	rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+-				       (u8 *)&result_le);
+-	if (!rc)
+-		*result = le32_to_cpu(result_le);
+-
+-	return rc;
+-}
+-
+-int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+-{
+-	__le32 value_le;
+-	int rc;
+-
+-	value_le = cpu_to_le32(value);
+-	rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+-					(u8 *)&value_le);
+-
+-	return rc;
+-}
+-
+-int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+-		     int irq, const struct tpm_tis_phy_ops *phy_ops)
+-{
+-	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+-	if (!phy->iobuf)
+-		return -ENOMEM;
+-
+-	phy->spi_device = spi;
+-
+-	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+-}
+-
+-static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
+-	.read_bytes = tpm_tis_spi_read_bytes,
+-	.write_bytes = tpm_tis_spi_write_bytes,
+-	.read16 = tpm_tis_spi_read16,
+-	.read32 = tpm_tis_spi_read32,
+-	.write32 = tpm_tis_spi_write32,
+-};
+-
+-static int tpm_tis_spi_probe(struct spi_device *dev)
+-{
+-	struct tpm_tis_spi_phy *phy;
+-	int irq;
+-
+-	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
+-			   GFP_KERNEL);
+-	if (!phy)
+-		return -ENOMEM;
+-
+-	phy->flow_control = tpm_tis_spi_flow_control;
+-
+-	/* If the SPI device has an IRQ then use that */
+-	if (dev->irq > 0)
+-		irq = dev->irq;
+-	else
+-		irq = -1;
+-
+-	init_completion(&phy->ready);
+-	return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+-}
+-
+-typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+-
+-static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+-{
+-	const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+-	tpm_tis_spi_probe_func probe_func;
+-
+-	probe_func = of_device_get_match_data(&spi->dev);
+-	if (!probe_func && spi_dev_id)
+-		probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+-	if (!probe_func)
+-		return -ENODEV;
+-
+-	return probe_func(spi);
+-}
+-
+-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
+-
+-static int tpm_tis_spi_remove(struct spi_device *dev)
+-{
+-	struct tpm_chip *chip = spi_get_drvdata(dev);
+-
+-	tpm_chip_unregister(chip);
+-	tpm_tis_remove(chip);
+-	return 0;
+-}
+-
+-static const struct spi_device_id tpm_tis_spi_id[] = {
+-	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+-	{ "cr50", (unsigned long)cr50_spi_probe },
+-	{}
+-};
+-MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
+-
+-static const struct of_device_id of_tis_spi_match[] = {
+-	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+-	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+-	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+-	{ .compatible = "google,cr50", .data = cr50_spi_probe },
+-	{}
+-};
+-MODULE_DEVICE_TABLE(of, of_tis_spi_match);
+-
+-static const struct acpi_device_id acpi_tis_spi_match[] = {
+-	{"SMO0768", 0},
+-	{}
+-};
+-MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
+-
+-static struct spi_driver tpm_tis_spi_driver = {
+-	.driver = {
+-		.name = "tpm_tis_spi",
+-		.pm = &tpm_tis_pm,
+-		.of_match_table = of_match_ptr(of_tis_spi_match),
+-		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+-	},
+-	.probe = tpm_tis_spi_driver_probe,
+-	.remove = tpm_tis_spi_remove,
+-	.id_table = tpm_tis_spi_id,
+-};
+-module_spi_driver(tpm_tis_spi_driver);
+-
+-MODULE_DESCRIPTION("TPM Driver for native SPI access");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
+new file mode 100644
+index 000000000000..d1754fd6c573
+--- /dev/null
++++ b/drivers/char/tpm/tpm_tis_spi_main.c
+@@ -0,0 +1,298 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2015 Infineon Technologies AG
++ * Copyright (C) 2016 STMicroelectronics SAS
++ *
++ * Authors:
++ * Peter Huewe <peter.huewe@infineon.com>
++ * Christophe Ricard <christophe-h.ricard@st.com>
++ *
++ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
++ *
++ * Device driver for TCG/TCPA TPM (trusted platform module).
++ * Specifications at www.trustedcomputinggroup.org
++ *
++ * This device driver implements the TPM interface as defined in
++ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
++ * SPI access_.
++ *
++ * It is based on the original tpm_tis device driver from Leendert van
++ * Dorn and Kyleen Hall and Jarko Sakkinnen.
++ */
++
++#include <linux/acpi.h>
++#include <linux/completion.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++#include <linux/of_device.h>
++#include <linux/spi/spi.h>
++#include <linux/tpm.h>
++
++#include "tpm.h"
++#include "tpm_tis_core.h"
++#include "tpm_tis_spi.h"
++
++#define MAX_SPI_FRAMESIZE 64
++
++/*
++ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
++ * keep trying to read from the device until MISO goes high indicating the
++ * wait state has ended.
++ *
++ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
++ */
++static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
++				    struct spi_transfer *spi_xfer)
++{
++	struct spi_message m;
++	int ret, i;
++
++	if ((phy->iobuf[3] & 0x01) == 0) {
++		// handle SPI wait states
++		phy->iobuf[0] = 0;
++
++		for (i = 0; i < TPM_RETRY; i++) {
++			spi_xfer->len = 1;
++			spi_message_init(&m);
++			spi_message_add_tail(spi_xfer, &m);
++			ret = spi_sync_locked(phy->spi_device, &m);
++			if (ret < 0)
++				return ret;
++			if (phy->iobuf[0] & 0x01)
++				break;
++		}
++
++		if (i == TPM_RETRY)
++			return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
++int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
++			 u8 *in, const u8 *out)
++{
++	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
++	int ret = 0;
++	struct spi_message m;
++	struct spi_transfer spi_xfer;
++	u8 transfer_len;
++
++	spi_bus_lock(phy->spi_device->master);
++
++	while (len) {
++		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
++
++		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
++		phy->iobuf[1] = 0xd4;
++		phy->iobuf[2] = addr >> 8;
++		phy->iobuf[3] = addr;
++
++		memset(&spi_xfer, 0, sizeof(spi_xfer));
++		spi_xfer.tx_buf = phy->iobuf;
++		spi_xfer.rx_buf = phy->iobuf;
++		spi_xfer.len = 4;
++		spi_xfer.cs_change = 1;
++
++		spi_message_init(&m);
++		spi_message_add_tail(&spi_xfer, &m);
++		ret = spi_sync_locked(phy->spi_device, &m);
++		if (ret < 0)
++			goto exit;
++
++		ret = phy->flow_control(phy, &spi_xfer);
++		if (ret < 0)
++			goto exit;
++
++		spi_xfer.cs_change = 0;
++		spi_xfer.len = transfer_len;
++		spi_xfer.delay_usecs = 5;
++
++		if (in) {
++			spi_xfer.tx_buf = NULL;
++		} else if (out) {
++			spi_xfer.rx_buf = NULL;
++			memcpy(phy->iobuf, out, transfer_len);
++			out += transfer_len;
++		}
++
++		spi_message_init(&m);
++		spi_message_add_tail(&spi_xfer, &m);
++		reinit_completion(&phy->ready);
++		ret = spi_sync_locked(phy->spi_device, &m);
++		if (ret < 0)
++			goto exit;
++
++		if (in) {
++			memcpy(in, phy->iobuf, transfer_len);
++			in += transfer_len;
++		}
++
++		len -= transfer_len;
++	}
++
++exit:
++	spi_bus_unlock(phy->spi_device->master);
++	return ret;
++}
++
++static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
++				  u16 len, u8 *result)
++{
++	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
++}
++
++static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
++				   u16 len, const u8 *value)
++{
++	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
++}
++
++int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
++{
++	__le16 result_le;
++	int rc;
++
++	rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
++				       (u8 *)&result_le);
++	if (!rc)
++		*result = le16_to_cpu(result_le);
++
++	return rc;
++}
++
++int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
++{
++	__le32 result_le;
++	int rc;
++
++	rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
++				       (u8 *)&result_le);
++	if (!rc)
++		*result = le32_to_cpu(result_le);
++
++	return rc;
++}
++
++int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
++{
++	__le32 value_le;
++	int rc;
++
++	value_le = cpu_to_le32(value);
++	rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
++					(u8 *)&value_le);
++
++	return rc;
++}
++
++int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
++		     int irq, const struct tpm_tis_phy_ops *phy_ops)
++{
++	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
++	if (!phy->iobuf)
++		return -ENOMEM;
++
++	phy->spi_device = spi;
++
++	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
++}
++
++static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
++	.read_bytes = tpm_tis_spi_read_bytes,
++	.write_bytes = tpm_tis_spi_write_bytes,
++	.read16 = tpm_tis_spi_read16,
++	.read32 = tpm_tis_spi_read32,
++	.write32 = tpm_tis_spi_write32,
++};
++
++static int tpm_tis_spi_probe(struct spi_device *dev)
++{
++	struct tpm_tis_spi_phy *phy;
++	int irq;
++
++	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
++			   GFP_KERNEL);
++	if (!phy)
++		return -ENOMEM;
++
++	phy->flow_control = tpm_tis_spi_flow_control;
++
++	/* If the SPI device has an IRQ then use that */
++	if (dev->irq > 0)
++		irq = dev->irq;
++	else
++		irq = -1;
++
++	init_completion(&phy->ready);
++	return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
++}
++
++typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
++
++static int tpm_tis_spi_driver_probe(struct spi_device *spi)
++{
++	const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
++	tpm_tis_spi_probe_func probe_func;
++
++	probe_func = of_device_get_match_data(&spi->dev);
++	if (!probe_func && spi_dev_id)
++		probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
++	if (!probe_func)
++		return -ENODEV;
++
++	return probe_func(spi);
++}
++
++static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
++
++static int tpm_tis_spi_remove(struct spi_device *dev)
++{
++	struct tpm_chip *chip = spi_get_drvdata(dev);
++
++	tpm_chip_unregister(chip);
++	tpm_tis_remove(chip);
++	return 0;
++}
++
++static const struct spi_device_id tpm_tis_spi_id[] = {
++	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
++	{ "cr50", (unsigned long)cr50_spi_probe },
++	{}
++};
++MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
++
++static const struct of_device_id of_tis_spi_match[] = {
++	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
++	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
++	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
++	{ .compatible = "google,cr50", .data = cr50_spi_probe },
++	{}
++};
++MODULE_DEVICE_TABLE(of, of_tis_spi_match);
++
++static const struct acpi_device_id acpi_tis_spi_match[] = {
++	{"SMO0768", 0},
++	{}
++};
++MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
++
++static struct spi_driver tpm_tis_spi_driver = {
++	.driver = {
++		.name = "tpm_tis_spi",
++		.pm = &tpm_tis_pm,
++		.of_match_table = of_match_ptr(of_tis_spi_match),
++		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
++	},
++	.probe = tpm_tis_spi_driver_probe,
++	.remove = tpm_tis_spi_remove,
++	.id_table = tpm_tis_spi_id,
++};
++module_spi_driver(tpm_tis_spi_driver);
++
++MODULE_DESCRIPTION("TPM Driver for native SPI access");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 66f1b2ac5cde..c27e206a764c 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -760,8 +760,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
+ 		return;
+ 	}
+ 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+-
+-	list_del(&vd->node);
++	/*
++	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
++	 * the desc allocated will never be freed in vchan_dma_desc_free_list
++	 */
++	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
++		list_del(&vd->node);
+ 
+ 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+@@ -1067,6 +1071,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ 
+ 	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 	vchan_get_all_descriptors(&sdmac->vc, &head);
++	sdmac->desc = NULL;
+ 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 	vchan_dma_desc_free_list(&sdmac->vc, &head);
+ 	sdmac->context_loaded = false;
+@@ -1075,19 +1080,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+ {
+ 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 
+ 	sdma_disable_channel(chan);
+ 
+-	if (sdmac->desc) {
+-		vchan_terminate_vdesc(&sdmac->desc->vd);
+-		sdmac->desc = NULL;
++	if (sdmac->desc)
+ 		schedule_work(&sdmac->terminate_worker);
+-	}
+-
+-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
+index 92ce6d85802c..4cc0e630ab79 100644
+--- a/drivers/fsi/Kconfig
++++ b/drivers/fsi/Kconfig
+@@ -55,6 +55,7 @@ config FSI_MASTER_AST_CF
+ 
+ config FSI_MASTER_ASPEED
+ 	tristate "FSI ASPEED master"
++	depends on HAS_IOMEM
+ 	help
+ 	 This option enables a FSI master that is present behind an OPB bridge
+ 	 in the AST2600.
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index ba9e53a1abc3..d9b8e3298d78 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3909,11 +3909,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ 	uint64_t clock;
+ 
++	amdgpu_gfx_off_ctrl(adev, false);
+ 	mutex_lock(&adev->gfx.gpu_clock_mutex);
+ 	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ 	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ 		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
++	amdgpu_gfx_off_ctrl(adev, true);
+ 	return clock;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 97105a5bb246..085b84322e92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3852,6 +3852,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ 	uint64_t clock;
+ 
++	amdgpu_gfx_off_ctrl(adev, false);
+ 	mutex_lock(&adev->gfx.gpu_clock_mutex);
+ 	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ 		uint32_t tmp, lsb, msb, i = 0;
+@@ -3870,6 +3871,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ 			((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ 	}
+ 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
++	amdgpu_gfx_off_ctrl(adev, true);
+ 	return clock;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 04ea7cd69295..624e223175c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -270,7 +270,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
+ 
+ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+-	return adev->clock.spll.reference_freq;
++	u32 reference_clock = adev->clock.spll.reference_freq;
++
++	if (adev->asic_type == CHIP_RAVEN)
++		return reference_clock / 4;
++
++	return reference_clock;
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 8029478ffebb..b0b0ccbb059d 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -297,7 +297,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
+ 
+ static int tc_aux_wait_busy(struct tc_data *tc)
+ {
+-	return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
++	return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
+ }
+ 
+ static int tc_aux_write_data(struct tc_data *tc, const void *data,
+@@ -640,7 +640,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ 	if (ret)
+ 		goto err;
+ 
+-	ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
++	ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
+ 	if (ret == -ETIMEDOUT) {
+ 		dev_err(tc->dev, "Timeout waiting for PHY to become ready");
+ 		return ret;
+@@ -876,7 +876,7 @@ static int tc_wait_link_training(struct tc_data *tc)
+ 	int ret;
+ 
+ 	ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
+-			      LT_LOOPDONE, 1, 1000);
++			      LT_LOOPDONE, 500, 100000);
+ 	if (ret) {
+ 		dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
+ 		return ret;
+@@ -949,7 +949,7 @@ static int tc_main_link_enable(struct tc_data *tc)
+ 	dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+ 	ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+ 
+-	ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
++	ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
+ 	if (ret) {
+ 		dev_err(dev, "timeout waiting for phy become ready");
+ 		return ret;
+diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
+index ba9595960bbe..907c4471f591 100644
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
+ 	help
+ 	  This option enables capturing the GPU state when a hang is detected.
+ 	  This information is vital for triaging hangs and assists in debugging.
+-	  Please report any hang to
+-	    https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+-	  for triaging.
++	  Please report any hang for triaging according to:
++	    https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ 
+ 	  If in doubt, say "Y".
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 2a27fb5d7dc6..1488822398fe 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4227,7 +4227,9 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ 					 struct intel_crtc_state *crtc_state)
+ {
+-	if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
++	if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
++		crtc_state->min_voltage_level = 3;
++	else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+ 		crtc_state->min_voltage_level = 1;
+ 	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ 		crtc_state->min_voltage_level = 2;
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 301897791627..b670239a293b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -10731,7 +10731,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+ 	u32 base;
+ 
+ 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+-		base = obj->phys_handle->busaddr;
++		base = sg_dma_address(obj->mm.pages->sgl);
+ 	else
+ 		base = intel_plane_ggtt_offset(plane_state);
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 42385277c684..f3d608df1c4d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -484,6 +484,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+ 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ 			return -ENODEV;
+ 
++		/*
++		 * If the cancel fails, we then need to reset, cleanly!
++		 *
++		 * If the per-engine reset fails, all hope is lost! We resort
++		 * to a full GPU reset in that unlikely case, but realistically
++		 * if the engine could not reset, the full reset does not fare
++		 * much better. The damage has been done.
++		 *
++		 * However, if we cannot reset an engine by itself, we cannot
++		 * cleanup a hanging persistent context without causing
++		 * collateral damage, and we should not pretend we can by
++		 * exposing the interface.
++		 */
++		if (!intel_has_reset_engine(&ctx->i915->gt))
++			return -ENODEV;
++
+ 		i915_gem_context_clear_persistence(ctx);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index e3f3944fbd90..1078a76d6d84 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -260,9 +260,6 @@ struct drm_i915_gem_object {
+ 
+ 		void *gvt_info;
+ 	};
+-
+-	/** for phys allocated objects */
+-	struct drm_dma_handle *phys_handle;
+ };
+ 
+ static inline struct drm_i915_gem_object *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+index 8043ff63d73f..5e2e0109c9ba 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+@@ -22,88 +22,87 @@
+ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+ {
+ 	struct address_space *mapping = obj->base.filp->f_mapping;
+-	struct drm_dma_handle *phys;
+-	struct sg_table *st;
+ 	struct scatterlist *sg;
+-	char *vaddr;
++	struct sg_table *st;
++	dma_addr_t dma;
++	void *vaddr;
++	void *dst;
+ 	int i;
+-	int err;
+ 
+ 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ 		return -EINVAL;
+ 
+-	/* Always aligning to the object size, allows a single allocation
++	/*
++	 * Always aligning to the object size, allows a single allocation
+ 	 * to handle all possible callers, and given typical object sizes,
+ 	 * the alignment of the buddy allocation will naturally match.
+ 	 */
+-	phys = drm_pci_alloc(obj->base.dev,
+-			     roundup_pow_of_two(obj->base.size),
+-			     roundup_pow_of_two(obj->base.size));
+-	if (!phys)
++	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
++				   roundup_pow_of_two(obj->base.size),
++				   &dma, GFP_KERNEL);
++	if (!vaddr)
+ 		return -ENOMEM;
+ 
+-	vaddr = phys->vaddr;
++	st = kmalloc(sizeof(*st), GFP_KERNEL);
++	if (!st)
++		goto err_pci;
++
++	if (sg_alloc_table(st, 1, GFP_KERNEL))
++		goto err_st;
++
++	sg = st->sgl;
++	sg->offset = 0;
++	sg->length = obj->base.size;
++
++	sg_assign_page(sg, (struct page *)vaddr);
++	sg_dma_address(sg) = dma;
++	sg_dma_len(sg) = obj->base.size;
++
++	dst = vaddr;
+ 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ 		struct page *page;
+-		char *src;
++		void *src;
+ 
+ 		page = shmem_read_mapping_page(mapping, i);
+-		if (IS_ERR(page)) {
+-			err = PTR_ERR(page);
+-			goto err_phys;
+-		}
++		if (IS_ERR(page))
++			goto err_st;
+ 
+ 		src = kmap_atomic(page);
+-		memcpy(vaddr, src, PAGE_SIZE);
+-		drm_clflush_virt_range(vaddr, PAGE_SIZE);
++		memcpy(dst, src, PAGE_SIZE);
++		drm_clflush_virt_range(dst, PAGE_SIZE);
+ 		kunmap_atomic(src);
+ 
+ 		put_page(page);
+-		vaddr += PAGE_SIZE;
++		dst += PAGE_SIZE;
+ 	}
+ 
+ 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ 
+-	st = kmalloc(sizeof(*st), GFP_KERNEL);
+-	if (!st) {
+-		err = -ENOMEM;
+-		goto err_phys;
+-	}
+-
+-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+-		kfree(st);
+-		err = -ENOMEM;
+-		goto err_phys;
+-	}
+-
+-	sg = st->sgl;
+-	sg->offset = 0;
+-	sg->length = obj->base.size;
+-
+-	sg_dma_address(sg) = phys->busaddr;
+-	sg_dma_len(sg) = obj->base.size;
+-
+-	obj->phys_handle = phys;
+-
+ 	__i915_gem_object_set_pages(obj, st, sg->length);
+ 
+ 	return 0;
+ 
+-err_phys:
+-	drm_pci_free(obj->base.dev, phys);
+-
+-	return err;
++err_st:
++	kfree(st);
++err_pci:
++	dma_free_coherent(&obj->base.dev->pdev->dev,
++			  roundup_pow_of_two(obj->base.size),
++			  vaddr, dma);
++	return -ENOMEM;
+ }
+ 
+ static void
+ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ 			       struct sg_table *pages)
+ {
++	dma_addr_t dma = sg_dma_address(pages->sgl);
++	void *vaddr = sg_page(pages->sgl);
++
+ 	__i915_gem_object_release_shmem(obj, pages, false);
+ 
+ 	if (obj->mm.dirty) {
+ 		struct address_space *mapping = obj->base.filp->f_mapping;
+-		char *vaddr = obj->phys_handle->vaddr;
++		void *src = vaddr;
+ 		int i;
+ 
+ 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ 				continue;
+ 
+ 			dst = kmap_atomic(page);
+-			drm_clflush_virt_range(vaddr, PAGE_SIZE);
+-			memcpy(dst, vaddr, PAGE_SIZE);
++			drm_clflush_virt_range(src, PAGE_SIZE);
++			memcpy(dst, src, PAGE_SIZE);
+ 			kunmap_atomic(dst);
+ 
+ 			set_page_dirty(page);
+ 			if (obj->mm.madv == I915_MADV_WILLNEED)
+ 				mark_page_accessed(page);
+ 			put_page(page);
+-			vaddr += PAGE_SIZE;
++
++			src += PAGE_SIZE;
+ 		}
+ 		obj->mm.dirty = false;
+ 	}
+@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ 	sg_free_table(pages);
+ 	kfree(pages);
+ 
+-	drm_pci_free(obj->base.dev, obj->phys_handle);
++	dma_free_coherent(&obj->base.dev->pdev->dev,
++			  roundup_pow_of_two(obj->base.size),
++			  vaddr, dma);
+ }
+ 
+ static void phys_release(struct drm_i915_gem_object *obj)
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index d925a1035c9d..0d80472c0f29 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1157,7 +1157,7 @@ static u64 execlists_update_context(struct i915_request *rq)
+ {
+ 	struct intel_context *ce = rq->hw_context;
+ 	u64 desc = ce->lrc_desc;
+-	u32 tail;
++	u32 tail, prev;
+ 
+ 	/*
+ 	 * WaIdleLiteRestore:bdw,skl
+@@ -1170,9 +1170,15 @@ static u64 execlists_update_context(struct i915_request *rq)
+ 	 * subsequent resubmissions (for lite restore). Should that fail us,
+ 	 * and we try and submit the same tail again, force the context
+ 	 * reload.
++	 *
++	 * If we need to return to a preempted context, we need to skip the
++	 * lite-restore and force it to reload the RING_TAIL. Otherwise, the
++	 * HW has a tendency to ignore us rewinding the TAIL to the end of
++	 * an earlier request.
+ 	 */
+ 	tail = intel_ring_set_tail(rq->ring, rq->tail);
+-	if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
++	prev = ce->lrc_reg_state[CTX_RING_TAIL];
++	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ 		desc |= CTX_DESC_FORCE_RESTORE;
+ 	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ 	rq->tail = rq->wa_tail;
+@@ -1427,6 +1433,11 @@ last_active(const struct intel_engine_execlists *execlists)
+ 	return *last;
+ }
+ 
++#define for_each_waiter(p__, rq__) \
++	list_for_each_entry_lockless(p__, \
++				     &(rq__)->sched.waiters_list, \
++				     wait_link)
++
+ static void defer_request(struct i915_request *rq, struct list_head * const pl)
+ {
+ 	LIST_HEAD(list);
+@@ -1444,7 +1455,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
+ 		GEM_BUG_ON(i915_request_is_active(rq));
+ 		list_move_tail(&rq->sched.link, pl);
+ 
+-		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
++		for_each_waiter(p, rq) {
+ 			struct i915_request *w =
+ 				container_of(p->waiter, typeof(*w), sched);
+ 
+@@ -1651,14 +1662,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
+ 			 */
+ 			__unwind_incomplete_requests(engine);
+ 
+-			/*
+-			 * If we need to return to the preempted context, we
+-			 * need to skip the lite-restore and force it to
+-			 * reload the RING_TAIL. Otherwise, the HW has a
+-			 * tendency to ignore us rewinding the TAIL to the
+-			 * end of an earlier request.
+-			 */
+-			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ 			last = NULL;
+ 		} else if (need_timeslice(engine, last) &&
+ 			   timer_expired(&engine->execlists.timer)) {
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
+index 374b28f13ca0..6ff803f397c4 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring.c
++++ b/drivers/gpu/drm/i915/gt/intel_ring.c
+@@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+ 
+ 	kref_init(&ring->ref);
+ 	ring->size = size;
++	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
+ 
+ 	/*
+ 	 * Workaround an erratum on the i830 which causes a hang if
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
+index ea2839d9e044..5bdce24994aa 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring.h
++++ b/drivers/gpu/drm/i915/gt/intel_ring.h
+@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+ 	return pos & (ring->size - 1);
+ }
+ 
++static inline int intel_ring_direction(const struct intel_ring *ring,
++				       u32 next, u32 prev)
++{
++	typecheck(typeof(ring->size), next);
++	typecheck(typeof(ring->size), prev);
++	return (next - prev) << ring->wrap;
++}
++
+ static inline bool
+ intel_ring_offset_valid(const struct intel_ring *ring,
+ 			unsigned int pos)
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
+index d9f17f38e0cc..3cd7fec7fd8d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
+@@ -45,6 +45,7 @@ struct intel_ring {
+ 
+ 	u32 space;
+ 	u32 size;
++	u32 wrap;
+ 	u32 effective_size;
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
+index 83f549d203a0..a635cf832d69 100644
+--- a/drivers/gpu/drm/i915/gt/mock_engine.c
++++ b/drivers/gpu/drm/i915/gt/mock_engine.c
+@@ -59,11 +59,26 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+ 	ring->vaddr = (void *)(ring + 1);
+ 	atomic_set(&ring->pin_count, 1);
+ 
++	ring->vma = i915_vma_alloc();
++	if (!ring->vma) {
++		kfree(ring);
++		return NULL;
++	}
++	i915_active_init(&ring->vma->active, NULL, NULL);
++
+ 	intel_ring_update_space(ring);
+ 
+ 	return ring;
+ }
+ 
++static void mock_ring_free(struct intel_ring *ring)
++{
++	i915_active_fini(&ring->vma->active);
++	i915_vma_free(ring->vma);
++
++	kfree(ring);
++}
++
+ static struct i915_request *first_request(struct mock_engine *engine)
+ {
+ 	return list_first_entry_or_null(&engine->hw_queue,
+@@ -121,7 +136,7 @@ static void mock_context_destroy(struct kref *ref)
+ 	GEM_BUG_ON(intel_context_is_pinned(ce));
+ 
+ 	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+-		kfree(ce->ring);
++		mock_ring_free(ce->ring);
+ 		mock_timeline_unpin(ce->timeline);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 4b04af569c05..7dc7bb850d0a 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
+ 
+ 	if (mm->type == INTEL_GVT_MM_PPGTT) {
+ 		list_del(&mm->ppgtt_mm.list);
++
++		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+ 		list_del(&mm->ppgtt_mm.lru_list);
++		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
++
+ 		invalidate_ppgtt_mm(mm);
+ 	} else {
+ 		vfree(mm->ggtt_mm.virtual_ggtt);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 905890e3ac24..3f07948ea4da 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -154,7 +154,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ 		     struct drm_i915_gem_pwrite *args,
+ 		     struct drm_file *file)
+ {
+-	void *vaddr = obj->phys_handle->vaddr + args->offset;
++	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ 
+ 	/*
+@@ -800,10 +800,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ 		ret = i915_gem_gtt_pwrite_fast(obj, args);
+ 
+ 	if (ret == -EFAULT || ret == -ENOSPC) {
+-		if (obj->phys_handle)
+-			ret = i915_gem_phys_pwrite(obj, args, file);
+-		else
++		if (i915_gem_object_has_struct_page(obj))
+ 			ret = i915_gem_shmem_pwrite(obj, args);
++		else
++			ret = i915_gem_phys_pwrite(obj, args, file);
+ 	}
+ 
+ 	i915_gem_object_unpin_pages(obj);
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index 3c85cb0ee99f..354845800085 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -1820,7 +1820,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
+ 	if (!xchg(&warned, true) &&
+ 	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
+ 		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+-		pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
++		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
++		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
+ 		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ 		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
+ 		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
+index 247a9671bca5..e954fa6109c5 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.c
++++ b/drivers/gpu/drm/i915/i915_scheduler.c
+@@ -415,8 +415,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ 
+ 	if (!node_signaled(signal)) {
+ 		INIT_LIST_HEAD(&dep->dfs_link);
+-		list_add(&dep->wait_link, &signal->waiters_list);
+-		list_add(&dep->signal_link, &node->signalers_list);
+ 		dep->signaler = signal;
+ 		dep->waiter = node;
+ 		dep->flags = flags;
+@@ -426,6 +424,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ 		    !node_started(signal))
+ 			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ 
++		/* All set, now publish. Beware the lockless walkers. */
++		list_add(&dep->signal_link, &node->signalers_list);
++		list_add_rcu(&dep->wait_link, &signal->waiters_list);
++
+ 		/*
+ 		 * As we do not allow WAIT to preempt inflight requests,
+ 		 * once we have executed a request, along with triggering
+diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
+index 0348c6d0ef5f..412135a07d5d 100644
+--- a/drivers/gpu/drm/i915/i915_utils.c
++++ b/drivers/gpu/drm/i915/i915_utils.c
+@@ -8,9 +8,8 @@
+ #include "i915_drv.h"
+ #include "i915_utils.h"
+ 
+-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+-		    "providing the dmesg log by booting with drm.debug=0xf"
++#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
++#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
+ 
+ void
+ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+index 24ab6249083a..6f420cc73dbd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
+ 
+ 	INTERLEAVED_RGB_FMT(RGB565,
+ 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
++		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ 		false, 2, 0,
+ 		DPU_FETCH_LINEAR, 1),
+ 
+ 	INTERLEAVED_RGB_FMT(BGR565,
+ 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
++		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ 		false, 2, 0,
+ 		DPU_FETCH_LINEAR, 1),
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 5193b6257061..b856e87574fd 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+ 		asyw->clr.ntfy = armw->ntfy.handle != 0;
+ 		asyw->clr.sema = armw->sema.handle != 0;
+ 		asyw->clr.xlut = armw->xlut.handle != 0;
++		if (asyw->clr.xlut && asyw->visible)
++			asyw->set.xlut = asyw->xlut.handle != 0;
+ 		asyw->clr.csc  = armw->csc.valid;
+ 		if (wndw->func->image_clr)
+ 			asyw->clr.image = armw->image.handle[0] != 0;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 763cfca886a7..3107b0738e40 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+ 	as = mmu->as;
+ 	if (as >= 0) {
+ 		int en = atomic_inc_return(&mmu->as_count);
+-		WARN_ON(en >= NUM_JOB_SLOTS);
++
++		/*
++		 * AS can be retained by active jobs or a perfcnt context,
++		 * hence the '+ 1' here.
++		 */
++		WARN_ON(en >= (NUM_JOB_SLOTS + 1));
+ 
+ 		list_move(&mmu->list, &pfdev->as_lru_list);
+ 		goto out;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+index 684820448be3..6913578d5aa7 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+ 	struct panfrost_file_priv *user = file_priv->driver_priv;
+ 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
+ 	struct drm_gem_shmem_object *bo;
+-	u32 cfg;
++	u32 cfg, as;
+ 	int ret;
+ 
+ 	if (user == perfcnt->user)
+@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+ 
+ 	perfcnt->user = user;
+ 
+-	/*
+-	 * Always use address space 0 for now.
+-	 * FIXME: this needs to be updated when we start using different
+-	 * address space.
+-	 */
+-	cfg = GPU_PERFCNT_CFG_AS(0) |
++	as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
++	cfg = GPU_PERFCNT_CFG_AS(as) |
+ 	      GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
+ 
+ 	/*
+@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
+ 	drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
+ 	perfcnt->buf = NULL;
+ 	panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
++	panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+ 	panfrost_gem_mapping_put(perfcnt->mapping);
+ 	perfcnt->mapping = NULL;
+ 	pm_runtime_mark_last_busy(pfdev->dev);
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 4cf25458f0b9..0db8ef4fd6e1 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -355,7 +355,9 @@ static ssize_t show_str(struct device *dev,
+ 	struct acpi_device *acpi_dev = to_acpi_device(dev);
+ 	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ 	acpi_string val;
++	int ret;
+ 
++	mutex_lock(&resource->lock);
+ 	switch (attr->index) {
+ 	case 0:
+ 		val = resource->model_number;
+@@ -372,8 +374,9 @@ static ssize_t show_str(struct device *dev,
+ 		val = "";
+ 		break;
+ 	}
+-
+-	return sprintf(buf, "%s\n", val);
++	ret = sprintf(buf, "%s\n", val);
++	mutex_unlock(&resource->lock);
++	return ret;
+ }
+ 
+ static ssize_t show_val(struct device *dev,
+@@ -817,11 +820,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
+ 
+ 	resource = acpi_driver_data(device);
+ 
+-	mutex_lock(&resource->lock);
+ 	switch (event) {
+ 	case METER_NOTIFY_CONFIG:
++		mutex_lock(&resource->lock);
+ 		free_capabilities(resource);
+ 		res = read_capabilities(resource);
++		mutex_unlock(&resource->lock);
+ 		if (res)
+ 			break;
+ 
+@@ -830,15 +834,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
+ 		break;
+ 	case METER_NOTIFY_TRIP:
+ 		sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
+-		update_meter(resource);
+ 		break;
+ 	case METER_NOTIFY_CAP:
+ 		sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
+-		update_cap(resource);
+ 		break;
+ 	case METER_NOTIFY_INTERVAL:
+ 		sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
+-		update_avg_interval(resource);
+ 		break;
+ 	case METER_NOTIFY_CAPPING:
+ 		sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
+@@ -848,7 +849,6 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
+ 		WARN(1, "Unexpected event %d\n", event);
+ 		break;
+ 	}
+-	mutex_unlock(&resource->lock);
+ 
+ 	acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
+ 					dev_name(&device->dev), event, 0);
+@@ -912,8 +912,8 @@ static int acpi_power_meter_remove(struct acpi_device *device)
+ 	resource = acpi_driver_data(device);
+ 	hwmon_device_unregister(resource->hwmon_dev);
+ 
+-	free_capabilities(resource);
+ 	remove_attrs(resource);
++	free_capabilities(resource);
+ 
+ 	kfree(resource);
+ 	return 0;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b273e421e910..a1a035270cab 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ 	}
+ }
+ 
++static void
++isert_wait4cmds(struct iscsi_conn *conn)
++{
++	isert_info("iscsi_conn %p\n", conn);
++
++	if (conn->sess) {
++		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++		target_wait_for_sess_cmds(conn->sess->se_sess);
++	}
++}
++
+ /**
+  * isert_put_unsol_pending_cmds() - Drop commands waiting for
+  *     unsolicitate dataout
+@@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 
+ 	ib_drain_qp(isert_conn->qp);
+ 	isert_put_unsol_pending_cmds(conn);
++	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
+ 
+ 	queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index dfedbb04f647..e7fc9e928788 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -732,6 +732,11 @@ static int iommu_dummy(struct device *dev)
+ 	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+ }
+ 
++static bool attach_deferred(struct device *dev)
++{
++	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
++}
++
+ /**
+  * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+  *				 sub-hierarchy of a candidate PCI-PCI bridge
+@@ -2424,8 +2429,7 @@ static struct dmar_domain *find_domain(struct device *dev)
+ {
+ 	struct device_domain_info *info;
+ 
+-	if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
+-		     dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
++	if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+ 		return NULL;
+ 
+ 	/* No lock here, assumes no domain exit in normal case */
+@@ -2436,18 +2440,14 @@ static struct dmar_domain *find_domain(struct device *dev)
+ 	return NULL;
+ }
+ 
+-static struct dmar_domain *deferred_attach_domain(struct device *dev)
++static void do_deferred_attach(struct device *dev)
+ {
+-	if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+-		struct iommu_domain *domain;
+-
+-		dev->archdata.iommu = NULL;
+-		domain = iommu_get_domain_for_dev(dev);
+-		if (domain)
+-			intel_iommu_attach_device(domain, dev);
+-	}
++	struct iommu_domain *domain;
+ 
+-	return find_domain(dev);
++	dev->archdata.iommu = NULL;
++	domain = iommu_get_domain_for_dev(dev);
++	if (domain)
++		intel_iommu_attach_device(domain, dev);
+ }
+ 
+ static inline struct device_domain_info *
+@@ -2799,7 +2799,7 @@ static int identity_mapping(struct device *dev)
+ 	struct device_domain_info *info;
+ 
+ 	info = dev->archdata.iommu;
+-	if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
++	if (info)
+ 		return (info->domain == si_domain);
+ 
+ 	return 0;
+@@ -3470,6 +3470,9 @@ static bool iommu_need_mapping(struct device *dev)
+ 	if (iommu_dummy(dev))
+ 		return false;
+ 
++	if (unlikely(attach_deferred(dev)))
++		do_deferred_attach(dev);
++
+ 	ret = identity_mapping(dev);
+ 	if (ret) {
+ 		u64 dma_mask = *dev->dma_mask;
+@@ -3518,7 +3521,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
+ 
+ 	BUG_ON(dir == DMA_NONE);
+ 
+-	domain = deferred_attach_domain(dev);
++	domain = find_domain(dev);
+ 	if (!domain)
+ 		return DMA_MAPPING_ERROR;
+ 
+@@ -3738,7 +3741,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
+ 	if (!iommu_need_mapping(dev))
+ 		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+ 
+-	domain = deferred_attach_domain(dev);
++	domain = find_domain(dev);
+ 	if (!domain)
+ 		return 0;
+ 
+@@ -3833,7 +3836,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+ 	int prot = 0;
+ 	int ret;
+ 
+-	domain = deferred_attach_domain(dev);
++	if (unlikely(attach_deferred(dev)))
++		do_deferred_attach(dev);
++
++	domain = find_domain(dev);
++
+ 	if (WARN_ON(dir == DMA_NONE || !domain))
+ 		return DMA_MAPPING_ERROR;
+ 
+@@ -5989,7 +5996,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+ 					   struct device *dev)
+ {
+-	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
++	return attach_deferred(dev);
+ }
+ 
+ const struct iommu_ops intel_iommu_ops = {
+diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
+index 52f38292df5b..c3de46acf50a 100644
+--- a/drivers/iommu/qcom_iommu.c
++++ b/drivers/iommu/qcom_iommu.c
+@@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
+ {
+ 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ 
+-	if (WARN_ON(qcom_domain->iommu))    /* forgot to detach? */
+-		return;
+-
+ 	iommu_put_dma_cookie(domain);
+ 
+-	/* NOTE: unmap can be called after client device is powered off,
+-	 * for example, with GPUs or anything involving dma-buf.  So we
+-	 * cannot rely on the device_link.  Make sure the IOMMU is on to
+-	 * avoid unclocked accesses in the TLB inv path:
+-	 */
+-	pm_runtime_get_sync(qcom_domain->iommu->dev);
+-
+-	free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+-
+-	pm_runtime_put_sync(qcom_domain->iommu->dev);
++	if (qcom_domain->iommu) {
++		/*
++		 * NOTE: unmap can be called after client device is powered
++		 * off, for example, with GPUs or anything involving dma-buf.
++		 * So we cannot rely on the device_link.  Make sure the IOMMU
++		 * is on to avoid unclocked accesses in the TLB inv path:
++		 */
++		pm_runtime_get_sync(qcom_domain->iommu->dev);
++		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
++		pm_runtime_put_sync(qcom_domain->iommu->dev);
++	}
+ 
+ 	kfree(qcom_domain);
+ }
+@@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ 	unsigned i;
+ 
+-	if (!qcom_domain->iommu)
++	if (WARN_ON(!qcom_domain->iommu))
+ 		return;
+ 
+ 	pm_runtime_get_sync(qcom_iommu->dev);
+@@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ 		ctx->domain = NULL;
+ 	}
+ 	pm_runtime_put_sync(qcom_iommu->dev);
+-
+-	qcom_domain->iommu = NULL;
+ }
+ 
+ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 8b2b9e254d28..f4015a5fb5c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -1078,8 +1078,6 @@ construct_skb:
+ 				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+ 			else
+ 				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+-		} else {
+-			skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+ 		}
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+index 3a975641f902..20b907dc1e29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
+ 	netdev_err(priv->netdev, err_str);
+ 
+ 	if (!reporter)
+-		return err_ctx->recover(&err_ctx->ctx);
++		return err_ctx->recover(err_ctx->ctx);
+ 
+ 	return devlink_health_report(reporter, err_str, err_ctx);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 7c8796d9743f..a226277b0980 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
+ 	}
+ }
+ 
++static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
++{
++	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++		mlx5_wq_ll_reset(&rq->mpwqe.wq);
++	else
++		mlx5_wq_cyc_reset(&rq->wqe.wq);
++}
++
+ /* SW parser related functions */
+ 
+ struct mlx5e_swp_spec {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4997b8a51994..5d9cfac67236 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -721,6 +721,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+ 	if (!in)
+ 		return -ENOMEM;
+ 
++	if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
++		mlx5e_rqwq_reset(rq);
++
+ 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ 
+ 	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 3df3604e8929..07282c679dcd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -456,12 +456,16 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+ 
+ static int esw_legacy_enable(struct mlx5_eswitch *esw)
+ {
+-	int ret;
++	struct mlx5_vport *vport;
++	int ret, i;
+ 
+ 	ret = esw_create_legacy_table(esw);
+ 	if (ret)
+ 		return ret;
+ 
++	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
++		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
++
+ 	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ 	if (ret)
+ 		esw_destroy_legacy_table(esw);
+@@ -2449,25 +2453,17 @@ out:
+ 
+ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+ {
+-	int err = 0;
+-
+ 	if (!esw)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (!ESW_ALLOWED(esw))
+ 		return -EPERM;
+ 
+-	mutex_lock(&esw->state_lock);
+-	if (esw->mode != MLX5_ESWITCH_LEGACY) {
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
++	if (esw->mode != MLX5_ESWITCH_LEGACY)
++		return -EOPNOTSUPP;
+ 
+ 	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+-
+-out:
+-	mutex_unlock(&esw->state_lock);
+-	return err;
++	return 0;
+ }
+ 
+ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 3e6412783078..dfefc6250f23 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
+ 		return -EINVAL;
+ 	}
+ 
+-	mlx5_eswitch_disable(esw, true);
++	mlx5_eswitch_disable(esw, false);
+ 	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
+ 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ 	if (err) {
+@@ -2271,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ {
+ 	int err, err1;
+ 
+-	mlx5_eswitch_disable(esw, true);
++	mlx5_eswitch_disable(esw, false);
+ 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+ 	if (err) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+index 02f7e4a39578..01f075fac276 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+@@ -94,6 +94,13 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+ 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+ }
+ 
++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
++{
++	wq->wqe_ctr = 0;
++	wq->cur_sz = 0;
++	mlx5_wq_cyc_update_db_record(wq);
++}
++
+ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		      void *qpc, struct mlx5_wq_qp *wq,
+ 		      struct mlx5_wq_ctrl *wq_ctrl)
+@@ -192,6 +199,19 @@ err_db_free:
+ 	return err;
+ }
+ 
++static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
++{
++	struct mlx5_wqe_srq_next_seg *next_seg;
++	int i;
++
++	for (i = 0; i < wq->fbc.sz_m1; i++) {
++		next_seg = mlx5_wq_ll_get_wqe(wq, i);
++		next_seg->next_wqe_index = cpu_to_be16(i + 1);
++	}
++	next_seg = mlx5_wq_ll_get_wqe(wq, i);
++	wq->tail_next = &next_seg->next_wqe_index;
++}
++
+ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		      void *wqc, struct mlx5_wq_ll *wq,
+ 		      struct mlx5_wq_ctrl *wq_ctrl)
+@@ -199,9 +219,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ 	u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
+ 	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
+-	struct mlx5_wqe_srq_next_seg *next_seg;
+ 	int err;
+-	int i;
+ 
+ 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ 	if (err) {
+@@ -220,13 +238,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 
+ 	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+ 
+-	for (i = 0; i < fbc->sz_m1; i++) {
+-		next_seg = mlx5_wq_ll_get_wqe(wq, i);
+-		next_seg->next_wqe_index = cpu_to_be16(i + 1);
+-	}
+-	next_seg = mlx5_wq_ll_get_wqe(wq, i);
+-	wq->tail_next = &next_seg->next_wqe_index;
+-
++	mlx5_wq_ll_init_list(wq);
+ 	wq_ctrl->mdev = mdev;
+ 
+ 	return 0;
+@@ -237,6 +249,15 @@ err_db_free:
+ 	return err;
+ }
+ 
++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
++{
++	wq->head = 0;
++	wq->wqe_ctr = 0;
++	wq->cur_sz = 0;
++	mlx5_wq_ll_init_list(wq);
++	mlx5_wq_ll_update_db_record(wq);
++}
++
+ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+ {
+ 	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+index d9a94bc223c0..4cadc336593f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+@@ -80,6 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		       void *wqc, struct mlx5_wq_cyc *wq,
+ 		       struct mlx5_wq_ctrl *wq_ctrl);
+ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
+ 
+ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		      void *qpc, struct mlx5_wq_qp *wq,
+@@ -92,6 +93,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		      void *wqc, struct mlx5_wq_ll *wq,
+ 		      struct mlx5_wq_ctrl *wq_ctrl);
++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
+ 
+ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+ 
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 797c18337d96..a11900cf3a36 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -715,6 +715,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	}
+ 
+ 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
++	kfree(ctrl->ana_log_buf);
+ 	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ 	if (!ctrl->ana_log_buf) {
+ 		error = -ENOMEM;
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 74d497d39c5a..c6695354b123 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
+ 	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
+ }
+ 
++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	/* do not allow mmapping the ashmem backing shmem file directly */
++	return -EPERM;
++}
++
++static unsigned long
++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
++				unsigned long len, unsigned long pgoff,
++				unsigned long flags)
++{
++	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
++}
++
+ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
++	static struct file_operations vmfile_fops;
+ 	struct ashmem_area *asma = file->private_data;
+ 	int ret = 0;
+ 
+@@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ 		}
+ 		vmfile->f_mode |= FMODE_LSEEK;
+ 		asma->file = vmfile;
++		/*
++		 * override mmap operation of the vmfile so that it can't be
++		 * remapped which would lead to creation of a new vma with no
++		 * asma permission checks. Have to override get_unmapped_area
++		 * as well to prevent VM_BUG_ON check for f_ops modification.
++		 */
++		if (!vmfile_fops.mmap) {
++			vmfile_fops = *vmfile->f_op;
++			vmfile_fops.mmap = ashmem_vmfile_mmap;
++			vmfile_fops.get_unmapped_area =
++					ashmem_vmfile_get_unmapped_area;
++		}
++		vmfile->f_op = &vmfile_fops;
+ 	}
+ 	get_file(asma->file);
+ 
+diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
+index 9b19ea9d3fa1..9a3f7c034ab4 100644
+--- a/drivers/staging/greybus/audio_manager.c
++++ b/drivers/staging/greybus/audio_manager.c
+@@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
+ 
+ 	list_for_each_entry_safe(module, next, &modules_list, list) {
+ 		list_del(&module->list);
+-		kobject_put(&module->kobj);
+ 		ida_simple_remove(&module_id, module->id);
++		kobject_put(&module->kobj);
+ 	}
+ 
+ 	is_empty = list_empty(&modules_list);
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 47f4cc6a19a9..df945a059cf6 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -2011,7 +2011,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ 	struct ieee_param *param;
+ 	uint ret = 0;
+ 
+-	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -2798,7 +2798,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ 		goto out;
+ 	}
+ 
+-	if (!p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+index b44e902ed338..b6d56cfb0a19 100644
+--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+@@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
+ 	s32 ret;
+ 	struct adapter *padapter;
+ 	struct xmit_priv *pxmitpriv;
+-	u8 thread_name[20] = "RTWHALXT";
+-
++	u8 thread_name[20];
+ 
+ 	ret = _SUCCESS;
+ 	padapter = context;
+ 	pxmitpriv = &padapter->xmitpriv;
+ 
+-	rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
++	rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
+ 	thread_enter(thread_name);
+ 
+ 	DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index db6528a01229..2ac0d84f090e 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -3373,7 +3373,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ 
+ 	/* down(&ieee->wx_sem); */
+ 
+-	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -4207,7 +4207,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ 
+ 
+ 	/* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
+-	if (!p->pointer) {
++	if (!p->pointer || p->length != sizeof(*param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 3b94e80f1d5e..879ceef517fb 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
+ 
+ 	vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+ 
+-	priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
++	priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
+ 	priv->current_rssi = priv->bb_pre_ed_rssi;
+ 
+ 	skb_pull(skb, 8);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index b94ed4e30770..09e55ea0bf5d 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ 		conn->cid);
+ 
+-	if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+-		return iscsit_add_reject_cmd(cmd,
+-				ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+ 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ 						     scsilun_to_int(&hdr->lun));
+@@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 			      conn->sess->se_sess, 0, DMA_NONE,
+ 			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+ 
+-	if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+-		return iscsit_add_reject_cmd(cmd,
+-				ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+ 	/*
+ 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+@@ -4149,6 +4145,9 @@ int iscsit_close_connection(
+ 	iscsit_stop_nopin_response_timer(conn);
+ 	iscsit_stop_nopin_timer(conn);
+ 
++	if (conn->conn_transport->iscsit_wait_conn)
++		conn->conn_transport->iscsit_wait_conn(conn);
++
+ 	/*
+ 	 * During Connection recovery drop unacknowledged out of order
+ 	 * commands for this connection, and prepare the other commands
+@@ -4231,11 +4230,6 @@ int iscsit_close_connection(
+ 	 * must wait until they have completed.
+ 	 */
+ 	iscsit_check_conn_usage_count(conn);
+-	target_sess_cmd_list_set_waiting(sess->se_sess);
+-	target_wait_for_sess_cmds(sess->se_sess);
+-
+-	if (conn->conn_transport->iscsit_wait_conn)
+-		conn->conn_transport->iscsit_wait_conn(conn);
+ 
+ 	ahash_request_free(conn->conn_tx_hash);
+ 	if (conn->conn_rx_hash) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index ea482d4b1f00..0ae9e60fc4d5 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+ 
+ 	target_remove_from_state_list(cmd);
+ 
++	/*
++	 * Clear struct se_cmd->se_lun before the handoff to FE.
++	 */
++	cmd->se_lun = NULL;
++
+ 	spin_lock_irqsave(&cmd->t_state_lock, flags);
+ 	/*
+ 	 * Determine if frontend context caller is requesting the stopping of
+@@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+ 	return cmd->se_tfo->check_stop_free(cmd);
+ }
+ 
++static void transport_lun_remove_cmd(struct se_cmd *cmd)
++{
++	struct se_lun *lun = cmd->se_lun;
++
++	if (!lun)
++		return;
++
++	if (cmpxchg(&cmd->lun_ref_active, true, false))
++		percpu_ref_put(&lun->lun_ref);
++}
++
+ static void target_complete_failure_work(struct work_struct *work)
+ {
+ 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+@@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
+ 
+ 	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+ 
++	transport_lun_remove_cmd(cmd);
++
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ }
+ 
+@@ -1708,6 +1726,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
+ 	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ 	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+ 
++	transport_lun_remove_cmd(se_cmd);
+ 	transport_cmd_check_stop_to_fabric(se_cmd);
+ }
+ 
+@@ -1898,6 +1917,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 		goto queue_full;
+ 
+ check_stop:
++	transport_lun_remove_cmd(cmd);
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ 	return;
+ 
+@@ -2195,6 +2215,7 @@ queue_status:
+ 		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
+ 		return;
+ 	}
++	transport_lun_remove_cmd(cmd);
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ }
+ 
+@@ -2289,6 +2310,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ 		if (ret)
+ 			goto queue_full;
+ 
++		transport_lun_remove_cmd(cmd);
+ 		transport_cmd_check_stop_to_fabric(cmd);
+ 		return;
+ 	}
+@@ -2314,6 +2336,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ 			if (ret)
+ 				goto queue_full;
+ 
++			transport_lun_remove_cmd(cmd);
+ 			transport_cmd_check_stop_to_fabric(cmd);
+ 			return;
+ 		}
+@@ -2349,6 +2372,7 @@ queue_rsp:
+ 			if (ret)
+ 				goto queue_full;
+ 
++			transport_lun_remove_cmd(cmd);
+ 			transport_cmd_check_stop_to_fabric(cmd);
+ 			return;
+ 		}
+@@ -2384,6 +2408,7 @@ queue_status:
+ 		break;
+ 	}
+ 
++	transport_lun_remove_cmd(cmd);
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ 	return;
+ 
+@@ -2710,6 +2735,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ 		 */
+ 		if (cmd->state_active)
+ 			target_remove_from_state_list(cmd);
++
++		if (cmd->se_lun)
++			transport_lun_remove_cmd(cmd);
+ 	}
+ 	if (aborted)
+ 		cmd->free_compl = &compl;
+@@ -2781,9 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref)
+ 	struct completion *abrt_compl = se_cmd->abrt_compl;
+ 	unsigned long flags;
+ 
+-	if (se_cmd->lun_ref_active)
+-		percpu_ref_put(&se_cmd->se_lun->lun_ref);
+-
+ 	if (se_sess) {
+ 		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ 		list_del_init(&se_cmd->se_cmd_list);
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index ca86a8e09c77..43bfeb886614 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -274,6 +274,12 @@ out:
+ 	return ret;
+ }
+ 
++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
++				 size_t bytes)
++{
++	return -EPERM;
++}
++
+ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ 			       size_t bytes)
+ {
+@@ -319,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ 		config.read_only = true;
+ 	} else {
+ 		config.name = "nvm_non_active";
++		config.reg_read = tb_switch_nvm_no_read;
+ 		config.reg_write = tb_switch_nvm_write;
+ 		config.root_only = true;
+ 	}
+diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
+index d1cdd2ab8b4c..d367803e2044 100644
+--- a/drivers/tty/serdev/serdev-ttyport.c
++++ b/drivers/tty/serdev/serdev-ttyport.c
+@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 					struct device *parent,
+ 					struct tty_driver *drv, int idx)
+ {
+-	const struct tty_port_client_operations *old_ops;
+ 	struct serdev_controller *ctrl;
+ 	struct serport *serport;
+ 	int ret;
+@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 
+ 	ctrl->ops = &ctrl_ops;
+ 
+-	old_ops = port->client_ops;
+ 	port->client_ops = &client_ops;
+ 	port->client_data = ctrl;
+ 
+@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 
+ err_reset_data:
+ 	port->client_data = NULL;
+-	port->client_ops = old_ops;
++	port->client_ops = &tty_port_default_client_ops;
+ 	serdev_controller_put(ctrl);
+ 
+ 	return ERR_PTR(ret);
+@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
+ 		return -ENODEV;
+ 
+ 	serdev_controller_remove(ctrl);
+-	port->client_ops = NULL;
+ 	port->client_data = NULL;
++	port->client_ops = &tty_port_default_client_ops;
+ 	serdev_controller_put(ctrl);
+ 
+ 	return 0;
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 6e67fd89445a..0ed5404f35d6 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -449,7 +449,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
+ 		port.port.line = rc;
+ 
+ 	port.port.irq = irq_of_parse_and_map(np, 0);
+-	port.port.irqflags = IRQF_SHARED;
+ 	port.port.handle_irq = aspeed_vuart_handle_irq;
+ 	port.port.iotype = UPIO_MEM;
+ 	port.port.type = PORT_16550A;
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index e682390ce0de..28bdbd7b4ab2 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ 	struct hlist_head *h;
+ 	struct hlist_node *n;
+ 	struct irq_info *i;
+-	int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
++	int ret;
+ 
+ 	mutex_lock(&hash_mutex);
+ 
+@@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ 		INIT_LIST_HEAD(&up->list);
+ 		i->head = &up->list;
+ 		spin_unlock_irq(&i->lock);
+-		irq_flags |= up->port.irqflags;
+ 		ret = request_irq(up->port.irq, serial8250_interrupt,
+-				  irq_flags, up->port.name, i);
++				  up->port.irqflags, up->port.name, i);
+ 		if (ret < 0)
+ 			serial_do_unlink(i, up);
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 92fbf46ce3bd..3205c83577e0 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -202,7 +202,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ 
+ 	port->type = type;
+ 	port->uartclk = clk;
+-	port->irqflags |= IRQF_SHARED;
+ 
+ 	if (of_property_read_bool(np, "no-loopback-test"))
+ 		port->flags |= UPF_SKIP_TEST;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 90655910b0c7..5741b3822cf6 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2178,6 +2178,10 @@ int serial8250_do_startup(struct uart_port *port)
+ 		}
+ 	}
+ 
++	/* Check if we need to have shared IRQs */
++	if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
++		up->port.irqflags |= IRQF_SHARED;
++
+ 	if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
+ 		unsigned char iir1;
+ 		/*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 1ba9bc667e13..8a909d556185 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port)
+ 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+ 
+ 	if (atmel_uart_is_half_duplex(port))
+-		atmel_start_rx(port);
++		if (!atomic_read(&atmel_port->tasklet_shutdown))
++			atmel_start_rx(port);
+ 
+ }
+ 
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index dd3120c5db2b..0357fad48247 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
+ 
+ 	sport->tx_bytes = uart_circ_chars_pending(xmit);
+ 
+-	if (xmit->tail < xmit->head) {
++	if (xmit->tail < xmit->head || xmit->head == 0) {
+ 		sport->dma_tx_nents = 1;
+ 		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ 	} else {
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index ff63728a95f4..ebace5ad175c 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -128,6 +128,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
+ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
+ 
+ static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 					32000000, 48000000, 64000000, 80000000,
+@@ -618,7 +619,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+ 	u32 irq_en;
+ 	u32 status;
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+-	u32 irq_clear = S_CMD_DONE_EN;
++	u32 s_irq_status;
+ 
+ 	irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ 	irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+@@ -634,10 +635,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+ 		return;
+ 
+ 	geni_se_cancel_s_cmd(&port->se);
+-	qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+-					S_GENI_CMD_CANCEL, false);
++	qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
++					S_CMD_CANCEL_EN, true);
++	/*
++	 * If timeout occurs secondary engine remains active
++	 * and Abort sequence is executed.
++	 */
++	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++	/* Flush the Rx buffer */
++	if (s_irq_status & S_RX_FIFO_LAST_EN)
++		qcom_geni_serial_handle_rx(uport, true);
++	writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
++
+ 	status = readl(uport->membase + SE_GENI_STATUS);
+-	writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ 	if (status & S_GENI_CMD_ACTIVE)
+ 		qcom_geni_serial_abort_rx(uport);
+ }
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index 044c3cbdcfa4..ea80bf872f54 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
+ 	}
+ }
+ 
+-static const struct tty_port_client_operations default_client_ops = {
++const struct tty_port_client_operations tty_port_default_client_ops = {
+ 	.receive_buf = tty_port_default_receive_buf,
+ 	.write_wakeup = tty_port_default_wakeup,
+ };
++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
+ 
+ void tty_port_init(struct tty_port *port)
+ {
+@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
+ 	spin_lock_init(&port->lock);
+ 	port->close_delay = (50 * HZ) / 100;
+ 	port->closing_wait = (3000 * HZ) / 100;
+-	port->client_ops = &default_client_ops;
++	port->client_ops = &tty_port_default_client_ops;
+ 	kref_init(&port->kref);
+ }
+ EXPORT_SYMBOL(tty_port_init);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 78732feaf65b..44d974d4159f 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -29,6 +29,8 @@
+ #include <linux/console.h>
+ #include <linux/tty_flip.h>
+ 
++#include <linux/sched/signal.h>
++
+ /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
+ #define isspace(c)	((c) == ' ')
+ 
+@@ -350,6 +352,7 @@ int paste_selection(struct tty_struct *tty)
+ 	unsigned int count;
+ 	struct  tty_ldisc *ld;
+ 	DECLARE_WAITQUEUE(wait, current);
++	int ret = 0;
+ 
+ 	console_lock();
+ 	poke_blanked_console();
+@@ -363,6 +366,10 @@ int paste_selection(struct tty_struct *tty)
+ 	add_wait_queue(&vc->paste_wait, &wait);
+ 	while (sel_buffer && sel_buffer_lth > pasted) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (signal_pending(current)) {
++			ret = -EINTR;
++			break;
++		}
+ 		if (tty_throttled(tty)) {
+ 			schedule();
+ 			continue;
+@@ -378,6 +385,6 @@ int paste_selection(struct tty_struct *tty)
+ 
+ 	tty_buffer_unlock_exclusive(&vc->port);
+ 	tty_ldisc_deref(ld);
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(paste_selection);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 34aa39d1aed9..3b4ccc2a30c1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
+ 	WARN_CONSOLE_UNLOCKED();
+ 
+ 	set_origin(vc);
+-	if (vc->vc_sw->con_flush_scrollback)
++	if (vc->vc_sw->con_flush_scrollback) {
+ 		vc->vc_sw->con_flush_scrollback(vc);
+-	else
++	} else if (con_is_visible(vc)) {
++		/*
++		 * When no con_flush_scrollback method is provided then the
++		 * legacy way for flushing the scrollback buffer is to use
++		 * a side effect of the con_switch method. We do it only on
++		 * the foreground console as background consoles have no
++		 * scrollback buffers in that case and we obviously don't
++		 * want to switch to them.
++		 */
++		hide_cursor(vc);
+ 		vc->vc_sw->con_switch(vc);
++		set_cursor(vc);
++	}
+ }
+ 
+ /*
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 8b0ed139592f..ee6c91ef1f6c 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
+ 			return -EINVAL;
+ 
+ 		for (i = 0; i < MAX_NR_CONSOLES; i++) {
++			struct vc_data *vcp;
++
+ 			if (!vc_cons[i].d)
+ 				continue;
+ 			console_lock();
+-			if (v.v_vlin)
+-				vc_cons[i].d->vc_scan_lines = v.v_vlin;
+-			if (v.v_clin)
+-				vc_cons[i].d->vc_font.height = v.v_clin;
+-			vc_cons[i].d->vc_resize_user = 1;
+-			vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
++			vcp = vc_cons[i].d;
++			if (vcp) {
++				if (v.v_vlin)
++					vcp->vc_scan_lines = v.v_vlin;
++				if (v.v_clin)
++					vcp->vc_font.height = v.v_clin;
++				vcp->vc_resize_user = 1;
++				vc_resize(vcp, v.v_cols, v.v_rows);
++			}
+ 			console_unlock();
+ 		}
+ 		break;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 26bc05e48d8a..7df22bcefa9d 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		struct usb_host_interface *ifp, int num_ep,
+ 		unsigned char *buffer, int size)
+ {
++	struct usb_device *udev = to_usb_device(ddev);
+ 	unsigned char *buffer0 = buffer;
+ 	struct usb_endpoint_descriptor *d;
+ 	struct usb_host_endpoint *endpoint;
+@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 	}
+ 
++	/* Ignore blacklisted endpoints */
++	if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
++		if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
++			dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
++					cfgno, inum, asnum,
++					d->bEndpointAddress);
++			goto skip_to_next_endpoint_or_interface_descriptor;
++		}
++	}
++
+ 	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ 	++ifp->desc.bNumEndpoints;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 3405b146edc9..1d212f82c69b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -38,7 +38,9 @@
+ #include "otg_whitelist.h"
+ 
+ #define USB_VENDOR_GENESYS_LOGIC		0x05e3
++#define USB_VENDOR_SMSC				0x0424
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
++#define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
+ 
+ #define USB_TP_TRANSMISSION_DELAY	40	/* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX	65535	/* ns */
+@@ -1217,11 +1219,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ #ifdef CONFIG_PM
+ 			udev->reset_resume = 1;
+ #endif
+-			/* Don't set the change_bits when the device
+-			 * was powered off.
+-			 */
+-			if (test_bit(port1, hub->power_bits))
+-				set_bit(port1, hub->change_bits);
+ 
+ 		} else {
+ 			/* The power session is gone; tell hub_wq */
+@@ -1731,6 +1728,10 @@ static void hub_disconnect(struct usb_interface *intf)
+ 	kfree(hub->buffer);
+ 
+ 	pm_suspend_ignore_children(&intf->dev, false);
++
++	if (hub->quirk_disable_autosuspend)
++		usb_autopm_put_interface(intf);
++
+ 	kref_put(&hub->kref, hub_release);
+ }
+ 
+@@ -1863,6 +1864,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
+ 		hub->quirk_check_port_auto_suspend = 1;
+ 
++	if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
++		hub->quirk_disable_autosuspend = 1;
++		usb_autopm_get_interface(intf);
++	}
++
+ 	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+ 		return 0;
+ 
+@@ -5599,6 +5605,10 @@ out_hdev_lock:
+ }
+ 
+ static const struct usb_device_id hub_id_table[] = {
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
++      .idVendor = USB_VENDOR_SMSC,
++      .bInterfaceClass = USB_CLASS_HUB,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ 			| USB_DEVICE_ID_MATCH_INT_CLASS,
+       .idVendor = USB_VENDOR_GENESYS_LOGIC,
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index a9e24e4b8df1..a97dd1ba964e 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -61,6 +61,7 @@ struct usb_hub {
+ 	unsigned		quiescing:1;
+ 	unsigned		disconnected:1;
+ 	unsigned		in_reset:1;
++	unsigned		quirk_disable_autosuspend:1;
+ 
+ 	unsigned		quirk_check_port_auto_suspend:1;
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6b6413073584..2b24336a72e5 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
+ 			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ 
++	/* Sound Devices USBPre2 */
++	{ USB_DEVICE(0x0926, 0x0202), .driver_info =
++			USB_QUIRK_ENDPOINT_BLACKLIST },
++
+ 	/* Keytouch QWERTY Panel keyboard */
+ 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* novation SoundControl XL */
++	{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ 	{ }  /* terminating entry must be last */
+ };
+ 
++/*
++ * Entries for blacklisted endpoints that should be ignored when parsing
++ * configuration descriptors.
++ *
++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
++ */
++static const struct usb_device_id usb_endpoint_blacklist[] = {
++	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
++	{ }
++};
++
++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++		struct usb_host_interface *intf,
++		struct usb_endpoint_descriptor *epd)
++{
++	const struct usb_device_id *id;
++	unsigned int address;
++
++	for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
++		if (!usb_match_device(udev, id))
++			continue;
++
++		if (!usb_match_one_id_intf(udev, intf, id))
++			continue;
++
++		address = id->driver_info;
++		if (address == epd->bEndpointAddress)
++			return true;
++	}
++
++	return false;
++}
++
+ static bool usb_match_any_interface(struct usb_device *udev,
+ 				    const struct usb_device_id *id)
+ {
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index cf4783cf661a..3ad0ee57e859 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
+ extern void usb_detect_quirks(struct usb_device *udev);
+ extern void usb_detect_interface_quirks(struct usb_device *udev);
+ extern void usb_release_quirk_list(void);
++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++		struct usb_host_interface *intf,
++		struct usb_endpoint_descriptor *epd);
+ extern int usb_remove_device(struct usb_device *udev);
+ 
+ extern int usb_get_device_descriptor(struct usb_device *dev,
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index a9133773b89e..7fd0900a9cb0 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ 	else
+ 		packets = 1;	/* send one packet if length is zero. */
+ 
+-	if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+-		dev_err(hsotg->dev, "req length > maxpacket*mc\n");
+-		return;
+-	}
+-
+ 	if (dir_in && index != 0)
+ 		if (hs_ep->isochronous)
+ 			epsize = DXEPTSIZ_MC(packets);
+@@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ 	req->actual = 0;
+ 	req->status = -EINPROGRESS;
+ 
++	/* Don't queue ISOC request if length greater than mps*mc */
++	if (hs_ep->isochronous &&
++	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
++		dev_err(hs->dev, "req length > maxpacket*mc\n");
++		return -EINVAL;
++	}
++
+ 	/* In DDMA mode for ISOC's don't queue request if length greater
+ 	 * than descriptor limits.
+ 	 */
+@@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ 	struct dwc2_hsotg_ep *ep;
+ 	__le16 reply;
++	u16 status;
+ 	int ret;
+ 
+ 	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+@@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ 
+ 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ 	case USB_RECIP_DEVICE:
+-		/*
+-		 * bit 0 => self powered
+-		 * bit 1 => remote wakeup
+-		 */
+-		reply = cpu_to_le16(0);
++		status = 1 << USB_DEVICE_SELF_POWERED;
++		status |= hsotg->remote_wakeup_allowed <<
++			  USB_DEVICE_REMOTE_WAKEUP;
++		reply = cpu_to_le16(status);
+ 		break;
+ 
+ 	case USB_RECIP_INTERFACE:
+@@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ 	case USB_RECIP_DEVICE:
+ 		switch (wValue) {
+ 		case USB_DEVICE_REMOTE_WAKEUP:
+-			hsotg->remote_wakeup_allowed = 1;
++			if (set)
++				hsotg->remote_wakeup_allowed = 1;
++			else
++				hsotg->remote_wakeup_allowed = 0;
+ 			break;
+ 
+ 		case USB_DEVICE_TEST_MODE:
+@@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ 				return -EINVAL;
+ 
+ 			hsotg->test_mode = wIndex >> 8;
+-			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+-			if (ret) {
+-				dev_err(hsotg->dev,
+-					"%s: failed to send reply\n", __func__);
+-				return ret;
+-			}
+ 			break;
+ 		default:
+ 			return -ENOENT;
+ 		}
++
++		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
++		if (ret) {
++			dev_err(hsotg->dev,
++				"%s: failed to send reply\n", __func__);
++			return ret;
++		}
+ 		break;
+ 
+ 	case USB_RECIP_ENDPOINT:
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index e56beb9d1e36..4a13ceaf4093 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
+ 	u8 epnum = event->endpoint_number;
+ 	size_t len;
+ 	int status;
+-	int ret;
+ 
+-	ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
++	len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
+ 			(epnum & 1) ? "in" : "out");
+-	if (ret < 0)
+-		return "UNKNOWN";
+ 
+ 	status = event->status;
+ 
+ 	switch (event->endpoint_event) {
+ 	case DWC3_DEPEVT_XFERCOMPLETE:
+-		len = strlen(str);
+-		snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
++		len += scnprintf(str + len, size - len,
++				"Transfer Complete (%c%c%c)",
+ 				status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ 				status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ 				status & DEPEVT_STATUS_LST ? 'L' : 'l');
+ 
+-		len = strlen(str);
+-
+ 		if (epnum <= 1)
+-			snprintf(str + len, size - len, " [%s]",
++			scnprintf(str + len, size - len, " [%s]",
+ 					dwc3_ep0_state_string(ep0state));
+ 		break;
+ 	case DWC3_DEPEVT_XFERINPROGRESS:
+-		len = strlen(str);
+-
+-		snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
++		scnprintf(str + len, size - len,
++				"Transfer In Progress [%d] (%c%c%c)",
+ 				event->parameters,
+ 				status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ 				status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ 				status & DEPEVT_STATUS_LST ? 'M' : 'm');
+ 		break;
+ 	case DWC3_DEPEVT_XFERNOTREADY:
+-		len = strlen(str);
+-
+-		snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
++		len += scnprintf(str + len, size - len,
++				"Transfer Not Ready [%d]%s",
+ 				event->parameters,
+ 				status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
+ 				" (Active)" : " (Not Active)");
+ 
+-		len = strlen(str);
+-
+ 		/* Control Endpoints */
+ 		if (epnum <= 1) {
+ 			int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+ 
+ 			switch (phase) {
+ 			case DEPEVT_STATUS_CONTROL_DATA:
+-				snprintf(str + ret, size - ret,
++				scnprintf(str + len, size - len,
+ 						" [Data Phase]");
+ 				break;
+ 			case DEPEVT_STATUS_CONTROL_STATUS:
+-				snprintf(str + ret, size - ret,
++				scnprintf(str + len, size - len,
+ 						" [Status Phase]");
+ 			}
+ 		}
+ 		break;
+ 	case DWC3_DEPEVT_RXTXFIFOEVT:
+-		snprintf(str + ret, size - ret, "FIFO");
++		scnprintf(str + len, size - len, "FIFO");
+ 		break;
+ 	case DWC3_DEPEVT_STREAMEVT:
+ 		status = event->status;
+ 
+ 		switch (status) {
+ 		case DEPEVT_STREAMEVT_FOUND:
+-			snprintf(str + ret, size - ret, " Stream %d Found",
++			scnprintf(str + len, size - len, " Stream %d Found",
+ 					event->parameters);
+ 			break;
+ 		case DEPEVT_STREAMEVT_NOTFOUND:
+ 		default:
+-			snprintf(str + ret, size - ret, " Stream Not Found");
++			scnprintf(str + len, size - len, " Stream Not Found");
+ 			break;
+ 		}
+ 
+ 		break;
+ 	case DWC3_DEPEVT_EPCMDCMPLT:
+-		snprintf(str + ret, size - ret, "Endpoint Command Complete");
++		scnprintf(str + len, size - len, "Endpoint Command Complete");
+ 		break;
+ 	default:
+-		snprintf(str, size, "UNKNOWN");
++		scnprintf(str + len, size - len, "UNKNOWN");
+ 	}
+ 
+ 	return str;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8b95be897078..e0cb1c2d5675 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2426,7 +2426,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
+ 	if (event->status & DEPEVT_STATUS_SHORT && !chain)
+ 		return 1;
+ 
+-	if (event->status & DEPEVT_STATUS_IOC)
++	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
++	    (trb->ctrl & DWC3_TRB_CTRL_LST))
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 3b4f67000315..cd303a3ea680 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ 		val = CONFIG_USB_GADGET_VBUS_DRAW;
+ 	if (!val)
+ 		return 0;
+-	switch (speed) {
+-	case USB_SPEED_SUPER:
+-		return DIV_ROUND_UP(val, 8);
+-	default:
++	if (speed < USB_SPEED_SUPER)
+ 		return DIV_ROUND_UP(val, 2);
+-	}
++	else
++		return DIV_ROUND_UP(val, 8);
+ }
+ 
+ static int config_buf(struct usb_configuration *config,
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 7a3a29e5e9d2..af92b2576fe9 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
+ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 				     u16 wLength)
+ {
++	struct xhci_port_cap *port_cap = NULL;
+ 	int i, ssa_count;
+ 	u32 temp;
+ 	u16 desc_size, ssp_cap_size, ssa_size = 0;
+@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 	ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
+ 
+ 	/* does xhci support USB 3.1 Enhanced SuperSpeed */
+-	if (xhci->usb3_rhub.min_rev >= 0x01) {
++	for (i = 0; i < xhci->num_port_caps; i++) {
++		if (xhci->port_caps[i].maj_rev == 0x03 &&
++		    xhci->port_caps[i].min_rev >= 0x01) {
++			usb3_1 = true;
++			port_cap = &xhci->port_caps[i];
++			break;
++		}
++	}
++
++	if (usb3_1) {
+ 		/* does xhci provide a PSI table for SSA speed attributes? */
+-		if (xhci->usb3_rhub.psi_count) {
++		if (port_cap->psi_count) {
+ 			/* two SSA entries for each unique PSI ID, RX and TX */
+-			ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
++			ssa_count = port_cap->psi_uid_count * 2;
+ 			ssa_size = ssa_count * sizeof(u32);
+ 			ssp_cap_size -= 16; /* skip copying the default SSA */
+ 		}
+ 		desc_size += ssp_cap_size;
+-		usb3_1 = true;
+ 	}
+ 	memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
+ 
+@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 	}
+ 
+ 	/* If PSI table exists, add the custom speed attributes from it */
+-	if (usb3_1 && xhci->usb3_rhub.psi_count) {
++	if (usb3_1 && port_cap->psi_count) {
+ 		u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
+ 		int offset;
+ 
+@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 
+ 		/* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
+ 		bm_attrib = (ssa_count - 1) & 0x1f;
+-		bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
++		bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
+ 		put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
+ 
+ 		if (wLength < desc_size + ssa_size)
+@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 		 * USB 3.1 requires two SSA entries (RX and TX) for every link
+ 		 */
+ 		offset = desc_size;
+-		for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+-			psi = xhci->usb3_rhub.psi[i];
++		for (i = 0; i < port_cap->psi_count; i++) {
++			psi = port_cap->psi[i];
+ 			psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ 			psi_exp = XHCI_EXT_PORT_PSIE(psi);
+ 			psi_mant = XHCI_EXT_PORT_PSIM(psi);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 3b1388fa2f36..884c601bfa15 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	/* Allow 3 retries for everything but isoc, set CErr = 3 */
+ 	if (!usb_endpoint_xfer_isoc(&ep->desc))
+ 		err_count = 3;
+-	/* Some devices get this wrong */
+-	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
+-		max_packet = 512;
++	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
++	if (usb_endpoint_xfer_bulk(&ep->desc)) {
++		if (udev->speed == USB_SPEED_HIGH)
++			max_packet = 512;
++		if (udev->speed == USB_SPEED_FULL) {
++			max_packet = rounddown_pow_of_two(max_packet);
++			max_packet = clamp_val(max_packet, 8, 64);
++		}
++	}
+ 	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
+ 	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ 		avg_trb_len = 8;
+@@ -1909,17 +1915,17 @@ no_bw:
+ 	xhci->usb3_rhub.num_ports = 0;
+ 	xhci->num_active_eps = 0;
+ 	kfree(xhci->usb2_rhub.ports);
+-	kfree(xhci->usb2_rhub.psi);
+ 	kfree(xhci->usb3_rhub.ports);
+-	kfree(xhci->usb3_rhub.psi);
+ 	kfree(xhci->hw_ports);
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
++	for (i = 0; i < xhci->num_port_caps; i++)
++		kfree(xhci->port_caps[i].psi);
++	kfree(xhci->port_caps);
++	xhci->num_port_caps = 0;
+ 
+ 	xhci->usb2_rhub.ports = NULL;
+-	xhci->usb2_rhub.psi = NULL;
+ 	xhci->usb3_rhub.ports = NULL;
+-	xhci->usb3_rhub.psi = NULL;
+ 	xhci->hw_ports = NULL;
+ 	xhci->rh_bw = NULL;
+ 	xhci->ext_caps = NULL;
+@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 	u8 major_revision, minor_revision;
+ 	struct xhci_hub *rhub;
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
++	struct xhci_port_cap *port_cap;
+ 
+ 	temp = readl(addr);
+ 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
+ 		return;
+ 
+-	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
+-	if (rhub->psi_count) {
+-		rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
+-				    GFP_KERNEL, dev_to_node(dev));
+-		if (!rhub->psi)
+-			rhub->psi_count = 0;
++	port_cap = &xhci->port_caps[xhci->num_port_caps++];
++	if (xhci->num_port_caps > max_caps)
++		return;
++
++	port_cap->maj_rev = major_revision;
++	port_cap->min_rev = minor_revision;
++	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
+ 
+-		rhub->psi_uid_count++;
+-		for (i = 0; i < rhub->psi_count; i++) {
+-			rhub->psi[i] = readl(addr + 4 + i);
++	if (port_cap->psi_count) {
++		port_cap->psi = kcalloc_node(port_cap->psi_count,
++					     sizeof(*port_cap->psi),
++					     GFP_KERNEL, dev_to_node(dev));
++		if (!port_cap->psi)
++			port_cap->psi_count = 0;
++
++		port_cap->psi_uid_count++;
++		for (i = 0; i < port_cap->psi_count; i++) {
++			port_cap->psi[i] = readl(addr + 4 + i);
+ 
+ 			/* count unique ID values, two consecutive entries can
+ 			 * have the same ID if link is assymetric
+ 			 */
+-			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
+-				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
+-				rhub->psi_uid_count++;
++			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
++				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
++				port_cap->psi_uid_count++;
+ 
+ 			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+-				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
+-				  XHCI_EXT_PORT_LP(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
++				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PLT(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PFD(port_cap->psi[i]),
++				  XHCI_EXT_PORT_LP(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
+ 		}
+ 	}
+ 	/* cache usb2 port capabilities */
+@@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 			continue;
+ 		}
+ 		hw_port->rhub = rhub;
++		hw_port->port_cap = port_cap;
+ 		rhub->num_ports++;
+ 	}
+ 	/* FIXME: Should we disable ports not in the Extended Capabilities? */
+@@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+ 	if (!xhci->ext_caps)
+ 		return -ENOMEM;
+ 
++	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
++				flags, dev_to_node(dev));
++	if (!xhci->port_caps)
++		return -ENOMEM;
++
+ 	offset = cap_start;
+ 
+ 	while (offset) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4917c5b033fa..5e9b537df631 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -49,6 +49,7 @@
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI		0x15ec
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI		0x15f0
+ #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI		0x8a13
++#define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
+ 
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+@@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
+ 	if (!usb_hcd_is_primary_hcd(hcd))
+ 		return 0;
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_acpi_rtd3_enable(pdev);
++
+ 	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+ 
+ 	/* Find any debug ports */
+@@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 			HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ 		xhci->shared_hcd->can_do_streams = 1;
+ 
+-	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+-		xhci_pme_acpi_rtd3_enable(dev);
+-
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+ 
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 13d8838cd552..3ecee10fdcdc 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1702,12 +1702,20 @@ struct xhci_bus_state {
+  * Intel Lynx Point LP xHCI host.
+  */
+ #define	XHCI_MAX_REXIT_TIMEOUT_MS	20
++struct xhci_port_cap {
++	u32			*psi;	/* array of protocol speed ID entries */
++	u8			psi_count;
++	u8			psi_uid_count;
++	u8			maj_rev;
++	u8			min_rev;
++};
+ 
+ struct xhci_port {
+ 	__le32 __iomem		*addr;
+ 	int			hw_portnum;
+ 	int			hcd_portnum;
+ 	struct xhci_hub		*rhub;
++	struct xhci_port_cap	*port_cap;
+ };
+ 
+ struct xhci_hub {
+@@ -1719,9 +1727,6 @@ struct xhci_hub {
+ 	/* supported prococol extended capabiliy values */
+ 	u8			maj_rev;
+ 	u8			min_rev;
+-	u32			*psi;	/* array of protocol speed ID entries */
+-	u8			psi_count;
+-	u8			psi_uid_count;
+ };
+ 
+ /* There is one xhci_hcd structure per controller */
+@@ -1880,6 +1885,9 @@ struct xhci_hcd {
+ 	/* cached usb2 extened protocol capabilites */
+ 	u32                     *ext_caps;
+ 	unsigned int            num_ext_caps;
++	/* cached extended protocol port capabilities */
++	struct xhci_port_cap	*port_caps;
++	unsigned int		num_port_caps;
+ 	/* Compliance Mode Recovery Data */
+ 	struct timer_list	comp_mode_recovery_timer;
+ 	u32			port_status_u0;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index dce44fbf031f..dce20301e367 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -33,6 +33,14 @@
+ #define USB_DEVICE_ID_CODEMERCS_IOWPV2	0x1512
+ /* full speed iowarrior */
+ #define USB_DEVICE_ID_CODEMERCS_IOW56	0x1503
++/* fuller speed iowarrior */
++#define USB_DEVICE_ID_CODEMERCS_IOW28	0x1504
++#define USB_DEVICE_ID_CODEMERCS_IOW28L	0x1505
++#define USB_DEVICE_ID_CODEMERCS_IOW100	0x1506
++
++/* OEMed devices */
++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG	0x158a
++#define USB_DEVICE_ID_CODEMERCS_IOW56AM		0x158b
+ 
+ /* Get a minor range for your devices from the usb maintainer */
+ #ifdef CONFIG_USB_DYNAMIC_MINORS
+@@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
+ 	{}			/* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, iowarrior_ids);
+@@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
+ 	}
+ 	switch (dev->product_id) {
+ 	case USB_DEVICE_ID_CODEMERCS_IOW24:
++	case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
+ 	case USB_DEVICE_ID_CODEMERCS_IOWPV1:
+ 	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
+ 	case USB_DEVICE_ID_CODEMERCS_IOW40:
+@@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
+ 		goto exit;
+ 		break;
+ 	case USB_DEVICE_ID_CODEMERCS_IOW56:
++	case USB_DEVICE_ID_CODEMERCS_IOW56AM:
++	case USB_DEVICE_ID_CODEMERCS_IOW28:
++	case USB_DEVICE_ID_CODEMERCS_IOW28L:
++	case USB_DEVICE_ID_CODEMERCS_IOW100:
+ 		/* The IOW56 uses asynchronous IO and more urbs */
+ 		if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
+ 			/* Wait until we are below the limit for submitted urbs */
+@@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
+ 	switch (cmd) {
+ 	case IOW_WRITE:
+ 		if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
++		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
+@@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 		goto error;
+ 	}
+ 
+-	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
++	if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
+ 		res = usb_find_last_int_out_endpoint(iface_desc,
+ 				&dev->int_out_endpoint);
+ 		if (res) {
+@@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
+ 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
+ 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
+-	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
++	    ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
+ 		/* IOWarrior56 has wMaxPacketSize different from report size */
+ 		dev->report_size = 7;
+ 
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index d3f420f3a083..c5ecdcd51ffc 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -205,6 +205,16 @@ static int ch341_get_divisor(speed_t speed)
+ 			16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
+ 		div++;
+ 
++	/*
++	 * Prefer lower base clock (fact = 0) if even divisor.
++	 *
++	 * Note that this makes the receiver more tolerant to errors.
++	 */
++	if (fact == 1 && div % 2 == 0) {
++		div /= 2;
++		fact = 0;
++	}
++
+ 	return (0x100 - div) << 8 | fact << 2 | ps;
+ }
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 95bba3ba6ac6..3670fda02c34 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -45,6 +45,7 @@ struct uas_dev_info {
+ 	struct scsi_cmnd *cmnd[MAX_CMNDS];
+ 	spinlock_t lock;
+ 	struct work_struct work;
++	struct work_struct scan_work;      /* for async scanning */
+ };
+ 
+ enum {
+@@ -114,6 +115,17 @@ out:
+ 	spin_unlock_irqrestore(&devinfo->lock, flags);
+ }
+ 
++static void uas_scan_work(struct work_struct *work)
++{
++	struct uas_dev_info *devinfo =
++		container_of(work, struct uas_dev_info, scan_work);
++	struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
++
++	dev_dbg(&devinfo->intf->dev, "starting scan\n");
++	scsi_scan_host(shost);
++	dev_dbg(&devinfo->intf->dev, "scan complete\n");
++}
++
+ static void uas_add_work(struct uas_cmd_info *cmdinfo)
+ {
+ 	struct scsi_pointer *scp = (void *)cmdinfo;
+@@ -982,6 +994,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	init_usb_anchor(&devinfo->data_urbs);
+ 	spin_lock_init(&devinfo->lock);
+ 	INIT_WORK(&devinfo->work, uas_do_work);
++	INIT_WORK(&devinfo->scan_work, uas_scan_work);
+ 
+ 	result = uas_configure_endpoints(devinfo);
+ 	if (result)
+@@ -998,7 +1011,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (result)
+ 		goto free_streams;
+ 
+-	scsi_scan_host(shost);
++	/* Submit the delayed_work for SCSI-device scanning */
++	schedule_work(&devinfo->scan_work);
++
+ 	return result;
+ 
+ free_streams:
+@@ -1166,6 +1181,12 @@ static void uas_disconnect(struct usb_interface *intf)
+ 	usb_kill_anchored_urbs(&devinfo->data_urbs);
+ 	uas_zap_pending(devinfo, DID_NO_CONNECT);
+ 
++	/*
++	 * Prevent SCSI scanning (if it hasn't started yet)
++	 * or wait for the SCSI-scanning routine to stop.
++	 */
++	cancel_work_sync(&devinfo->scan_work);
++
+ 	scsi_remove_host(shost);
+ 	uas_free_streams(devinfo);
+ 	scsi_host_put(shost);
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 8b9919c26095..456a164364a2 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ 		 * cpu.
+ 		 */
+ 		__this_cpu_write(xen_in_preemptible_hcall, false);
+-		_cond_resched();
++		local_irq_enable();
++		cond_resched();
++		local_irq_disable();
+ 		__this_cpu_write(xen_in_preemptible_hcall, true);
+ 	}
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index c1e47db439e2..3bb4bc2c9fd1 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3200,6 +3200,7 @@ int __cold open_ctree(struct super_block *sb,
+ 	if (IS_ERR(fs_info->fs_root)) {
+ 		err = PTR_ERR(fs_info->fs_root);
+ 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
++		fs_info->fs_root = NULL;
+ 		goto fail_qgroup;
+ 	}
+ 
+@@ -4272,6 +4273,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ 		cond_resched();
+ 		spin_lock(&delayed_refs->lock);
+ 	}
++	btrfs_qgroup_destroy_extent_records(trans);
+ 
+ 	spin_unlock(&delayed_refs->lock);
+ 
+@@ -4497,7 +4499,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 	wake_up(&fs_info->transaction_wait);
+ 
+ 	btrfs_destroy_delayed_inodes(fs_info);
+-	btrfs_assert_delayed_root_empty(fs_info);
+ 
+ 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
+ 				     EXTENT_DIRTY);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 274318e9114e..f50341ce5d44 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4430,6 +4430,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
+ 
+ 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
+ 					 offset, ins, 1);
++	if (ret)
++		btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
+ 	btrfs_put_block_group(block_group);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 537b4c563f09..e6901744a5be 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4757,6 +4757,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ 	u64 bytes_deleted = 0;
+ 	bool be_nice = false;
+ 	bool should_throttle = false;
++	const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
++	struct extent_state *cached_state = NULL;
+ 
+ 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+ 
+@@ -4773,6 +4775,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ 		return -ENOMEM;
+ 	path->reada = READA_BACK;
+ 
++	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
++		lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
++				 &cached_state);
++
+ 	/*
+ 	 * We want to drop from the next block forward in case this new size is
+ 	 * not block aligned since we will be keeping the last block of the
+@@ -4809,7 +4815,6 @@ search_again:
+ 		goto out;
+ 	}
+ 
+-	path->leave_spinning = 1;
+ 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ 	if (ret < 0)
+ 		goto out;
+@@ -4961,7 +4966,6 @@ delete:
+ 		     root == fs_info->tree_root)) {
+ 			struct btrfs_ref ref = { 0 };
+ 
+-			btrfs_set_path_blocking(path);
+ 			bytes_deleted += extent_num_bytes;
+ 
+ 			btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
+@@ -5037,6 +5041,8 @@ out:
+ 		if (!ret && last_size > new_size)
+ 			last_size = new_size;
+ 		btrfs_ordered_update_i_size(inode, last_size, NULL);
++		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
++				     (u64)-1, &cached_state);
+ 	}
+ 
+ 	btrfs_free_path(path);
+@@ -10481,6 +10487,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_key ins;
+ 	u64 cur_offset = start;
++	u64 clear_offset = start;
+ 	u64 i_size;
+ 	u64 cur_bytes;
+ 	u64 last_alloc = (u64)-1;
+@@ -10515,6 +10522,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ 				btrfs_end_transaction(trans);
+ 			break;
+ 		}
++
++		/*
++		 * We've reserved this space, and thus converted it from
++		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
++		 * from here on out we will only need to clear our reservation
++		 * for the remaining unreserved area, so advance our
++		 * clear_offset by our extent size.
++		 */
++		clear_offset += ins.offset;
+ 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ 
+ 		last_alloc = ins.offset;
+@@ -10594,9 +10610,9 @@ next:
+ 		if (own_trans)
+ 			btrfs_end_transaction(trans);
+ 	}
+-	if (cur_offset < end)
+-		btrfs_free_reserved_data_space(inode, NULL, cur_offset,
+-			end - cur_offset + 1);
++	if (clear_offset < end)
++		btrfs_free_reserved_data_space(inode, NULL, clear_offset,
++			end - clear_offset + 1);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index fb09bc2f8e4d..0596117202a2 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -686,10 +686,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+ 		}
+ 		btrfs_start_ordered_extent(inode, ordered, 1);
+ 		end = ordered->file_offset;
++		/*
++		 * If the ordered extent had an error save the error but don't
++		 * exit without waiting first for all other ordered extents in
++		 * the range to complete.
++		 */
+ 		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ 			ret = -EIO;
+ 		btrfs_put_ordered_extent(ordered);
+-		if (ret || end == 0 || end == start)
++		if (end == 0 || end == start)
+ 			break;
+ 		end--;
+ 	}
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 39fc8c3d3a75..410b791f28a5 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4016,3 +4016,16 @@ out:
+ 	}
+ 	return ret;
+ }
++
++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
++{
++	struct btrfs_qgroup_extent_record *entry;
++	struct btrfs_qgroup_extent_record *next;
++	struct rb_root *root;
++
++	root = &trans->delayed_refs.dirty_extent_root;
++	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
++		ulist_free(entry->old_roots);
++		kfree(entry);
++	}
++}
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 236f12224d52..1bc654459469 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
+ 		u64 last_snapshot);
+ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
+ 		struct btrfs_root *root, struct extent_buffer *eb);
++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
+ 
+ #endif
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 33dcc88b428a..beb6c69cd1e5 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -121,6 +121,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
+ 		BUG_ON(!list_empty(&transaction->list));
+ 		WARN_ON(!RB_EMPTY_ROOT(
+ 				&transaction->delayed_refs.href_root.rb_root));
++		WARN_ON(!RB_EMPTY_ROOT(
++				&transaction->delayed_refs.dirty_extent_root));
+ 		if (transaction->delayed_refs.pending_csums)
+ 			btrfs_err(transaction->fs_info,
+ 				  "pending csums is %llu",
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index f91db24bbf3b..a064b408d841 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
+ 	struct extent_crypt_result ecr;
+ 	int rc = 0;
+ 
+-	BUG_ON(!crypt_stat || !crypt_stat->tfm
+-	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
++	if (!crypt_stat || !crypt_stat->tfm
++	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
++		return -EINVAL;
++
+ 	if (unlikely(ecryptfs_verbosity > 0)) {
+ 		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+ 				crypt_stat->key_size);
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 216fbe6a4837..4dc09638de8f 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
+ 		printk(KERN_WARNING "Tag 1 packet contains key larger "
+ 		       "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
+ 		rc = -EINVAL;
+-		goto out;
++		goto out_free;
+ 	}
+ 	memcpy((*new_auth_tok)->session_key.encrypted_key,
+ 	       &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
+diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
+index d668e60b85b5..c05ca39aa449 100644
+--- a/fs/ecryptfs/messaging.c
++++ b/fs/ecryptfs/messaging.c
+@@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
+ 					* ecryptfs_message_buf_len),
+ 				       GFP_KERNEL);
+ 	if (!ecryptfs_msg_ctx_arr) {
++		kfree(ecryptfs_daemon_hash);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 0b202e00d93f..5aba67a504cf 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+ 	struct ext4_group_desc *desc;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	struct buffer_head *bh_p;
+ 
+ 	if (block_group >= ngroups) {
+ 		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 
+ 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ 	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+-	if (!sbi->s_group_desc[group_desc]) {
++	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
++	/*
++	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
++	 * the pointer being dereferenced won't be dereferenced again. By
++	 * looking at the usage in add_new_gdb() the value isn't modified,
++	 * just the pointer, and so it remains valid.
++	 */
++	if (!bh_p) {
+ 		ext4_error(sb, "Group descriptor not loaded - "
+ 			   "block_group = %u, group_desc = %u, desc = %u",
+ 			   block_group, group_desc, offset);
+@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	}
+ 
+ 	desc = (struct ext4_group_desc *)(
+-		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
++		(__u8 *)bh_p->b_data +
+ 		offset * EXT4_DESC_SIZE(sb));
+ 	if (bh)
+-		*bh = sbi->s_group_desc[group_desc];
++		*bh = bh_p;
+ 	return desc;
+ }
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 1fd6c1e2ce2a..7a14e553d58f 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1401,7 +1401,7 @@ struct ext4_sb_info {
+ 	loff_t s_bitmap_maxbytes;	/* max bytes for bitmap files */
+ 	struct buffer_head * s_sbh;	/* Buffer containing the super block */
+ 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
+-	struct buffer_head **s_group_desc;
++	struct buffer_head * __rcu *s_group_desc;
+ 	unsigned int s_mount_opt;
+ 	unsigned int s_mount_opt2;
+ 	unsigned int s_mount_flags;
+@@ -1463,7 +1463,7 @@ struct ext4_sb_info {
+ #endif
+ 
+ 	/* for buddy allocator */
+-	struct ext4_group_info ***s_group_info;
++	struct ext4_group_info ** __rcu *s_group_info;
+ 	struct inode *s_buddy_cache;
+ 	spinlock_t s_md_lock;
+ 	unsigned short *s_mb_offsets;
+@@ -1513,7 +1513,7 @@ struct ext4_sb_info {
+ 	unsigned int s_extent_max_zeroout_kb;
+ 
+ 	unsigned int s_log_groups_per_flex;
+-	struct flex_groups *s_flex_groups;
++	struct flex_groups * __rcu *s_flex_groups;
+ 	ext4_group_t s_flex_groups_allocated;
+ 
+ 	/* workqueue for reserved extent conversions (buffered io) */
+@@ -1553,8 +1553,11 @@ struct ext4_sb_info {
+ 	struct ratelimit_state s_warning_ratelimit_state;
+ 	struct ratelimit_state s_msg_ratelimit_state;
+ 
+-	/* Barrier between changing inodes' journal flags and writepages ops. */
+-	struct percpu_rw_semaphore s_journal_flag_rwsem;
++	/*
++	 * Barrier between writepages ops and changing any inode's JOURNAL_DATA
++	 * or EXTENTS flag.
++	 */
++	struct percpu_rw_semaphore s_writepages_rwsem;
+ 	struct dax_device *s_daxdev;
+ };
+ 
+@@ -1574,6 +1577,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+ 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
+ }
+ 
++/*
++ * Returns: sbi->field[index]
++ * Used to access an array element from the following sbi fields which require
++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
++ * - s_group_desc
++ * - s_group_info
++ * - s_flex_group
++ */
++#define sbi_array_rcu_deref(sbi, field, index)				   \
++({									   \
++	typeof(*((sbi)->field)) _v;					   \
++	rcu_read_lock();						   \
++	_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index];	   \
++	rcu_read_unlock();						   \
++	_v;								   \
++})
++
+ /*
+  * Inode dynamic state flags
+  */
+@@ -2669,6 +2689,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
+ extern bool ext4_empty_dir(struct inode *inode);
+ 
+ /* resize.c */
++extern void ext4_kvfree_array_rcu(void *to_free);
+ extern int ext4_group_add(struct super_block *sb,
+ 				struct ext4_new_group_data *input);
+ extern int ext4_group_extend(struct super_block *sb,
+@@ -2916,13 +2937,13 @@ static inline
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ 					    ext4_group_t group)
+ {
+-	 struct ext4_group_info ***grp_info;
++	 struct ext4_group_info **grp_info;
+ 	 long indexv, indexh;
+ 	 BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
+-	 grp_info = EXT4_SB(sb)->s_group_info;
+ 	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+ 	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-	 return grp_info[indexv][indexh];
++	 grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++	 return grp_info[indexh];
+ }
+ 
+ /*
+@@ -2972,7 +2993,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
+ 		     !inode_is_locked(inode));
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	if (newsize > EXT4_I(inode)->i_disksize)
+-		EXT4_I(inode)->i_disksize = newsize;
++		WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
+ }
+ 
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 8ca4a23129aa..7db0c8814f2e 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -325,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+ 
+ 	percpu_counter_inc(&sbi->s_freeinodes_counter);
+ 	if (sbi->s_log_groups_per_flex) {
+-		ext4_group_t f = ext4_flex_group(sbi, block_group);
++		struct flex_groups *fg;
+ 
+-		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
++					 ext4_flex_group(sbi, block_group));
++		atomic_inc(&fg->free_inodes);
+ 		if (is_directory)
+-			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
++			atomic_dec(&fg->used_dirs);
+ 	}
+ 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+@@ -365,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ 			    int flex_size, struct orlov_stats *stats)
+ {
+ 	struct ext4_group_desc *desc;
+-	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
+ 
+ 	if (flex_size > 1) {
+-		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+-		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+-		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
++		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
++							     s_flex_groups, g);
++		stats->free_inodes = atomic_read(&fg->free_inodes);
++		stats->free_clusters = atomic64_read(&fg->free_clusters);
++		stats->used_dirs = atomic_read(&fg->used_dirs);
+ 		return;
+ 	}
+ 
+@@ -1051,7 +1054,8 @@ got:
+ 		if (sbi->s_log_groups_per_flex) {
+ 			ext4_group_t f = ext4_flex_group(sbi, group);
+ 
+-			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
++			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
++							f)->used_dirs);
+ 		}
+ 	}
+ 	if (ext4_has_group_desc_csum(sb)) {
+@@ -1074,7 +1078,8 @@ got:
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		flex_group = ext4_flex_group(sbi, group);
+-		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
++		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
++						flex_group)->free_inodes);
+ 	}
+ 
+ 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 25191201ccdc..74a941e920cf 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2466,7 +2466,7 @@ update_disksize:
+ 	 * truncate are avoided by checking i_size under i_data_sem.
+ 	 */
+ 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
+-	if (disksize > EXT4_I(inode)->i_disksize) {
++	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
+ 		int err2;
+ 		loff_t i_size;
+ 
+@@ -2627,7 +2627,7 @@ static int ext4_writepages(struct address_space *mapping,
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&sbi->s_journal_flag_rwsem);
++	percpu_down_read(&sbi->s_writepages_rwsem);
+ 	trace_ext4_writepages(inode, wbc);
+ 
+ 	/*
+@@ -2848,7 +2848,7 @@ unplug:
+ out_writepages:
+ 	trace_ext4_writepages_result(inode, wbc, ret,
+ 				     nr_to_write - wbc->nr_to_write);
+-	percpu_up_read(&sbi->s_journal_flag_rwsem);
++	percpu_up_read(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
+ 
+@@ -2863,13 +2863,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&sbi->s_journal_flag_rwsem);
++	percpu_down_read(&sbi->s_writepages_rwsem);
+ 	trace_ext4_writepages(inode, wbc);
+ 
+ 	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ 	trace_ext4_writepages_result(inode, wbc, ret,
+ 				     nr_to_write - wbc->nr_to_write);
+-	percpu_up_read(&sbi->s_journal_flag_rwsem);
++	percpu_up_read(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
+ 
+@@ -5830,7 +5830,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		}
+ 	}
+ 
+-	percpu_down_write(&sbi->s_journal_flag_rwsem);
++	percpu_down_write(&sbi->s_writepages_rwsem);
+ 	jbd2_journal_lock_updates(journal);
+ 
+ 	/*
+@@ -5847,7 +5847,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		err = jbd2_journal_flush(journal);
+ 		if (err < 0) {
+ 			jbd2_journal_unlock_updates(journal);
+-			percpu_up_write(&sbi->s_journal_flag_rwsem);
++			percpu_up_write(&sbi->s_writepages_rwsem);
+ 			return err;
+ 		}
+ 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+@@ -5855,7 +5855,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 	ext4_set_aops(inode);
+ 
+ 	jbd2_journal_unlock_updates(journal);
+-	percpu_up_write(&sbi->s_journal_flag_rwsem);
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 
+ 	if (val)
+ 		up_write(&EXT4_I(inode)->i_mmap_sem);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a3e2767bdf2f..c76ffc259d19 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned size;
+-	struct ext4_group_info ***new_groupinfo;
++	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
+ 
+ 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ 		EXT4_DESC_PER_BLOCK_BITS(sb);
+@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
+ 		return -ENOMEM;
+ 	}
+-	if (sbi->s_group_info) {
+-		memcpy(new_groupinfo, sbi->s_group_info,
++	rcu_read_lock();
++	old_groupinfo = rcu_dereference(sbi->s_group_info);
++	if (old_groupinfo)
++		memcpy(new_groupinfo, old_groupinfo,
+ 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
+-		kvfree(sbi->s_group_info);
+-	}
+-	sbi->s_group_info = new_groupinfo;
++	rcu_read_unlock();
++	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
+ 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
++	if (old_groupinfo)
++		ext4_kvfree_array_rcu(old_groupinfo);
+ 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 
+ 		   sbi->s_group_info_size);
+ 	return 0;
+@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ {
+ 	int i;
+ 	int metalen = 0;
++	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_group_info **meta_group_info;
+ 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ 				 "for a buddy group");
+ 			goto exit_meta_group_info;
+ 		}
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
+-			meta_group_info;
++		rcu_read_lock();
++		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
++		rcu_read_unlock();
+ 	}
+ 
+-	meta_group_info =
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
++	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
+ 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+ 
+ 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
+@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ exit_group_info:
+ 	/* If a meta_group_info table has been allocated, release it now */
+ 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
+-		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
++		struct ext4_group_info ***group_info;
++
++		rcu_read_lock();
++		group_info = rcu_dereference(sbi->s_group_info);
++		kfree(group_info[idx]);
++		group_info[idx] = NULL;
++		rcu_read_unlock();
+ 	}
+ exit_meta_group_info:
+ 	return -ENOMEM;
+@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	int err;
+ 	struct ext4_group_desc *desc;
++	struct ext4_group_info ***group_info;
+ 	struct kmem_cache *cachep;
+ 
+ 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
+@@ -2507,11 +2517,16 @@ err_freebuddy:
+ 	while (i-- > 0)
+ 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+ 	i = sbi->s_group_info_size;
++	rcu_read_lock();
++	group_info = rcu_dereference(sbi->s_group_info);
+ 	while (i-- > 0)
+-		kfree(sbi->s_group_info[i]);
++		kfree(group_info[i]);
++	rcu_read_unlock();
+ 	iput(sbi->s_buddy_cache);
+ err_freesgi:
+-	kvfree(sbi->s_group_info);
++	rcu_read_lock();
++	kvfree(rcu_dereference(sbi->s_group_info));
++	rcu_read_unlock();
+ 	return -ENOMEM;
+ }
+ 
+@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
+ 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+ 	ext4_group_t i;
+ 	int num_meta_group_infos;
+-	struct ext4_group_info *grinfo;
++	struct ext4_group_info *grinfo, ***group_info;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ 
+@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
+ 		num_meta_group_infos = (ngroups +
+ 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ 			EXT4_DESC_PER_BLOCK_BITS(sb);
++		rcu_read_lock();
++		group_info = rcu_dereference(sbi->s_group_info);
+ 		for (i = 0; i < num_meta_group_infos; i++)
+-			kfree(sbi->s_group_info[i]);
+-		kvfree(sbi->s_group_info);
++			kfree(group_info[i]);
++		kvfree(group_info);
++		rcu_read_unlock();
+ 	}
+ 	kfree(sbi->s_mb_offsets);
+ 	kfree(sbi->s_mb_maxs);
+@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 		ext4_group_t flex_group = ext4_flex_group(sbi,
+ 							  ac->ac_b_ex.fe_group);
+ 		atomic64_sub(ac->ac_b_ex.fe_len,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -4914,7 +4933,8 @@ do_more:
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ 		atomic64_add(count_clusters,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	/*
+@@ -5071,7 +5091,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ 		atomic64_add(clusters_freed,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index 89725fa42573..fb6520f37135 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -407,6 +407,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
+ 
+ int ext4_ext_migrate(struct inode *inode)
+ {
++	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	handle_t *handle;
+ 	int retval = 0, i;
+ 	__le32 *i_data;
+@@ -431,6 +432,8 @@ int ext4_ext_migrate(struct inode *inode)
+ 		 */
+ 		return retval;
+ 
++	percpu_down_write(&sbi->s_writepages_rwsem);
++
+ 	/*
+ 	 * Worst case we can touch the allocation bitmaps, a bgd
+ 	 * block, and a block to link in the orphan list.  We do need
+@@ -441,7 +444,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 
+ 	if (IS_ERR(handle)) {
+ 		retval = PTR_ERR(handle);
+-		return retval;
++		goto out_unlock;
+ 	}
+ 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
+ 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
+@@ -452,7 +455,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 	if (IS_ERR(tmp_inode)) {
+ 		retval = PTR_ERR(tmp_inode);
+ 		ext4_journal_stop(handle);
+-		return retval;
++		goto out_unlock;
+ 	}
+ 	i_size_write(tmp_inode, i_size_read(inode));
+ 	/*
+@@ -494,7 +497,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 		 */
+ 		ext4_orphan_del(NULL, tmp_inode);
+ 		retval = PTR_ERR(handle);
+-		goto out;
++		goto out_tmp_inode;
+ 	}
+ 
+ 	ei = EXT4_I(inode);
+@@ -576,10 +579,11 @@ err_out:
+ 	ext4_ext_tree_init(handle, tmp_inode);
+ out_stop:
+ 	ext4_journal_stop(handle);
+-out:
++out_tmp_inode:
+ 	unlock_new_inode(tmp_inode);
+ 	iput(tmp_inode);
+-
++out_unlock:
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 	return retval;
+ }
+ 
+@@ -589,7 +593,8 @@ out:
+ int ext4_ind_migrate(struct inode *inode)
+ {
+ 	struct ext4_extent_header	*eh;
+-	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
++	struct ext4_sb_info		*sbi = EXT4_SB(inode->i_sb);
++	struct ext4_super_block		*es = sbi->s_es;
+ 	struct ext4_inode_info		*ei = EXT4_I(inode);
+ 	struct ext4_extent		*ex;
+ 	unsigned int			i, len;
+@@ -613,9 +618,13 @@ int ext4_ind_migrate(struct inode *inode)
+ 	if (test_opt(inode->i_sb, DELALLOC))
+ 		ext4_alloc_da_blocks(inode);
+ 
++	percpu_down_write(&sbi->s_writepages_rwsem);
++
+ 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+-	if (IS_ERR(handle))
+-		return PTR_ERR(handle);
++	if (IS_ERR(handle)) {
++		ret = PTR_ERR(handle);
++		goto out_unlock;
++	}
+ 
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	ret = ext4_ext_check_inode(inode);
+@@ -650,5 +659,7 @@ int ext4_ind_migrate(struct inode *inode)
+ errout:
+ 	ext4_journal_stop(handle);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
++out_unlock:
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index deb9f7a02976..ee615a93af6e 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1507,6 +1507,7 @@ restart:
+ 		/*
+ 		 * We deal with the read-ahead logic here.
+ 		 */
++		cond_resched();
+ 		if (ra_ptr >= ra_max) {
+ 			/* Refill the readahead buffer */
+ 			ra_ptr = 0;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index a8c0f2b5b6e1..f178af1dffe0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -17,6 +17,33 @@
+ 
+ #include "ext4_jbd2.h"
+ 
++struct ext4_rcu_ptr {
++	struct rcu_head rcu;
++	void *ptr;
++};
++
++static void ext4_rcu_ptr_callback(struct rcu_head *head)
++{
++	struct ext4_rcu_ptr *ptr;
++
++	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
++	kvfree(ptr->ptr);
++	kfree(ptr);
++}
++
++void ext4_kvfree_array_rcu(void *to_free)
++{
++	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
++
++	if (ptr) {
++		ptr->ptr = to_free;
++		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
++		return;
++	}
++	synchronize_rcu();
++	kvfree(to_free);
++}
++
+ int ext4_resize_begin(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -542,8 +569,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ 				brelse(gdb);
+ 				goto out;
+ 			}
+-			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+-			       gdb->b_size);
++			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
++				s_group_desc, j)->b_data, gdb->b_size);
+ 			set_buffer_uptodate(gdb);
+ 
+ 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+@@ -861,13 +888,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ 	}
+ 	brelse(dind);
+ 
+-	o_group_desc = EXT4_SB(sb)->s_group_desc;
++	rcu_read_lock();
++	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ 	memcpy(n_group_desc, o_group_desc,
+ 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++	rcu_read_unlock();
+ 	n_group_desc[gdb_num] = gdb_bh;
+-	EXT4_SB(sb)->s_group_desc = n_group_desc;
++	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ 	EXT4_SB(sb)->s_gdb_count++;
+-	kvfree(o_group_desc);
++	ext4_kvfree_array_rcu(o_group_desc);
+ 
+ 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
+ 	err = ext4_handle_dirty_super(handle, sb);
+@@ -911,9 +940,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 		return err;
+ 	}
+ 
+-	o_group_desc = EXT4_SB(sb)->s_group_desc;
++	rcu_read_lock();
++	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ 	memcpy(n_group_desc, o_group_desc,
+ 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++	rcu_read_unlock();
+ 	n_group_desc[gdb_num] = gdb_bh;
+ 
+ 	BUFFER_TRACE(gdb_bh, "get_write_access");
+@@ -924,9 +955,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 		return err;
+ 	}
+ 
+-	EXT4_SB(sb)->s_group_desc = n_group_desc;
++	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ 	EXT4_SB(sb)->s_gdb_count++;
+-	kvfree(o_group_desc);
++	ext4_kvfree_array_rcu(o_group_desc);
+ 	return err;
+ }
+ 
+@@ -1190,7 +1221,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
+ 		 * use non-sparse filesystems anymore.  This is already checked above.
+ 		 */
+ 		if (gdb_off) {
+-			gdb_bh = sbi->s_group_desc[gdb_num];
++			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++						     gdb_num);
+ 			BUFFER_TRACE(gdb_bh, "get_write_access");
+ 			err = ext4_journal_get_write_access(handle, gdb_bh);
+ 
+@@ -1272,7 +1304,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+ 		/*
+ 		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
+ 		 */
+-		gdb_bh = sbi->s_group_desc[gdb_num];
++		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
+ 		/* Update group descriptor block for new group */
+ 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
+ 						 gdb_off * EXT4_DESC_SIZE(sb));
+@@ -1400,11 +1432,14 @@ static void ext4_update_super(struct super_block *sb,
+ 		   percpu_counter_read(&sbi->s_freeclusters_counter));
+ 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group;
++		struct flex_groups *fg;
++
+ 		flex_group = ext4_flex_group(sbi, group_data[0].group);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+ 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &fg->free_clusters);
+ 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+-			   &sbi->s_flex_groups[flex_group].free_inodes);
++			   &fg->free_inodes);
+ 	}
+ 
+ 	/*
+@@ -1499,7 +1534,8 @@ exit_journal:
+ 		for (; gdb_num <= gdb_num_end; gdb_num++) {
+ 			struct buffer_head *gdb_bh;
+ 
+-			gdb_bh = sbi->s_group_desc[gdb_num];
++			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++						     gdb_num);
+ 			if (old_gdb == gdb_bh->b_blocknr)
+ 				continue;
+ 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c51d7ef2e467..12806be10a18 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
++	struct buffer_head **group_desc;
++	struct flex_groups **flex_groups;
+ 	int aborted = 0;
+ 	int i, err;
+ 
+@@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
+ 	if (!sb_rdonly(sb))
+ 		ext4_commit_super(sb, 1);
+ 
++	rcu_read_lock();
++	group_desc = rcu_dereference(sbi->s_group_desc);
+ 	for (i = 0; i < sbi->s_gdb_count; i++)
+-		brelse(sbi->s_group_desc[i]);
+-	kvfree(sbi->s_group_desc);
+-	kvfree(sbi->s_flex_groups);
++		brelse(group_desc[i]);
++	kvfree(group_desc);
++	flex_groups = rcu_dereference(sbi->s_flex_groups);
++	if (flex_groups) {
++		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++			kvfree(flex_groups[i]);
++		kvfree(flex_groups);
++	}
++	rcu_read_unlock();
+ 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ #ifdef CONFIG_QUOTA
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ 		kfree(get_qf_name(sb, sbi, i));
+@@ -2335,8 +2345,8 @@ done:
+ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct flex_groups *new_groups;
+-	int size;
++	struct flex_groups **old_groups, **new_groups;
++	int size, i;
+ 
+ 	if (!sbi->s_log_groups_per_flex)
+ 		return 0;
+@@ -2345,22 +2355,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ 	if (size <= sbi->s_flex_groups_allocated)
+ 		return 0;
+ 
+-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+-	new_groups = kvzalloc(size, GFP_KERNEL);
++	new_groups = kvzalloc(roundup_pow_of_two(size *
++			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
+ 	if (!new_groups) {
+-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+-			 size / (int) sizeof(struct flex_groups));
++		ext4_msg(sb, KERN_ERR,
++			 "not enough memory for %d flex group pointers", size);
+ 		return -ENOMEM;
+ 	}
+-
+-	if (sbi->s_flex_groups) {
+-		memcpy(new_groups, sbi->s_flex_groups,
+-		       (sbi->s_flex_groups_allocated *
+-			sizeof(struct flex_groups)));
+-		kvfree(sbi->s_flex_groups);
++	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
++		new_groups[i] = kvzalloc(roundup_pow_of_two(
++					 sizeof(struct flex_groups)),
++					 GFP_KERNEL);
++		if (!new_groups[i]) {
++			for (i--; i >= sbi->s_flex_groups_allocated; i--)
++				kvfree(new_groups[i]);
++			kvfree(new_groups);
++			ext4_msg(sb, KERN_ERR,
++				 "not enough memory for %d flex groups", size);
++			return -ENOMEM;
++		}
+ 	}
+-	sbi->s_flex_groups = new_groups;
+-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
++	rcu_read_lock();
++	old_groups = rcu_dereference(sbi->s_flex_groups);
++	if (old_groups)
++		memcpy(new_groups, old_groups,
++		       (sbi->s_flex_groups_allocated *
++			sizeof(struct flex_groups *)));
++	rcu_read_unlock();
++	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
++	sbi->s_flex_groups_allocated = size;
++	if (old_groups)
++		ext4_kvfree_array_rcu(old_groups);
+ 	return 0;
+ }
+ 
+@@ -2368,6 +2393,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_group_desc *gdp = NULL;
++	struct flex_groups *fg;
+ 	ext4_group_t flex_group;
+ 	int i, err;
+ 
+@@ -2385,12 +2411,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 		gdp = ext4_get_group_desc(sb, i, NULL);
+ 
+ 		flex_group = ext4_flex_group(sbi, i);
+-		atomic_add(ext4_free_inodes_count(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].free_inodes);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
++		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
+ 		atomic64_add(ext4_free_group_clusters(sb, gdp),
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
+-		atomic_add(ext4_used_dirs_count(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].used_dirs);
++			     &fg->free_clusters);
++		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
+ 	}
+ 
+ 	return 1;
+@@ -2964,7 +2989,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
+ 		return 0;
+ 	}
+ 
+-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
+ 	if (!readonly && (ext4_has_feature_quota(sb) ||
+ 			  ext4_has_feature_project(sb))) {
+ 		ext4_msg(sb, KERN_ERR,
+@@ -3589,9 +3614,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ 	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+ 	char *orig_data = kstrdup(data, GFP_KERNEL);
+-	struct buffer_head *bh;
++	struct buffer_head *bh, **group_desc;
+ 	struct ext4_super_block *es = NULL;
+ 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
++	struct flex_groups **flex_groups;
+ 	ext4_fsblk_t block;
+ 	ext4_fsblk_t sb_block = get_sb_block(&data);
+ 	ext4_fsblk_t logical_sb_block;
+@@ -4245,9 +4271,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			goto failed_mount;
+ 		}
+ 	}
+-	sbi->s_group_desc = kvmalloc_array(db_count,
+-					   sizeof(struct buffer_head *),
+-					   GFP_KERNEL);
++	rcu_assign_pointer(sbi->s_group_desc,
++			   kvmalloc_array(db_count,
++					  sizeof(struct buffer_head *),
++					  GFP_KERNEL));
+ 	if (sbi->s_group_desc == NULL) {
+ 		ext4_msg(sb, KERN_ERR, "not enough memory");
+ 		ret = -ENOMEM;
+@@ -4263,14 +4290,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	for (i = 0; i < db_count; i++) {
++		struct buffer_head *bh;
++
+ 		block = descriptor_loc(sb, logical_sb_block, i);
+-		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
+-		if (!sbi->s_group_desc[i]) {
++		bh = sb_bread_unmovable(sb, block);
++		if (!bh) {
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "can't read group descriptor %d", i);
+ 			db_count = i;
+ 			goto failed_mount2;
+ 		}
++		rcu_read_lock();
++		rcu_dereference(sbi->s_group_desc)[i] = bh;
++		rcu_read_unlock();
+ 	}
+ 	sbi->s_gdb_count = db_count;
+ 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+@@ -4549,7 +4581,7 @@ no_journal:
+ 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ 					  GFP_KERNEL);
+ 	if (!err)
+-		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
++		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+ 
+ 	if (err) {
+ 		ext4_msg(sb, KERN_ERR, "insufficient memory");
+@@ -4637,13 +4669,19 @@ failed_mount7:
+ 	ext4_unregister_li_request(sb);
+ failed_mount6:
+ 	ext4_mb_release(sb);
+-	if (sbi->s_flex_groups)
+-		kvfree(sbi->s_flex_groups);
++	rcu_read_lock();
++	flex_groups = rcu_dereference(sbi->s_flex_groups);
++	if (flex_groups) {
++		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++			kvfree(flex_groups[i]);
++		kvfree(flex_groups);
++	}
++	rcu_read_unlock();
+ 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ failed_mount5:
+ 	ext4_ext_release(sb);
+ 	ext4_release_system_zone(sb);
+@@ -4672,9 +4710,12 @@ failed_mount3:
+ 	if (sbi->s_mmp_tsk)
+ 		kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
++	rcu_read_lock();
++	group_desc = rcu_dereference(sbi->s_group_desc);
+ 	for (i = 0; i < db_count; i++)
+-		brelse(sbi->s_group_desc[i]);
+-	kvfree(sbi->s_group_desc);
++		brelse(group_desc[i]);
++	kvfree(group_desc);
++	rcu_read_unlock();
+ failed_mount:
+ 	if (sbi->s_chksum_driver)
+ 		crypto_free_shash(sbi->s_chksum_driver);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 6ae692b02980..678c62782ba3 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1286,11 +1286,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+ 	mutex_unlock(&ctx->uring_lock);
+ }
+ 
+-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+-			    long min)
++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
++			   long min)
+ {
+ 	int iters = 0, ret = 0;
+ 
++	/*
++	 * We disallow the app entering submit/complete with polling, but we
++	 * still need to lock the ring to prevent racing with polled issue
++	 * that got punted to a workqueue.
++	 */
++	mutex_lock(&ctx->uring_lock);
+ 	do {
+ 		int tmin = 0;
+ 
+@@ -1326,21 +1332,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ 		ret = 0;
+ 	} while (min && !*nr_events && !need_resched());
+ 
+-	return ret;
+-}
+-
+-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+-			   long min)
+-{
+-	int ret;
+-
+-	/*
+-	 * We disallow the app entering submit/complete with polling, but we
+-	 * still need to lock the ring to prevent racing with polled issue
+-	 * that got punted to a workqueue.
+-	 */
+-	mutex_lock(&ctx->uring_lock);
+-	ret = __io_iopoll_check(ctx, nr_events, min);
+ 	mutex_unlock(&ctx->uring_lock);
+ 	return ret;
+ }
+@@ -3884,7 +3875,7 @@ static int io_sq_thread(void *data)
+ 				 */
+ 				mutex_lock(&ctx->uring_lock);
+ 				if (!list_empty(&ctx->poll_list))
+-					__io_iopoll_check(ctx, &nr_events, 0);
++					io_iopoll_getevents(ctx, &nr_events, 0);
+ 				else
+ 					inflight = 0;
+ 				mutex_unlock(&ctx->uring_lock);
+@@ -3908,6 +3899,18 @@ static int io_sq_thread(void *data)
+ 		 * to enter the kernel to reap and flush events.
+ 		 */
+ 		if (!to_submit || ret == -EBUSY) {
++			/*
++			 * Drop cur_mm before scheduling, we can't hold it for
++			 * long periods (or over schedule()). Do this before
++			 * adding ourselves to the waitqueue, as the unuse/drop
++			 * may sleep.
++			 */
++			if (cur_mm) {
++				unuse_mm(cur_mm);
++				mmput(cur_mm);
++				cur_mm = NULL;
++			}
++
+ 			/*
+ 			 * We're polling. If we're within the defined idle
+ 			 * period, then let us spin without work before going
+@@ -3922,18 +3925,6 @@ static int io_sq_thread(void *data)
+ 				continue;
+ 			}
+ 
+-			/*
+-			 * Drop cur_mm before scheduling, we can't hold it for
+-			 * long periods (or over schedule()). Do this before
+-			 * adding ourselves to the waitqueue, as the unuse/drop
+-			 * may sleep.
+-			 */
+-			if (cur_mm) {
+-				unuse_mm(cur_mm);
+-				mmput(cur_mm);
+-				cur_mm = NULL;
+-			}
+-
+ 			prepare_to_wait(&ctx->sqo_wait, &wait,
+ 						TASK_INTERRUPTIBLE);
+ 
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 0603dfa9ad90..ab1078e85a58 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -936,8 +936,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
+ 	char *frozen_buffer = NULL;
+ 	unsigned long start_lock, time_lock;
+ 
+-	if (is_handle_aborted(handle))
+-		return -EROFS;
+ 	journal = transaction->t_journal;
+ 
+ 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
+@@ -1189,6 +1187,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+ 	struct journal_head *jh;
+ 	int rc;
+ 
++	if (is_handle_aborted(handle))
++		return -EROFS;
++
+ 	if (jbd2_write_access_granted(handle, bh, false))
+ 		return 0;
+ 
+@@ -1326,6 +1327,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+ 	struct journal_head *jh;
+ 	char *committed_data = NULL;
+ 
++	if (is_handle_aborted(handle))
++		return -EROFS;
++
+ 	if (jbd2_write_access_granted(handle, bh, true))
+ 		return 0;
+ 
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 11fdb0cc9a83..546e6adfeced 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -753,6 +753,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
+ 
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ 				acpi_get_gpe_device(u32 gpe_index,
+diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
+index 94f047a8a845..d7c403d0dd27 100644
+--- a/include/linux/intel-svm.h
++++ b/include/linux/intel-svm.h
+@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
+ 	BUG();
+ }
+ 
+-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index 4da8df57618a..6b7b35b5394e 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -192,7 +192,7 @@ enum {
+ 	IRQ_DOMAIN_FLAG_HIERARCHY	= (1 << 0),
+ 
+ 	/* Irq domain name was allocated in __irq_domain_add() */
+-	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 6),
++	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 1),
+ 
+ 	/* Irq domain is an IPI domain with virq per cpu */
+ 	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 2dbde119721d..bff539918d82 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1221,6 +1221,7 @@ struct pci_bits {
+ };
+ 
+ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
+ extern void ata_pci_remove_one(struct pci_dev *pdev);
+ 
+ #ifdef CONFIG_PM
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index bfa4e2ee94a9..bd5fe0e907e8 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -225,6 +225,8 @@ struct tty_port_client_operations {
+ 	void (*write_wakeup)(struct tty_port *port);
+ };
+ 
++extern const struct tty_port_client_operations tty_port_default_client_ops;
++
+ struct tty_port {
+ 	struct tty_bufhead	buf;		/* Locked internally */
+ 	struct tty_struct	*tty;		/* Back pointer */
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index a1be64c9940f..22c1f579afe3 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -69,4 +69,7 @@
+ /* Hub needs extra delay after resetting its port. */
+ #define USB_QUIRK_HUB_SLOW_RESET		BIT(14)
+ 
++/* device has blacklisted endpoints */
++#define USB_QUIRK_ENDPOINT_BLACKLIST		BIT(15)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index 533f56733ba8..b71b5c4f418c 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -627,7 +627,6 @@ struct iscsi_reject {
+ #define ISCSI_REASON_BOOKMARK_INVALID	9
+ #define ISCSI_REASON_BOOKMARK_NO_RESOURCES	10
+ #define ISCSI_REASON_NEGOTIATION_RESET	11
+-#define ISCSI_REASON_WAITING_FOR_LOGOUT	12
+ 
+ /* Max. number of Key=Value pairs in a text message */
+ #define MAX_KEY_VALUE_PAIRS	8192
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index 40ab20439fee..a36b7227a15a 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
+ 	struct list_head list;		/* list of all substream for given stream */
+ 	int stream;			/* direction */
+ 	int number;			/* substream number */
+-	unsigned int opened: 1,		/* open flag */
+-		     append: 1,		/* append flag (merge more streams) */
+-		     active_sensing: 1; /* send active sensing when close */
++	bool opened;			/* open flag */
++	bool append;			/* append flag (merge more streams) */
++	bool active_sensing;		/* send active sensing when close */
+ 	int use_count;			/* use counter (for output) */
+ 	size_t bytes;
+ 	struct snd_rawmidi *rmidi;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index ec97a7072413..fe12ea8dd2b3 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk)
+ 		ipc_assert_locked_object(&sma->sem_perm);
+ 		list_del(&un->list_id);
+ 
+-		/* we are the last process using this ulp, acquiring ulp->lock
+-		 * isn't required. Besides that, we are also protected against
+-		 * IPC_RMID as we hold sma->sem_perm lock now
+-		 */
++		spin_lock(&ulp->lock);
+ 		list_del_rcu(&un->list_proc);
++		spin_unlock(&ulp->lock);
+ 
+ 		/* perform adjustments registered in un */
+ 		for (i = 0; i < sma->sem_nsems; i++) {
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 5b9da0954a27..3668a0bc18ec 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ 
+ 	ulen = info->jited_prog_len;
+ 	info->jited_prog_len = aux->offload->jited_len;
+-	if (info->jited_prog_len & ulen) {
++	if (info->jited_prog_len && ulen) {
+ 		uinsns = u64_to_user_ptr(info->jited_prog_insns);
+ 		ulen = min_t(u32, info->jited_prog_len, ulen);
+ 		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 6af7ae83c4ad..32ec69cdba54 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -472,28 +472,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+ }
+ #endif /* CONFIG_MMU */
+ 
+-/*
+- * Because 32-bit DMA masks are so common we expect every architecture to be
+- * able to satisfy them - either by not supporting more physical memory, or by
+- * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
+- * use an IOMMU instead of the direct mapping.
+- */
+ int dma_direct_supported(struct device *dev, u64 mask)
+ {
+-	u64 min_mask;
+-
+-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+-		min_mask = DMA_BIT_MASK(zone_dma_bits);
+-	else
+-		min_mask = DMA_BIT_MASK(32);
++	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
+ 
+-	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
++	/*
++	 * Because 32-bit DMA masks are so common we expect every architecture
++	 * to be able to satisfy them - either by not supporting more physical
++	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
++	 * architecture needs to use an IOMMU instead of the direct mapping.
++	 */
++	if (mask >= DMA_BIT_MASK(32))
++		return 1;
+ 
+ 	/*
+ 	 * This check needs to be against the actual bit mask value, so
+ 	 * use __phys_to_dma() here so that the SME encryption mask isn't
+ 	 * part of the check.
+ 	 */
++	if (IS_ENABLED(CONFIG_ZONE_DMA))
++		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
+ 	return mask >= __phys_to_dma(dev, min_mask);
+ }
+ 
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 3924fbe829d4..c9d8eb7f5c02 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
+ 
+ extern bool irq_can_set_affinity_usr(unsigned int irq);
+ 
+-extern int irq_select_affinity_usr(unsigned int irq);
+-
+ extern void irq_set_thread_affinity(struct irq_desc *desc);
+ 
+ extern int irq_do_set_affinity(struct irq_data *data,
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 1753486b440c..55b080101a20 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -442,23 +442,9 @@ int irq_setup_affinity(struct irq_desc *desc)
+ {
+ 	return irq_select_affinity(irq_desc_get_irq(desc));
+ }
+-#endif
++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
++#endif /* CONFIG_SMP */
+ 
+-/*
+- * Called when a bogus affinity is set via /proc/irq
+- */
+-int irq_select_affinity_usr(unsigned int irq)
+-{
+-	struct irq_desc *desc = irq_to_desc(irq);
+-	unsigned long flags;
+-	int ret;
+-
+-	raw_spin_lock_irqsave(&desc->lock, flags);
+-	ret = irq_setup_affinity(desc);
+-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+-	return ret;
+-}
+-#endif
+ 
+ /**
+  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index cfc4f088a0e7..f5958c55406f 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
+ 	return show_irq_affinity(AFFINITY_LIST, m);
+ }
+ 
++#ifndef CONFIG_AUTO_IRQ_AFFINITY
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++	/*
++	 * If the interrupt is started up already then this fails. The
++	 * interrupt is assigned to an online CPU already. There is no
++	 * point to move it around randomly. Tell user space that the
++	 * selected mask is bogus.
++	 *
++	 * If not then any change to the affinity is pointless because the
++	 * startup code invokes irq_setup_affinity() which will select
++	 * a online CPU anyway.
++	 */
++	return -EINVAL;
++}
++#else
++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++	return irq_select_affinity(irq);
++}
++#endif
+ 
+ static ssize_t write_irq_affinity(int type, struct file *file,
+ 		const char __user *buffer, size_t count, loff_t *pos)
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index ce8f6748678a..9154e745f097 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
+ 	if (static_branch_likely(&psi_disabled))
+ 		return -EOPNOTSUPP;
+ 
++	if (!nbytes)
++		return -EINVAL;
++
+ 	buf_size = min(nbytes, sizeof(buf));
+ 	if (copy_from_user(buf, user_buf, buf_size))
+ 		return -EFAULT;
+diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
+index 6d83cafebc69..ad0699ce702f 100644
+--- a/lib/crypto/chacha20poly1305.c
++++ b/lib/crypto/chacha20poly1305.c
+@@ -235,6 +235,9 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
+ 		__le64 lens[2];
+ 	} b __aligned(16);
+ 
++	if (WARN_ON(src_len > INT_MAX))
++		return false;
++
+ 	chacha_load_key(b.k, key);
+ 
+ 	b.iv[0] = 0;
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index ed717dd08ff3..81c69c08d1d1 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
+ 		return true;
+ 	if (stack_slabs[depot_index] == NULL) {
+ 		stack_slabs[depot_index] = *prealloc;
++		*prealloc = NULL;
+ 	} else {
+-		stack_slabs[depot_index + 1] = *prealloc;
++		/* If this is the last depot slab, do not touch the next one. */
++		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
++			stack_slabs[depot_index + 1] = *prealloc;
++			*prealloc = NULL;
++		}
+ 		/*
+ 		 * This smp_store_release pairs with smp_load_acquire() from
+ 		 * |next_slab_inited| above and in stack_depot_save().
+ 		 */
+ 		smp_store_release(&next_slab_inited, 1);
+ 	}
+-	*prealloc = NULL;
+ 	return true;
+ }
+ 
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 27c231bf4565..eda490113372 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -409,8 +409,10 @@ int memcg_expand_shrinker_maps(int new_id)
+ 		if (mem_cgroup_is_root(memcg))
+ 			continue;
+ 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+-		if (ret)
++		if (ret) {
++			mem_cgroup_iter_break(NULL, memcg);
+ 			goto unlock;
++		}
+ 	}
+ unlock:
+ 	if (!ret)
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 71e4ffc83bcd..cb2c79a3e914 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ 	bool downgraded = false;
+ 	LIST_HEAD(uf);
+ 
+-	brk = untagged_addr(brk);
+-
+ 	if (down_write_killable(&mm->mmap_sem))
+ 		return -EINTR;
+ 
+@@ -1561,8 +1559,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
+ 	struct file *file = NULL;
+ 	unsigned long retval;
+ 
+-	addr = untagged_addr(addr);
+-
+ 	if (!(flags & MAP_ANONYMOUS)) {
+ 		audit_mmap_fd(fd, flags);
+ 		file = fget(fd);
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 122938dcec15..af363063ea23 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ 	LIST_HEAD(uf_unmap);
+ 
+ 	addr = untagged_addr(addr);
+-	new_addr = untagged_addr(new_addr);
+ 
+ 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ 		return ret;
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 3918fc3eaef1..29d92e7f55c4 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -886,7 +886,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+ 	 * Poison uninitialized struct pages in order to catch invalid flags
+ 	 * combinations.
+ 	 */
+-	page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
++	page_init_poison(memmap, sizeof(struct page) * nr_pages);
+ 
+ 	ms = __nr_to_section(section_nr);
+ 	set_section_nid(section_nr, nid);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 572fb17c6273..af4b2b3d4e0d 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2429,10 +2429,13 @@ out:
+ 			/*
+ 			 * Scan types proportional to swappiness and
+ 			 * their relative recent reclaim efficiency.
+-			 * Make sure we don't miss the last page
+-			 * because of a round-off error.
++			 * Make sure we don't miss the last page on
++			 * the offlined memory cgroups because of a
++			 * round-off error.
+ 			 */
+-			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
++			scan = mem_cgroup_online(memcg) ?
++			       div64_u64(scan * fraction[file], denominator) :
++			       DIV64_U64_ROUND_UP(scan * fraction[file],
+ 						  denominator);
+ 			break;
+ 		case SCAN_FILE:
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index ced3fc8fad7c..6520d9ec1297 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
+ 
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ 				     struct xt_hashlimit_htable **hinfo,
+ 				     struct hashlimit_cfg3 *cfg,
+@@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ 
+ 	if (cfg->gc_interval == 0 || cfg->expire == 0)
+ 		return -EINVAL;
++	if (cfg->size > HASHLIMIT_MAX_SIZE) {
++		cfg->size = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++	}
++	if (cfg->max > HASHLIMIT_MAX_SIZE) {
++		cfg->max = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++	}
+ 	if (par->family == NFPROTO_IPV4) {
+ 		if (cfg->srcmask > 32 || cfg->dstmask > 32)
+ 			return -EINVAL;
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index dbdbc4f18b5e..c9f34b0a11df 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
+ }
+ 
+ /*
+- * Final call destruction under RCU.
++ * Final call destruction - but must be done in process context.
+  */
+-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++static void rxrpc_destroy_call(struct work_struct *work)
+ {
+-	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
+ 	struct rxrpc_net *rxnet = call->rxnet;
+ 
+ 	rxrpc_put_connection(call->conn);
+@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+ 		wake_up_var(&rxnet->nr_calls);
+ }
+ 
++/*
++ * Final call destruction under RCU.
++ */
++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++{
++	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++
++	if (in_softirq()) {
++		INIT_WORK(&call->processor, rxrpc_destroy_call);
++		if (!rxrpc_queue_work(&call->processor))
++			BUG();
++	} else {
++		rxrpc_destroy_call(&call->processor);
++	}
++}
++
+ /*
+  * clean up a call
+  */
+diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
+index 34085d146fa2..7a228681f89f 100755
+--- a/scripts/get_maintainer.pl
++++ b/scripts/get_maintainer.pl
+@@ -932,10 +932,6 @@ sub get_maintainers {
+ 	}
+     }
+ 
+-    foreach my $fix (@fixes) {
+-	vcs_add_commit_signers($fix, "blamed_fixes");
+-    }
+-
+     foreach my $email (@email_to, @list_to) {
+ 	$email->[0] = deduplicate_email($email->[0]);
+     }
+@@ -974,6 +970,10 @@ sub get_maintainers {
+ 	}
+     }
+ 
++    foreach my $fix (@fixes) {
++	vcs_add_commit_signers($fix, "blamed_fixes");
++    }
++
+     my @to = ();
+     if ($email || $email_list) {
+ 	if ($email) {
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 6d9592f0ae1d..cc93157fa950 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
+ 	event->queue = queue;
+ 	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+ 	if (real_time) {
+-		event->time.time = snd_seq_timer_get_cur_time(q->timer);
++		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+ 		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+ 	} else {
+ 		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
+ 	tmr = queue->timer;
+ 	status->events = queue->tickq->cells + queue->timeq->cells;
+ 
+-	status->time = snd_seq_timer_get_cur_time(tmr);
++	status->time = snd_seq_timer_get_cur_time(tmr, true);
+ 	status->tick = snd_seq_timer_get_cur_tick(tmr);
+ 
+ 	status->running = tmr->running;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index caf68bf42f13..71a6ea62c3be 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_event_cell *cell;
++	snd_seq_tick_time_t cur_tick;
++	snd_seq_real_time_t cur_time;
+ 
+ 	if (q == NULL)
+ 		return;
+@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ 
+       __again:
+ 	/* Process tick queue... */
++	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->tickq,
+-					      &q->timer->tick.cur_tick);
++		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+ 	}
+ 
+ 	/* Process time queue... */
++	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+@@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+ 	struct snd_seq_queue *q = queueptr(queueid);
++	unsigned long flags;
+ 
+ 	if (q == NULL)
+ 		return -EINVAL;
+@@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ 		return -EPERM;
+ 	}
+ 
++	spin_lock_irqsave(&q->owner_lock, flags);
+ 	q->locked = locked ? 1 : 0;
+ 	q->owner = client;
++	spin_unlock_irqrestore(&q->owner_lock, flags);
+ 	queue_access_unlock(q);
+ 	queuefree(q);
+ 
+@@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
+ 	unsigned long flags;
+ 	int i;
+ 	struct snd_seq_queue *q;
++	bool matched;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+ 			continue;
+ 		spin_lock_irqsave(&q->owner_lock, flags);
+-		if (q->owner == client)
++		matched = (q->owner == client);
++		if (matched)
+ 			q->klocked = 1;
+ 		spin_unlock_irqrestore(&q->owner_lock, flags);
+-		if (q->owner == client) {
++		if (matched) {
+ 			if (q->timer->running)
+ 				snd_seq_timer_stop(q->timer);
+ 			snd_seq_timer_reset(q->timer);
+@@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ 	int i, bpm;
+ 	struct snd_seq_queue *q;
+ 	struct snd_seq_timer *tmr;
++	bool locked;
++	int owner;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+@@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ 		else
+ 			bpm = 0;
+ 
++		spin_lock_irq(&q->owner_lock);
++		locked = q->locked;
++		owner = q->owner;
++		spin_unlock_irq(&q->owner_lock);
++
+ 		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+-		snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
+-		snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
++		snd_iprintf(buffer, "owned by client    : %d\n", owner);
++		snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
+ 		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
+ 		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
+ 		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index be59b59c9be4..1645e4142e30 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -428,14 +428,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+ }
+ 
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime)
+ {
+ 	snd_seq_real_time_t cur_time;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tmr->lock, flags);
+ 	cur_time = tmr->cur_time;
+-	if (tmr->running) { 
++	if (adjust_ktime && tmr->running) {
+ 		struct timespec64 tm;
+ 
+ 		ktime_get_ts64(&tm);
+@@ -452,7 +453,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+  high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+-	return tmr->tick.cur_tick;
++	snd_seq_tick_time_t cur_tick;
++	unsigned long flags;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	cur_tick = tmr->tick.cur_tick;
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return cur_tick;
+ }
+ 
+ 
+diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
+index 66c3e344eae3..4bec57df8158 100644
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+ 
+ extern int seq_default_timer_class;
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 886cb7811bd6..2efee794cac6 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
+ 		if (spk_alloc & (1 << i))
+-			j += snprintf(buf + j, buflen - j,  " %s",
++			j += scnprintf(buf + j, buflen - j,  " %s",
+ 					cea_speaker_allocation_names[i]);
+ 	}
+ 	buf[j] = '\0';	/* necessary when j == 0 */
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index a2fb19129219..6cb72336433a 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
+ 		if (pcm & (AC_SUPPCM_BITS_8 << i))
+-			j += snprintf(buf + j, buflen - j,  " %d", bits[i]);
++			j += scnprintf(buf + j, buflen - j,  " %d", bits[i]);
+ 
+ 	buf[j] = '\0'; /* necessary when j == 0 */
+ }
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index d081fb2880a0..82cf1da2ff12 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
+ 		if (pcm & (1 << i))
+-			j += snprintf(buf + j, buflen - j,  " %d",
++			j += scnprintf(buf + j, buflen - j,  " %d",
+ 				alsa_rates[i]);
+ 
+ 	buf[j] = '\0'; /* necessary when j == 0 */
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index fcc34417cbce..6dbe99131bc4 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
+ 	int i, len = 0;
+ 	mutex_lock(&codec->user_mutex);
+ 	snd_array_for_each(&codec->init_verbs, i, v) {
+-		len += snprintf(buf + len, PAGE_SIZE - len,
++		len += scnprintf(buf + len, PAGE_SIZE - len,
+ 				"0x%02x 0x%03x 0x%04x\n",
+ 				v->nid, v->verb, v->param);
+ 	}
+@@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
+ 	int i, len = 0;
+ 	mutex_lock(&codec->user_mutex);
+ 	snd_array_for_each(&codec->hints, i, hint) {
+-		len += snprintf(buf + len, PAGE_SIZE - len,
++		len += scnprintf(buf + len, PAGE_SIZE - len,
+ 				"%s = %s\n", hint->key, hint->val);
+ 	}
+ 	mutex_unlock(&codec->user_mutex);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c6b1581c6ffa..7ba3ef6b673d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2447,7 +2447,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
+index d1dc8e6366dc..71f2d42188c4 100644
+--- a/sound/soc/atmel/Kconfig
++++ b/sound/soc/atmel/Kconfig
+@@ -10,11 +10,11 @@ config SND_ATMEL_SOC
+ if SND_ATMEL_SOC
+ 
+ config SND_ATMEL_SOC_PDC
+-	tristate
++	bool
+ 	depends on HAS_DMA
+ 
+ config SND_ATMEL_SOC_DMA
+-	tristate
++	bool
+ 	select SND_SOC_GENERIC_DMAENGINE_PCM
+ 
+ config SND_ATMEL_SOC_SSC
+diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
+index 1f6890ed3738..c7d2989791be 100644
+--- a/sound/soc/atmel/Makefile
++++ b/sound/soc/atmel/Makefile
+@@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
+ snd-soc-atmel-i2s-objs := atmel-i2s.o
+ snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
+ 
+-obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
+-obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
++# pdc and dma need to both be built-in if any user of
++# ssc is built-in.
++ifdef CONFIG_SND_ATMEL_SOC_PDC
++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
++endif
++ifdef CONFIG_SND_ATMEL_SOC_DMA
++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
++endif
+ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
+ obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
+ obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index b517e4bc1b87..41b83ecaf008 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ 			&fsl_sai_dai, 1);
+ 	if (ret)
+-		return ret;
++		goto err_pm_disable;
+ 
+-	if (sai->soc_data->use_imx_pcm)
+-		return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
+-	else
+-		return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
++	if (sai->soc_data->use_imx_pcm) {
++		ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
++		if (ret)
++			goto err_pm_disable;
++	} else {
++		ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
++		if (ret)
++			goto err_pm_disable;
++	}
++
++	return ret;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static int fsl_sai_remove(struct platform_device *pdev)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b6378f025836..935b5375ecc5 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
+ 	runtime->rate = params_rate(params);
+ 
+ out:
+-	if (ret < 0)
+-		kfree(runtime);
+-
+ 	kfree(params);
+ 	return ret;
+ }
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 1923b0c36bce..3f645200d3a5 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -443,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = {
+ 	.name = "iDisp3 Pin",
+ 	.ops = &hda_link_dai_ops,
+ },
++{
++	.name = "iDisp4 Pin",
++	.ops = &hda_link_dai_ops,
++},
+ {
+ 	.name = "Analog CPU DAI",
+ 	.ops = &hda_link_dai_ops,
+diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
+index 55798bc8eae2..686561df8e13 100644
+--- a/sound/soc/sunxi/sun8i-codec.c
++++ b/sound/soc/sunxi/sun8i-codec.c
+@@ -80,6 +80,7 @@
+ 
+ #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK		GENMASK(15, 12)
+ #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK		GENMASK(11, 8)
++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK	GENMASK(3, 2)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK	GENMASK(5, 4)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK	GENMASK(8, 6)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK	GENMASK(12, 9)
+@@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 		return -EINVAL;
+ 	}
+ 	regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
+-			   BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
++			   SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
+ 			   value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
+ 
+ 	return 0;
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+index 07f5b462c2ef..aa43e0bd210c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+@@ -3,6 +3,11 @@
+ 
+ #include "test_progs.h"
+ 
++#define TCP_REPAIR		19	/* TCP sock is under repair right now */
++
++#define TCP_REPAIR_ON		1
++#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
++
+ static int connected_socket_v4(void)
+ {
+ 	struct sockaddr_in addr = {

