From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.9 commit in: /
Date: Wed, 16 Dec 2020 23:15:13 +0000 (UTC)
Message-ID: <1608160496.6016b8f6e9be6890184aa70a2b691c31f5e19f6a.mpagano@gentoo>

commit:     6016b8f6e9be6890184aa70a2b691c31f5e19f6a
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Dec 16 23:14:56 2020 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Dec 16 23:14:56 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6016b8f6

Linux patch 5.9.15

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1014_linux-5.9.15.patch | 4503 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4507 insertions(+)
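
The numbered 10xx patches in this repository are the incremental kernel.org
diffs for each point release (this one moves SUBLEVEL from 14 to 15, as the
Makefile hunk below shows) and are applied in sequence on top of the vanilla
5.9 sources. A minimal sketch of doing that by hand follows; the paths are
illustrative and the gentoo-sources ebuilds normally perform this step:

	# illustrative only: from an unpacked vanilla linux-5.9 tree,
	# with a checkout of this repo's 5.9 branch alongside it
	for p in ../linux-patches/10??_linux-5.9.*.patch; do
		patch -p1 < "$p"	# each patch is -p1 relative to the kernel root
	done

The shell glob expands in lexicographic order (1000, 1001, ..., 1014), which
matches the required application order listed in 0000_README.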

diff --git a/0000_README b/0000_README
index 8c119c3..48f07c7 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-5.9.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.14
 
+Patch:  1014_linux-5.9.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.9.15.patch b/1014_linux-5.9.15.patch
new file mode 100644
index 0000000..e103604
--- /dev/null
+++ b/1014_linux-5.9.15.patch
@@ -0,0 +1,4503 @@
+diff --git a/Makefile b/Makefile
+index 0983973bcf082..399cda4e42ae1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+@@ -821,8 +821,11 @@ DEBUG_CFLAGS	+= -gsplit-dwarf
+ else
+ DEBUG_CFLAGS	+= -g
+ endif
++ifneq ($(LLVM_IAS),1)
+ KBUILD_AFLAGS	+= -Wa,-gdwarf-2
+ endif
++endif
++
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ DEBUG_CFLAGS	+= -gdwarf-4
+ endif
+diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
+index 7462a79110024..4c7b0414a3ff3 100644
+--- a/arch/alpha/kernel/process.c
++++ b/arch/alpha/kernel/process.c
+@@ -57,7 +57,7 @@ EXPORT_SYMBOL(pm_power_off);
+ void arch_cpu_idle(void)
+ {
+ 	wtint(0);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_dead(void)
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index b23986f984509..b2557f581ea8c 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -38,15 +38,15 @@
+ 
+ #ifdef CONFIG_ARC_DW2_UNWIND
+ 
+-static void seed_unwind_frame_info(struct task_struct *tsk,
+-				   struct pt_regs *regs,
+-				   struct unwind_frame_info *frame_info)
++static int
++seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
++		       struct unwind_frame_info *frame_info)
+ {
+ 	/*
+ 	 * synchronous unwinding (e.g. dump_stack)
+ 	 *  - uses current values of SP and friends
+ 	 */
+-	if (tsk == NULL && regs == NULL) {
++	if (regs == NULL && (tsk == NULL || tsk == current)) {
+ 		unsigned long fp, sp, blink, ret;
+ 		frame_info->task = current;
+ 
+@@ -65,11 +65,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 		frame_info->call_frame = 0;
+ 	} else if (regs == NULL) {
+ 		/*
+-		 * Asynchronous unwinding of sleeping task
+-		 *  - Gets SP etc from task's pt_regs (saved bottom of kernel
+-		 *    mode stack of task)
++		 * Asynchronous unwinding of a likely sleeping task
++		 *  - first ensure it is actually sleeping
++		 *  - if so, it will be in __switch_to, kernel mode SP of task
++		 *    is safe-kept and BLINK at a well known location in there
+ 		 */
+ 
++		if (tsk->state == TASK_RUNNING)
++			return -1;
++
+ 		frame_info->task = tsk;
+ 
+ 		frame_info->regs.r27 = TSK_K_FP(tsk);
+@@ -103,6 +107,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 		frame_info->regs.r63 = regs->ret;
+ 		frame_info->call_frame = 0;
+ 	}
++
++	return 0;
+ }
+ 
+ #endif
+@@ -116,7 +122,8 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 	unsigned int address;
+ 	struct unwind_frame_info frame_info;
+ 
+-	seed_unwind_frame_info(tsk, regs, &frame_info);
++	if (seed_unwind_frame_info(tsk, regs, &frame_info))
++		return 0;
+ 
+ 	while (1) {
+ 		address = UNW_PC(&frame_info);
+diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
+index fe383f5a92fba..50bc1ccc30749 100644
+--- a/arch/arm/configs/omap2plus_defconfig
++++ b/arch/arm/configs/omap2plus_defconfig
+@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_CMA=y
+ CONFIG_ZSMALLOC=m
+-CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+ CONFIG_UNIX=y
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 8e6ace03e960b..9f199b1e83839 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -71,7 +71,7 @@ void arch_cpu_idle(void)
+ 		arm_pm_idle();
+ 	else
+ 		cpu_do_idle();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_prepare(void)
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index 144b9caa935c4..a720259099edf 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -288,7 +288,7 @@ static struct gpiod_lookup_table osk_usb_gpio_table = {
+ 	.dev_id = "ohci",
+ 	.table = {
+ 		/* Power GPIO on the I2C-attached TPS65010 */
+-		GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
+ 		GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
+ 			    GPIO_ACTIVE_HIGH),
+ 	},
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+index 55259f973b5a9..aef8f2b00778d 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+@@ -5,20 +5,20 @@
+ 	usb {
+ 		compatible = "simple-bus";
+ 		dma-ranges;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x0 0x0 0x68500000 0x00400000>;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
+ 
+ 		usbphy0: usb-phy@0 {
+ 			compatible = "brcm,sr-usb-combo-phy";
+-			reg = <0x00000000 0x100>;
++			reg = <0x0 0x00000000 0x0 0x100>;
+ 			#phy-cells = <1>;
+ 			status = "disabled";
+ 		};
+ 
+ 		xhci0: usb@1000 {
+ 			compatible = "generic-xhci";
+-			reg = <0x00001000 0x1000>;
++			reg = <0x0 0x00001000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy0 1>, <&usbphy0 0>;
+ 			phy-names = "phy0", "phy1";
+@@ -28,7 +28,7 @@
+ 
+ 		bdc0: usb@2000 {
+ 			compatible = "brcm,bdc-v0.16";
+-			reg = <0x00002000 0x1000>;
++			reg = <0x0 0x00002000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy0 0>, <&usbphy0 1>;
+ 			phy-names = "phy0", "phy1";
+@@ -38,21 +38,21 @@
+ 
+ 		usbphy1: usb-phy@10000 {
+ 			compatible = "brcm,sr-usb-combo-phy";
+-			reg = <0x00010000 0x100>;
++			reg = <0x0 0x00010000 0x0 0x100>;
+ 			#phy-cells = <1>;
+ 			status = "disabled";
+ 		};
+ 
+ 		usbphy2: usb-phy@20000 {
+ 			compatible = "brcm,sr-usb-hs-phy";
+-			reg = <0x00020000 0x100>;
++			reg = <0x0 0x00020000 0x0 0x100>;
+ 			#phy-cells = <0>;
+ 			status = "disabled";
+ 		};
+ 
+ 		xhci1: usb@11000 {
+ 			compatible = "generic-xhci";
+-			reg = <0x00011000 0x1000>;
++			reg = <0x0 0x00011000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
+ 			phy-names = "phy0", "phy1", "phy2";
+@@ -62,7 +62,7 @@
+ 
+ 		bdc1: usb@21000 {
+ 			compatible = "brcm,bdc-v0.16";
+-			reg = <0x00021000 0x1000>;
++			reg = <0x0 0x00021000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy2>;
+ 			phy-names = "phy0";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+index 802b8c52489ac..b5a23643db978 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+@@ -10,18 +10,6 @@
+ 	model = "NVIDIA Jetson TX2 Developer Kit";
+ 	compatible = "nvidia,p2771-0000", "nvidia,tegra186";
+ 
+-	aconnect {
+-		status = "okay";
+-
+-		dma-controller@2930000 {
+-			status = "okay";
+-		};
+-
+-		interrupt-controller@2a40000 {
+-			status = "okay";
+-		};
+-	};
+-
+ 	i2c@3160000 {
+ 		power-monitor@42 {
+ 			compatible = "ti,ina3221";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+index 35bd6b904b9c7..3376810385193 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+@@ -243,7 +243,6 @@
+ 		interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pmic_int>;
+-		rockchip,system-power-controller;
+ 		wakeup-source;
+ 		#clock-cells = <1>;
+ 		clock-output-names = "rk808-clkout1", "xin32k";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+index b85ec31cd2835..78ef0037ad4b5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+@@ -74,14 +74,14 @@
+ 			label = "red:diy";
+ 			gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+-			linux,default-trigger = "mmc1";
++			linux,default-trigger = "mmc2";
+ 		};
+ 
+ 		yellow_led: led-2 {
+ 			label = "yellow:yellow-led";
+ 			gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+-			linux,default-trigger = "mmc0";
++			linux,default-trigger = "mmc1";
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index ada724b12f014..7a9a7aca86c6a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -29,6 +29,9 @@
+ 		i2c6 = &i2c6;
+ 		i2c7 = &i2c7;
+ 		i2c8 = &i2c8;
++		mmc0 = &sdio0;
++		mmc1 = &sdmmc;
++		mmc2 = &sdhci;
+ 		serial0 = &uart0;
+ 		serial1 = &uart1;
+ 		serial2 = &uart2;
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 2da5f3f9d345f..f7c42a7d09b66 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -124,7 +124,7 @@ void arch_cpu_idle(void)
+ 	 * tricks
+ 	 */
+ 	cpu_do_idle();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
+index f730869e21eed..69af6bc87e647 100644
+--- a/arch/csky/kernel/process.c
++++ b/arch/csky/kernel/process.c
+@@ -102,6 +102,6 @@ void arch_cpu_idle(void)
+ #ifdef CONFIG_CPU_PM_STOP
+ 	asm volatile("stop\n");
+ #endif
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ #endif
+diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
+index 83ce3caf73139..a2961c7b2332c 100644
+--- a/arch/h8300/kernel/process.c
++++ b/arch/h8300/kernel/process.c
+@@ -57,7 +57,7 @@ asmlinkage void ret_from_kernel_thread(void);
+  */
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	__asm__("sleep");
+ }
+ 
+diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
+index dfd322c5ce83a..20962601a1b47 100644
+--- a/arch/hexagon/kernel/process.c
++++ b/arch/hexagon/kernel/process.c
+@@ -44,7 +44,7 @@ void arch_cpu_idle(void)
+ {
+ 	__vmwait();
+ 	/*  interrupts wake us up, but irqs are still disabled */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
+index f19cb97c00987..1b2769260688d 100644
+--- a/arch/ia64/kernel/process.c
++++ b/arch/ia64/kernel/process.c
+@@ -252,7 +252,7 @@ void arch_cpu_idle(void)
+ 	if (mark_idle)
+ 		(*mark_idle)(1);
+ 
+-	safe_halt();
++	raw_safe_halt();
+ 
+ 	if (mark_idle)
+ 		(*mark_idle)(0);
+diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
+index a9e46e525cd0a..f99860771ff48 100644
+--- a/arch/microblaze/kernel/process.c
++++ b/arch/microblaze/kernel/process.c
+@@ -149,5 +149,5 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+ 
+ void arch_cpu_idle(void)
+ {
+-       local_irq_enable();
++       raw_local_irq_enable();
+ }
+diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
+index 5bc3b04693c7d..18e69ebf5691d 100644
+--- a/arch/mips/kernel/idle.c
++++ b/arch/mips/kernel/idle.c
+@@ -33,19 +33,19 @@ static void __cpuidle r3081_wait(void)
+ {
+ 	unsigned long cfg = read_c0_conf();
+ 	write_c0_conf(cfg | R30XX_CONF_HALT);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ static void __cpuidle r39xx_wait(void)
+ {
+ 	if (!need_resched())
+ 		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void __cpuidle r4k_wait(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	__r4k_wait();
+ }
+ 
+@@ -64,7 +64,7 @@ void __cpuidle r4k_wait_irqoff(void)
+ 		"	.set	arch=r4000	\n"
+ 		"	wait			\n"
+ 		"	.set	pop		\n");
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -84,7 +84,7 @@ static void __cpuidle rm7k_wait_irqoff(void)
+ 		"	wait						\n"
+ 		"	mtc0	$1, $12		# stalls until W stage	\n"
+ 		"	.set	pop					\n");
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -257,7 +257,7 @@ void arch_cpu_idle(void)
+ 	if (cpu_wait)
+ 		cpu_wait();
+ 	else
+-		local_irq_enable();
++		raw_local_irq_enable();
+ }
+ 
+ #ifdef CONFIG_CPU_IDLE
+diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
+index 88a4ec03edab4..f5cc55a88d310 100644
+--- a/arch/nios2/kernel/process.c
++++ b/arch/nios2/kernel/process.c
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL(pm_power_off);
+ 
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
+index 0ff391f00334c..3c98728cce249 100644
+--- a/arch/openrisc/kernel/process.c
++++ b/arch/openrisc/kernel/process.c
+@@ -79,7 +79,7 @@ void machine_power_off(void)
+  */
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
+ 		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+ }
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index f196d96e2f9f5..a92a23d6acd93 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -169,7 +169,7 @@ void __cpuidle arch_cpu_idle_dead(void)
+ 
+ void __cpuidle arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	/* nop on real hardware, qemu will idle sleep. */
+ 	asm volatile("or %%r10,%%r10,%%r10\n":::);
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 3e8da9cf2eb9d..e6643d5699fef 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -249,7 +249,6 @@ KBUILD_CFLAGS		+= $(call cc-option,-mno-string)
+ cpu-as-$(CONFIG_40x)		+= -Wa,-m405
+ cpu-as-$(CONFIG_44x)		+= -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
+-cpu-as-$(CONFIG_E200)		+= -Wa,-me200
+ cpu-as-$(CONFIG_E500)		+= -Wa,-me500
+ 
+ # When using '-many -mpower4' gas will first try and find a matching power4
+diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
+index 422e31d2f5a2b..8df35f1329a42 100644
+--- a/arch/powerpc/kernel/idle.c
++++ b/arch/powerpc/kernel/idle.c
+@@ -60,9 +60,9 @@ void arch_cpu_idle(void)
+ 		 * interrupts enabled, some don't.
+ 		 */
+ 		if (irqs_disabled())
+-			local_irq_enable();
++			raw_local_irq_enable();
+ 	} else {
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 		/*
+ 		 * Go into low thread priority and possibly
+ 		 * low power mode.
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index cf20e5229ce1f..562094863e915 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in
+ 	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+ 
+ 	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+-		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
++		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
+ 		     : "memory");
+ }
+ 
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index 2b97c493427c9..308e1d95ecbf0 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -36,7 +36,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
+ void arch_cpu_idle(void)
+ {
+ 	wait_for_interrupt();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void show_regs(struct pt_regs *regs)
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index ca55db0823534..dd5cb6204335d 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -765,12 +765,7 @@ ENTRY(io_int_handler)
+ 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ 	jo	.Lio_restore
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tmhh	%r8,0x300
+-	jz	1f
+ 	TRACE_IRQS_OFF
+-1:
+-#endif
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ .Lio_loop:
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+@@ -793,12 +788,7 @@ ENTRY(io_int_handler)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
+ 	jnz	.Lio_work
+ .Lio_restore:
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tm	__PT_PSW(%r11),3
+-	jno	0f
+ 	TRACE_IRQS_ON
+-0:
+-#endif
+ 	lg	%r14,__LC_VDSO_PER_CPU
+ 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+ 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+@@ -980,12 +970,7 @@ ENTRY(ext_int_handler)
+ 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ 	jo	.Lio_restore
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tmhh	%r8,0x300
+-	jz	1f
+ 	TRACE_IRQS_OFF
+-1:
+-#endif
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+ 	lghi	%r3,EXT_INTERRUPT
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index f7f1e64e0d980..2b85096964f84 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -33,10 +33,10 @@ void enabled_wait(void)
+ 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ 	clear_cpu_flag(CIF_NOHZ_DELAY);
+ 
+-	local_irq_save(flags);
++	raw_local_irq_save(flags);
+ 	/* Call the assembler magic in entry.S */
+ 	psw_idle(idle, psw_mask);
+-	local_irq_restore(flags);
++	raw_local_irq_restore(flags);
+ 
+ 	/* Account time spent with enabled wait psw loaded as idle time. */
+ 	raw_write_seqcount_begin(&idle->seqcount);
+@@ -123,7 +123,7 @@ void arch_cpu_idle_enter(void)
+ void arch_cpu_idle(void)
+ {
+ 	enabled_wait();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_exit(void)
+diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
+index daca7bad66de3..8c0c68e7770ea 100644
+--- a/arch/s390/lib/delay.c
++++ b/arch/s390/lib/delay.c
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
+ 
+ static void __udelay_disabled(unsigned long long usecs)
+ {
+-	unsigned long cr0, cr0_new, psw_mask, flags;
++	unsigned long cr0, cr0_new, psw_mask;
+ 	struct s390_idle_data idle;
+ 	u64 end;
+ 
+@@ -45,9 +45,8 @@ static void __udelay_disabled(unsigned long long usecs)
+ 	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
+ 	set_clock_comparator(end);
+ 	set_cpu_flag(CIF_IGNORE_IRQ);
+-	local_irq_save(flags);
+ 	psw_idle(&idle, psw_mask);
+-	local_irq_restore(flags);
++	trace_hardirqs_off();
+ 	clear_cpu_flag(CIF_IGNORE_IRQ);
+ 	set_clock_comparator(S390_lowcore.clock_comparator);
+ 	__ctl_load(cr0, 0, 0);
+diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
+index 0dc0f52f9bb8d..f59814983bd59 100644
+--- a/arch/sh/kernel/idle.c
++++ b/arch/sh/kernel/idle.c
+@@ -22,7 +22,7 @@ static void (*sh_idle)(void);
+ void default_idle(void)
+ {
+ 	set_bl_bit();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	/* Isn't this racy ? */
+ 	cpu_sleep();
+ 	clear_bl_bit();
+diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
+index 065e2d4b72908..396f46bca52eb 100644
+--- a/arch/sparc/kernel/leon_pmc.c
++++ b/arch/sparc/kernel/leon_pmc.c
+@@ -50,7 +50,7 @@ static void pmc_leon_idle_fixup(void)
+ 	register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+ 
+ 	/* Interrupts need to be enabled to not hang the CPU */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	__asm__ __volatile__ (
+ 		"wr	%%g0, %%asr19\n"
+@@ -66,7 +66,7 @@ static void pmc_leon_idle_fixup(void)
+ static void pmc_leon_idle(void)
+ {
+ 	/* Interrupts need to be enabled to not hang the CPU */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	/* For systems without power-down, this will be no-op */
+ 	__asm__ __volatile__ ("wr	%g0, %asr19\n\t");
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index adfcaeab3ddc5..a023637359154 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -74,7 +74,7 @@ void arch_cpu_idle(void)
+ {
+ 	if (sparc_idle)
+ 		(*sparc_idle)();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index a75093b993f9a..6f8c7822fc065 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -62,11 +62,11 @@ void arch_cpu_idle(void)
+ {
+ 	if (tlb_type != hypervisor) {
+ 		touch_nmi_watchdog();
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 	} else {
+ 		unsigned long pstate;
+ 
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 
+                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+                  * the cpu sleep hypervisor call.
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 26b5e243d3fc0..495f101792b3d 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -217,7 +217,7 @@ void arch_cpu_idle(void)
+ {
+ 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
+ 	um_idle_sleep();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ int __cant_sleep(void) {
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 404315df1e167..4c84b87904930 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1937,7 +1937,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ 		if (error[bit]) {
+ 			perf_log_lost_samples(event, error[bit]);
+ 
+-			if (perf_event_account_interrupt(event))
++			if (iregs && perf_event_account_interrupt(event))
+ 				x86_pmu_stop(event, 0);
+ 		}
+ 
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index e039a933aca3c..29dd27b5a339d 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -88,8 +88,6 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ 
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-	trace_hardirqs_on();
+-
+ 	mds_idle_clear_cpu_buffers();
+ 	/* "mwait %eax, %ecx;" */
+ 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 816b31c685505..394757ee030a6 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -155,6 +155,7 @@ enum page_cache_mode {
+ #define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))
+ 
+ #define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
++#define _PAGE_LARGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
+ 
+ #define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
+ #define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))
+diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
+index fdb5b356e59b0..76229a424c382 100644
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -88,12 +88,13 @@ static inline void sync_core_before_usermode(void)
+ 	/* With PTI, we unconditionally serialize before running user code. */
+ 	if (static_cpu_has(X86_FEATURE_PTI))
+ 		return;
++
+ 	/*
+-	 * Return from interrupt and NMI is done through iret, which is core
+-	 * serializing.
++	 * Even if we're in an interrupt, we might reschedule before returning,
++	 * in which case we could switch to a different thread in the same mm
++	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
++	 * track of our need to sync the core, just sync right away.
+ 	 */
+-	if (in_irq() || in_nmi())
+-		return;
+ 	sync_core();
+ }
+ 
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index f8a56b5dc29fe..416b6a73e14ee 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -273,20 +273,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd)
+ 	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+ 	int node = irq_data_get_node(irqd);
+ 
+-	if (node == NUMA_NO_NODE)
+-		goto all;
+-	/* Try the intersection of @affmsk and node mask */
+-	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+-	if (!assign_vector_locked(irqd, vector_searchmask))
+-		return 0;
+-	/* Try the node mask */
+-	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
+-		return 0;
+-all:
++	if (node != NUMA_NO_NODE) {
++		/* Try the intersection of @affmsk and node mask */
++		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
++		if (!assign_vector_locked(irqd, vector_searchmask))
++			return 0;
++	}
++
+ 	/* Try the full affinity mask */
+ 	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
+ 	if (!assign_vector_locked(irqd, vector_searchmask))
+ 		return 0;
++
++	if (node != NUMA_NO_NODE) {
++		/* Try the node mask */
++		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
++			return 0;
++	}
++
+ 	/* Try the full online mask */
+ 	return assign_vector_locked(irqd, cpu_online_mask);
+ }
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 40f380461e6d7..bfb59a3f0085d 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -271,6 +271,19 @@ static int insn_is_indirect_jump(struct insn *insn)
+ 	return ret;
+ }
+ 
++static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
++{
++	unsigned char ops;
++
++	for (; addr < eaddr; addr++) {
++		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
++		    ops != INT3_INSN_OPCODE)
++			return false;
++	}
++
++	return true;
++}
++
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
+@@ -309,9 +322,14 @@ static int can_optimize(unsigned long paddr)
+ 			return 0;
+ 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
+ 		insn_get_length(&insn);
+-		/* Another subsystem puts a breakpoint */
++		/*
++		 * In the case of detecting unknown breakpoint, this could be
++		 * a padding INT3 between functions. Let's check that all the
++		 * rest of the bytes are also INT3.
++		 */
+ 		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+-			return 0;
++			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
++
+ 		/* Recover address */
+ 		insn.kaddr = (void *)addr;
+ 		insn.next_byte = (void *)(addr + insn.length);
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index ba4593a913fab..145a7ac0c19aa 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -685,7 +685,7 @@ void arch_cpu_idle(void)
+  */
+ void __cpuidle default_idle(void)
+ {
+-	safe_halt();
++	raw_safe_halt();
+ }
+ #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
+ EXPORT_SYMBOL(default_idle);
+@@ -736,6 +736,8 @@ void stop_this_cpu(void *dummy)
+ /*
+  * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
+  * states (local apic timer and TSC stop).
++ *
++ * XXX this function is completely buggered vs RCU and tracing.
+  */
+ static void amd_e400_idle(void)
+ {
+@@ -757,9 +759,9 @@ static void amd_e400_idle(void)
+ 	 * The switch back from broadcast mode needs to be called with
+ 	 * interrupts disabled.
+ 	 */
+-	local_irq_disable();
++	raw_local_irq_disable();
+ 	tick_broadcast_exit();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -801,9 +803,9 @@ static __cpuidle void mwait_idle(void)
+ 		if (!need_resched())
+ 			__sti_mwait(0, 0);
+ 		else
+-			local_irq_enable();
++			raw_local_irq_enable();
+ 	} else {
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 	}
+ 	__current_clr_polling();
+ }
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index e2b0e2ac07bb6..84cda5dc03870 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -45,8 +45,8 @@
+ #define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+ 
+ #define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
+-#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+-				 (_PAGE_PAT | _PAGE_PWT))
++#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
++				 (_PAGE_PAT_LARGE | _PAGE_PWT))
+ 
+ #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
+ 
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 0951b47e64c10..a045aacd6cb9d 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -475,8 +475,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ 	/*
+ 	 * The membarrier system call requires a full memory barrier and
+ 	 * core serialization before returning to user-space, after
+-	 * storing to rq->curr. Writing to CR3 provides that full
+-	 * memory barrier and core serializing instruction.
++	 * storing to rq->curr, when changing mm.  This is because
++	 * membarrier() sends IPIs to all CPUs that are in the target mm
++	 * to make them issue memory barriers.  However, if another CPU
++	 * switches to/from the target mm concurrently with
++	 * membarrier(), it can cause that CPU not to receive an IPI
++	 * when it really should issue a memory barrier.  Writing to CR3
++	 * provides that full memory barrier and core serializing
++	 * instruction.
+ 	 */
+ 	if (real_prev == next) {
+ 		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+diff --git a/drivers/Makefile b/drivers/Makefile
+index c0cd1b9075e3d..5762280377186 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -145,6 +145,7 @@ obj-$(CONFIG_OF)		+= of/
+ obj-$(CONFIG_SSB)		+= ssb/
+ obj-$(CONFIG_BCMA)		+= bcma/
+ obj-$(CONFIG_VHOST_RING)	+= vhost/
++obj-$(CONFIG_VHOST_IOTLB)	+= vhost/
+ obj-$(CONFIG_VHOST)		+= vhost/
+ obj-$(CONFIG_VLYNQ)		+= vlynq/
+ obj-$(CONFIG_GREYBUS)		+= greybus/
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 501e9dacfff9d..9ebf53903d7bf 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
+ 
+ #define BLKBACK_INVALID_HANDLE (~0)
+ 
+-/* Number of free pages to remove on each call to gnttab_free_pages */
+-#define NUM_BATCH_FREE_PAGES 10
+-
+ static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+ {
+ 	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+ 			HZ * pgrant_timeout);
+ }
+ 
+-static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	if (list_empty(&ring->free_pages)) {
+-		BUG_ON(ring->free_pages_num != 0);
+-		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-		return gnttab_alloc_pages(1, page);
+-	}
+-	BUG_ON(ring->free_pages_num == 0);
+-	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
+-	list_del(&page[0]->lru);
+-	ring->free_pages_num--;
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-
+-	return 0;
+-}
+-
+-static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
+-                                  int num)
+-{
+-	unsigned long flags;
+-	int i;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	for (i = 0; i < num; i++)
+-		list_add(&page[i]->lru, &ring->free_pages);
+-	ring->free_pages_num += num;
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-}
+-
+-static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
+-{
+-	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
+-	struct page *page[NUM_BATCH_FREE_PAGES];
+-	unsigned int num_pages = 0;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	while (ring->free_pages_num > num) {
+-		BUG_ON(list_empty(&ring->free_pages));
+-		page[num_pages] = list_first_entry(&ring->free_pages,
+-		                                   struct page, lru);
+-		list_del(&page[num_pages]->lru);
+-		ring->free_pages_num--;
+-		if (++num_pages == NUM_BATCH_FREE_PAGES) {
+-			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-			gnttab_free_pages(num_pages, page);
+-			spin_lock_irqsave(&ring->free_pages_lock, flags);
+-			num_pages = 0;
+-		}
+-	}
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-	if (num_pages != 0)
+-		gnttab_free_pages(num_pages, page);
+-}
+-
+ #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+ 
+ static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
+@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
+ 			unmap_data.count = segs_to_unmap;
+ 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+ 
+-			put_free_pages(ring, pages, segs_to_unmap);
++			gnttab_page_cache_put(&ring->free_pages, pages,
++					      segs_to_unmap);
+ 			segs_to_unmap = 0;
+ 		}
+ 
+@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
+ 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ 			unmap_data.count = segs_to_unmap;
+ 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-			put_free_pages(ring, pages, segs_to_unmap);
++			gnttab_page_cache_put(&ring->free_pages, pages,
++					      segs_to_unmap);
+ 			segs_to_unmap = 0;
+ 		}
+ 		kfree(persistent_gnt);
+@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
+ 	if (segs_to_unmap > 0) {
+ 		unmap_data.count = segs_to_unmap;
+ 		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-		put_free_pages(ring, pages, segs_to_unmap);
++		gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
+ 	}
+ }
+ 
+@@ -664,9 +605,10 @@ purge_gnt_list:
+ 
+ 		/* Shrink the free pages pool if it is too large. */
+ 		if (time_before(jiffies, blkif->buffer_squeeze_end))
+-			shrink_free_pagepool(ring, 0);
++			gnttab_page_cache_shrink(&ring->free_pages, 0);
+ 		else
+-			shrink_free_pagepool(ring, max_buffer_pages);
++			gnttab_page_cache_shrink(&ring->free_pages,
++						 max_buffer_pages);
+ 
+ 		if (log_stats && time_after(jiffies, ring->st_print))
+ 			print_stats(ring);
+@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
+ 	ring->persistent_gnt_c = 0;
+ 
+ 	/* Since we are shutting down remove all pages from the buffer */
+-	shrink_free_pagepool(ring, 0 /* All */);
++	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
+ }
+ 
+ static unsigned int xen_blkbk_unmap_prepare(
+@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
+ 	   but is this the best way to deal with this? */
+ 	BUG_ON(result);
+ 
+-	put_free_pages(ring, data->pages, data->count);
++	gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
+ 	make_response(ring, pending_req->id,
+ 		      pending_req->operation, pending_req->status);
+ 	free_req(ring, pending_req);
+@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
+ 		if (invcount) {
+ 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+ 			BUG_ON(ret);
+-			put_free_pages(ring, unmap_pages, invcount);
++			gnttab_page_cache_put(&ring->free_pages, unmap_pages,
++					      invcount);
+ 		}
+ 		pages += batch;
+ 		num -= batch;
+@@ -850,7 +793,8 @@ again:
+ 			pages[i]->page = persistent_gnt->page;
+ 			pages[i]->persistent_gnt = persistent_gnt;
+ 		} else {
+-			if (get_free_page(ring, &pages[i]->page))
++			if (gnttab_page_cache_get(&ring->free_pages,
++						  &pages[i]->page))
+ 				goto out_of_memory;
+ 			addr = vaddr(pages[i]->page);
+ 			pages_to_gnt[segs_to_map] = pages[i]->page;
+@@ -883,7 +827,8 @@ again:
+ 			BUG_ON(new_map_idx >= segs_to_map);
+ 			if (unlikely(map[new_map_idx].status != 0)) {
+ 				pr_debug("invalid buffer -- could not remap it\n");
+-				put_free_pages(ring, &pages[seg_idx]->page, 1);
++				gnttab_page_cache_put(&ring->free_pages,
++						      &pages[seg_idx]->page, 1);
+ 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ 				ret |= 1;
+ 				goto next;
+@@ -944,7 +889,7 @@ next:
+ 
+ out_of_memory:
+ 	pr_alert("%s: out of memory\n", __func__);
+-	put_free_pages(ring, pages_to_gnt, segs_to_map);
++	gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
+ 	for (i = last_map; i < num; i++)
+ 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ 	return -ENOMEM;
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index c6ea5d38c509a..a1b9df2c4ef1a 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -288,9 +288,7 @@ struct xen_blkif_ring {
+ 	struct work_struct	persistent_purge_work;
+ 
+ 	/* Buffer of free pages to map grant refs. */
+-	spinlock_t		free_pages_lock;
+-	int			free_pages_num;
+-	struct list_head	free_pages;
++	struct gnttab_page_cache free_pages;
+ 
+ 	struct work_struct	free_work;
+ 	/* Thread shutdown wait queue. */
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 5e7c36d73dc62..684b6f11c8051 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
+ 		INIT_LIST_HEAD(&ring->pending_free);
+ 		INIT_LIST_HEAD(&ring->persistent_purge_list);
+ 		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
+-		spin_lock_init(&ring->free_pages_lock);
+-		INIT_LIST_HEAD(&ring->free_pages);
++		gnttab_page_cache_init(&ring->free_pages);
+ 
+ 		spin_lock_init(&ring->pending_free_lock);
+ 		init_waitqueue_head(&ring->pending_free_wq);
+@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
+ 		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
+ 		BUG_ON(!list_empty(&ring->persistent_purge_list));
+ 		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+-		BUG_ON(!list_empty(&ring->free_pages));
+-		BUG_ON(ring->free_pages_num != 0);
++		BUG_ON(ring->free_pages.num_pages != 0);
+ 		BUG_ON(ring->persistent_gnt_c != 0);
+ 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+ 		ring->active = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 46a9617fee5f4..5ffabbdbf6cc1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -183,7 +183,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
+ 			if (err)
+ 				goto out;
+ 
+-			err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
++			err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
+ 			if (err)
+ 				goto out;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 640cbafdde101..c7020a80b0b2b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -977,9 +977,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		goto error;
+ 	}
+ 
+-	/* Update the actual used number of crtc */
+-	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+-
+ 	/* create fake encoders for MST */
+ 	dm_dp_create_fake_mst_encoders(adev);
+ 
+@@ -3099,6 +3096,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	const struct dc_plane_cap *plane;
+ 
++	dm->display_indexes_num = dm->dc->caps.max_streams;
++	/* Update the actual used number of crtc */
++	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
++
+ 	link_cnt = dm->dc->caps.max_links;
+ 	if (amdgpu_dm_mode_config_init(dm->adev)) {
+ 		DRM_ERROR("DM: Failed to initialize mode config\n");
+@@ -3160,8 +3161,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 			goto fail;
+ 		}
+ 
+-	dm->display_indexes_num = dm->dc->caps.max_streams;
+-
+ 	/* loops over all connectors on the board */
+ 	for (i = 0; i < link_cnt; i++) {
+ 		struct dc_link *link = NULL;
+diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
+index 6417f374b923a..951d5f708e92b 100644
+--- a/drivers/gpu/drm/exynos/Kconfig
++++ b/drivers/gpu/drm/exynos/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config DRM_EXYNOS
+ 	tristate "DRM Support for Samsung SoC Exynos Series"
+-	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
++	depends on OF && DRM && COMMON_CLK
++	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
+ 	depends on MMU
+ 	select DRM_KMS_HELPER
+ 	select VIDEOMODE_HELPERS
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index d6295eb20b636..b12a6bb92241d 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -597,7 +597,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ 		return 0;
+ 	}
+ 	/* Also take into account max slice width */
+-	min_slice_count = min_t(u8, min_slice_count,
++	min_slice_count = max_t(u8, min_slice_count,
+ 				DIV_ROUND_UP(mode_hdisplay,
+ 					     max_slice_width));
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 7c9be64d6e30d..5a0b04314bf68 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2613,7 +2613,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+ 			break;
+ }
+ 
+-static void eb_request_add(struct i915_execbuffer *eb)
++static int eb_request_add(struct i915_execbuffer *eb, int err)
+ {
+ 	struct i915_request *rq = eb->request;
+ 	struct intel_timeline * const tl = i915_request_timeline(rq);
+@@ -2634,6 +2634,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
+ 		/* Serialise with context_close via the add_to_timeline */
+ 		i915_request_set_error_once(rq, -ENOENT);
+ 		__i915_request_skip(rq);
++		err = -ENOENT; /* override any transient errors */
+ 	}
+ 
+ 	__i915_request_queue(rq, &attr);
+@@ -2643,6 +2644,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
+ 		retire_requests(tl, prev);
+ 
+ 	mutex_unlock(&tl->mutex);
++
++	return err;
+ }
+ 
+ static int
+@@ -2844,7 +2847,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
+ err_request:
+ 	add_to_client(eb.request, file);
+ 	i915_request_get(eb.request);
+-	eb_request_add(&eb);
++	err = eb_request_add(&eb, err);
+ 
+ 	if (fences)
+ 		signal_fence_array(&eb, fences);
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 9dfa9a95a4d73..e5a2d99846572 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq)
+ static bool execlists_hold(struct intel_engine_cs *engine,
+ 			   struct i915_request *rq)
+ {
++	if (i915_request_on_hold(rq))
++		return false;
++
+ 	spin_lock_irq(&engine->active.lock);
+ 
+ 	if (i915_request_completed(rq)) { /* too late! */
+@@ -3169,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data)
+ 		spin_unlock_irqrestore(&engine->active.lock, flags);
+ 
+ 		/* Recheck after serialising with direct-submission */
+-		if (unlikely(timeout && preempt_timeout(engine)))
++		if (unlikely(timeout && preempt_timeout(engine))) {
++			cancel_timer(&engine->execlists.preempt);
+ 			execlists_reset(engine, "preemption time out");
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index 4f74706967fdc..413dadfac2d19 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
+ #define _L3_CACHEABILITY(value)	((value) << 4)
+ 
+ /* Helper defines */
+-#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */
+-#define GEN11_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
++#define GEN9_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
+ 
+ /* (e)LLC caching options */
+ /*
+@@ -328,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ 	if (INTEL_GEN(i915) >= 12) {
+ 		table->size  = ARRAY_SIZE(tgl_mocs_table);
+ 		table->table = tgl_mocs_table;
+-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ 	} else if (IS_GEN(i915, 11)) {
+ 		table->size  = ARRAY_SIZE(icl_mocs_table);
+ 		table->table = icl_mocs_table;
+-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ 	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
+ 		table->size  = ARRAY_SIZE(skl_mocs_table);
+ 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+index fc6a7e451abef..304267f7849ac 100644
+--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
++++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+@@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi)
+ 	lcd->spi = spi;
+ 	mutex_init(&lcd->mutex);
+ 
+-	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
++	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(lcd->reset_gpio)) {
+ 		dev_err(&spi->dev, "failed to get reset GPIO\n");
+ 		return PTR_ERR(lcd->reset_gpio);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 63f967902c2d8..a29912f3b997e 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ 	struct device_node  *port, *endpoint;
+ 	int ret = 0, child_count = 0;
+ 	const char *name;
+-	u32 endpoint_id;
++	u32 endpoint_id = 0;
+ 
+ 	lvds->drm_dev = drm_dev;
+ 	port = of_graph_get_port_by_id(dev->of_node, 1);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index d09b807e1c3a1..3a1617a3e5bf7 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -122,26 +122,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ 	struct cpuidle_state *state = &drv->states[index];
+ 	unsigned long eax = flg2MWAIT(state->flags);
+ 	unsigned long ecx = 1; /* break on interrupt flag */
+-	bool tick;
+-
+-	if (!static_cpu_has(X86_FEATURE_ARAT)) {
+-		/*
+-		 * Switch over to one-shot tick broadcast if the target C-state
+-		 * is deeper than C1.
+-		 */
+-		if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
+-			tick = true;
+-			tick_broadcast_enter();
+-		} else {
+-			tick = false;
+-		}
+-	}
+ 
+ 	mwait_idle_with_hints(eax, ecx);
+ 
+-	if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
+-		tick_broadcast_exit();
+-
+ 	return index;
+ }
+ 
+@@ -1153,6 +1136,20 @@ static bool __init intel_idle_max_cstate_reached(int cstate)
+ 	return false;
+ }
+ 
++static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
++{
++	unsigned long eax = flg2MWAIT(state->flags);
++
++	if (boot_cpu_has(X86_FEATURE_ARAT))
++		return false;
++
++	/*
++	 * Switch over to one-shot tick broadcast if the target C-state
++	 * is deeper than C1.
++	 */
++	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
++}
++
+ #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+ #include <acpi/processor.h>
+ 
+@@ -1265,6 +1262,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+ 		if (disabled_states_mask & BIT(cstate))
+ 			state->flags |= CPUIDLE_FLAG_OFF;
+ 
++		if (intel_idle_state_needs_timer_stop(state))
++			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
++
+ 		state->enter = intel_idle;
+ 		state->enter_s2idle = intel_idle_s2idle;
+ 	}
+@@ -1503,6 +1503,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+ 		     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
+ 			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
+ 
++		if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
++			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;
++
+ 		drv->state_count++;
+ 	}
+ 
+diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
+index e413801f04910..f515fae465c35 100644
+--- a/drivers/input/misc/cm109.c
++++ b/drivers/input/misc/cm109.c
+@@ -568,12 +568,15 @@ static int cm109_input_open(struct input_dev *idev)
+ 	dev->ctl_data->byte[HID_OR2] = dev->keybit;
+ 	dev->ctl_data->byte[HID_OR3] = 0x00;
+ 
++	dev->ctl_urb_pending = 1;
+ 	error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
+-	if (error)
++	if (error) {
++		dev->ctl_urb_pending = 0;
+ 		dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
+ 			__func__, error);
+-	else
++	} else {
+ 		dev->open = 1;
++	}
+ 
+ 	mutex_unlock(&dev->pm_mutex);
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 7ecb65176c1aa..3a2dcf0805f12 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -611,6 +611,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
++		},
++	},
+ 	{
+ 		/* Advent 4211 */
+ 		.matches = {
+diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
+index 42c6c55816626..e8371d40ab8d8 100644
+--- a/drivers/interconnect/qcom/msm8916.c
++++ b/drivers/interconnect/qcom/msm8916.c
+@@ -182,7 +182,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_IN
+ DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
+ DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+ DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+-DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
++DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, -1, -1, MSM8916_SNOC_QDSS_INT);
+ DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
+ DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+ DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+@@ -208,14 +208,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC
+ DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
+ DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+ DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
+-DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
++DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+-DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
+-DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
++DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, -1, 0);
++DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, -1, 0);
+ DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+@@ -239,7 +239,7 @@ DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
+-DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
++DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
+ DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
+@@ -249,7 +249,7 @@ DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_
+ DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
+ DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
+ DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
+-DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
++DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, -1, -1, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+ DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
+ DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
+ DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
+index d4769a5ea182e..9820709b43dbd 100644
+--- a/drivers/interconnect/qcom/qcs404.c
++++ b/drivers/interconnect/qcom/qcs404.c
+@@ -157,8 +157,8 @@ struct qcom_icc_desc {
+ 	}
+ 
+ DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+-DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+-DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
++DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
++DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+ DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
+ DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+ DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index be4318044f96c..702fbaa6c9ada 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -69,6 +69,10 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+ {
+ 	struct qcom_smmu *qsmmu;
+ 
++	/* Check to make sure qcom_scm has finished probing */
++	if (!qcom_scm_is_available())
++		return ERR_PTR(-EPROBE_DEFER);
++
+ 	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ 	if (!qsmmu)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 548de7538632a..51b8743fdda03 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -42,7 +42,6 @@
+ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
+-#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)
+ 
+ #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
+ #define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)
+@@ -4735,9 +4734,6 @@ static int its_save_disable(void)
+ 	list_for_each_entry(its, &its_nodes, entry) {
+ 		void __iomem *base;
+ 
+-		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-			continue;
+-
+ 		base = its->base;
+ 		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
+ 		err = its_force_quiescent(base);
+@@ -4756,9 +4752,6 @@ err:
+ 		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
+ 			void __iomem *base;
+ 
+-			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-				continue;
+-
+ 			base = its->base;
+ 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+ 		}
+@@ -4778,9 +4771,6 @@ static void its_restore_enable(void)
+ 		void __iomem *base;
+ 		int i;
+ 
+-		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-			continue;
+-
+ 		base = its->base;
+ 
+ 		/*
+@@ -4788,7 +4778,10 @@ static void its_restore_enable(void)
+ 		 * don't restore it since writing to CBASER or BASER<n>
+ 		 * registers is undefined according to the GIC v3 ITS
+ 		 * Specification.
++		 *
++		 * Firmware resuming with the ITS enabled is terminally broken.
+ 		 */
++		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
+ 		ret = its_force_quiescent(base);
+ 		if (ret) {
+ 			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
+@@ -5068,9 +5061,6 @@ static int __init its_probe_one(struct resource *res,
+ 		ctlr |= GITS_CTLR_ImDe;
+ 	writel_relaxed(ctlr, its->base + GITS_CTLR);
+ 
+-	if (GITS_TYPER_HCC(typer))
+-		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
+-
+ 	err = its_init_domain(handle, its);
+ 	if (err)
+ 		goto out_free_tables;
+diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+index beae6aa12638a..51c9b04e450b7 100644
+--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
++++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+@@ -88,13 +88,15 @@ enum pulse8_msgcodes {
+ 	MSGCODE_SET_PHYSICAL_ADDRESS,	/* 0x20 */
+ 	MSGCODE_GET_DEVICE_TYPE,
+ 	MSGCODE_SET_DEVICE_TYPE,
+-	MSGCODE_GET_HDMI_VERSION,
++	MSGCODE_GET_HDMI_VERSION,	/* Removed in FW >= 10 */
+ 	MSGCODE_SET_HDMI_VERSION,
+ 	MSGCODE_GET_OSD_NAME,
+ 	MSGCODE_SET_OSD_NAME,
+ 	MSGCODE_WRITE_EEPROM,
+ 	MSGCODE_GET_ADAPTER_TYPE,	/* 0x28 */
+ 	MSGCODE_SET_ACTIVE_SOURCE,
++	MSGCODE_GET_AUTO_POWER_ON,	/* New for FW >= 10 */
++	MSGCODE_SET_AUTO_POWER_ON,
+ 
+ 	MSGCODE_FRAME_EOM = 0x80,
+ 	MSGCODE_FRAME_ACK = 0x40,
+@@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = {
+ 	"WRITE_EEPROM",
+ 	"GET_ADAPTER_TYPE",
+ 	"SET_ACTIVE_SOURCE",
++	"GET_AUTO_POWER_ON",
++	"SET_AUTO_POWER_ON",
+ };
+ 
+ static const char *pulse8_msgname(u8 cmd)
+@@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+ 	if (err)
+ 		goto unlock;
+ 
+-	cmd[0] = MSGCODE_SET_HDMI_VERSION;
+-	cmd[1] = adap->log_addrs.cec_version;
+-	err = pulse8_send_and_wait(pulse8, cmd, 2,
+-				   MSGCODE_COMMAND_ACCEPTED, 0);
+-	if (err)
+-		goto unlock;
++	if (pulse8->vers < 10) {
++		cmd[0] = MSGCODE_SET_HDMI_VERSION;
++		cmd[1] = adap->log_addrs.cec_version;
++		err = pulse8_send_and_wait(pulse8, cmd, 2,
++					   MSGCODE_COMMAND_ACCEPTED, 0);
++		if (err)
++			goto unlock;
++	}
+ 
+ 	if (adap->log_addrs.osd_name[0]) {
+ 		size_t osd_len = strlen(adap->log_addrs.osd_name);
+@@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio)
+ 	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+ 
+ 	cec_unregister_adapter(pulse8->adap);
+-	pulse8->serio = NULL;
+ 	serio_set_drvdata(serio, NULL);
+ 	serio_close(serio);
+ }
+@@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+ 	dev_dbg(pulse8->dev, "Autonomous mode: %s",
+ 		data[0] ? "on" : "off");
+ 
++	if (pulse8->vers >= 10) {
++		cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
++		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++		if (!err)
++			dev_dbg(pulse8->dev, "Auto Power On: %s",
++				data[0] ? "on" : "off");
++	}
++
+ 	cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+ 	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ 	if (err)
+@@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+ 	dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+ 		cec_phys_addr_exp(*pa));
+ 
+-	cmd[0] = MSGCODE_GET_HDMI_VERSION;
+-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+-	if (err)
+-		return err;
+-	log_addrs->cec_version = data[0];
+-	dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++	log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
++	if (pulse8->vers < 10) {
++		cmd[0] = MSGCODE_GET_HDMI_VERSION;
++		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++		if (err)
++			return err;
++		log_addrs->cec_version = data[0];
++		dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++	}
+ 
+ 	cmd[0] = MSGCODE_GET_OSD_NAME;
+ 	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
+@@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
+ 	pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
+ 					    dev_name(&serio->dev), caps, 1);
+ 	err = PTR_ERR_OR_ZERO(pulse8->adap);
+-	if (err < 0)
+-		goto free_device;
++	if (err < 0) {
++		kfree(pulse8);
++		return err;
++	}
+ 
+ 	pulse8->dev = &serio->dev;
+ 	serio_set_drvdata(serio, pulse8);
+@@ -874,8 +892,6 @@ close_serio:
+ 	serio_close(serio);
+ delete_adap:
+ 	cec_delete_adapter(pulse8->adap);
+-free_device:
+-	kfree(pulse8);
+ 	return err;
+ }
+ 
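
The Pulse-Eight changes above gate features on the reported firmware version: GET/SET_HDMI_VERSION exists only below firmware 10, while the AUTO_POWER_ON opcodes exist only from 10 on. A compact sketch of such version-gated capability checks (hypothetical command set, not the device's wire protocol):

#include <stdio.h>

enum cmd { CMD_GET_HDMI_VERSION, CMD_GET_AUTO_POWER_ON };

/* Returns nonzero if this firmware revision still implements cmd. */
static int fw_supports(unsigned int vers, enum cmd c)
{
	switch (c) {
	case CMD_GET_HDMI_VERSION:
		return vers < 10;   /* removed in FW >= 10 */
	case CMD_GET_AUTO_POWER_ON:
		return vers >= 10;  /* added in FW >= 10 */
	}
	return 0;
}

int main(void)
{
	for (unsigned int vers = 9; vers <= 10; vers++)
		printf("fw %u: hdmi_version=%d auto_power_on=%d\n", vers,
		       fw_supports(vers, CMD_GET_HDMI_VERSION),
		       fw_supports(vers, CMD_GET_AUTO_POWER_ON));
	return 0;
}
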
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index 2e55890ad6a61..8da1720357a26 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -5416,6 +5416,8 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ 		params.num_memories = 33;
+ 		params.derr = true;
+ 		params.disable_clock_gating = true;
++		extract_info_from_fw = false;
++		break;
+ 	default:
+ 		return;
+ 	}
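
The gaudi fix adds the missing break so the ECC case no longer falls through into default: and returns before its settings are used. A standalone demonstration of that bug class:

#include <stdio.h>

static const char *classify(int event, int fixed)
{
	const char *result = "unhandled";

	switch (event) {
	case 1:
		result = "ecc-derr";
		if (fixed)
			break;      /* without this, control falls through */
		/* fallthrough */
	default:
		return "dropped";   /* the work above is thrown away */
	}
	return result;
}

int main(void)
{
	printf("buggy: %s\n", classify(1, 0)); /* "dropped"  */
	printf("fixed: %s\n", classify(1, 1)); /* "ecc-derr" */
	return 0;
}
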
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index fa313b6341354..ba6f4a65212f7 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -580,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 
+ 	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+ 
+-	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
++	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ 		/*
+ 		 * Ensure RPMB/R1B command has completed by polling CMD13
+ 		 * "Send Status".
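
The one-line mmc change matters because MMC_RSP_R1B is a multi-bit mask: flags & MMC_RSP_R1B is true whenever any of the shared response bits is set, while (flags & MMC_RSP_R1B) == MMC_RSP_R1B requires all of them, i.e. a genuine R1B. Illustrated below with made-up bit values (the kernel's actual flag encoding differs):

#include <stdio.h>

/* Hypothetical flag bits; the kernel's real values differ. */
#define RSP_PRESENT (1u << 0)
#define RSP_CRC     (1u << 2)
#define RSP_BUSY    (1u << 3)
#define RSP_R1      (RSP_PRESENT | RSP_CRC)            /* no busy   */
#define RSP_R1B     (RSP_PRESENT | RSP_CRC | RSP_BUSY) /* with busy */

int main(void)
{
	unsigned int flags = RSP_R1; /* a plain R1 command */

	/* Any-bit test: wrongly treats an R1 as a busy (R1B) response. */
	printf("any-bit:  %d\n", (flags & RSP_R1B) != 0);        /* 1 */
	/* All-bits test: correctly rejects it. */
	printf("all-bits: %d\n", (flags & RSP_R1B) == RSP_R1B);  /* 0 */
	return 0;
}
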
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index b0c27944db7f7..28341aed4648a 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -447,7 +447,7 @@ struct msdc_host {
+ 
+ static const struct mtk_mmc_compatible mt8135_compat = {
+ 	.clk_div_bits = 8,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE,
+ 	.async_fifo = false,
+@@ -486,7 +486,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
+ 
+ static const struct mtk_mmc_compatible mt2701_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -512,7 +512,7 @@ static const struct mtk_mmc_compatible mt2712_compat = {
+ 
+ static const struct mtk_mmc_compatible mt7622_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -525,7 +525,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
+ 
+ static const struct mtk_mmc_compatible mt8516_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -536,7 +536,7 @@ static const struct mtk_mmc_compatible mt8516_compat = {
+ 
+ static const struct mtk_mmc_compatible mt7620_compat = {
+ 	.clk_div_bits = 8,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE,
+ 	.async_fifo = false,
+@@ -549,6 +549,7 @@ static const struct mtk_mmc_compatible mt7620_compat = {
+ 
+ static const struct mtk_mmc_compatible mt6779_compat = {
+ 	.clk_div_bits = 12,
++	.recheck_sdio_irq = false,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -2654,11 +2655,29 @@ static int msdc_runtime_resume(struct device *dev)
+ 	msdc_restore_reg(host);
+ 	return 0;
+ }
++
++static int msdc_suspend(struct device *dev)
++{
++	struct mmc_host *mmc = dev_get_drvdata(dev);
++	int ret;
++
++	if (mmc->caps2 & MMC_CAP2_CQE) {
++		ret = cqhci_suspend(mmc);
++		if (ret)
++			return ret;
++	}
++
++	return pm_runtime_force_suspend(dev);
++}
++
++static int msdc_resume(struct device *dev)
++{
++	return pm_runtime_force_resume(dev);
++}
+ #endif
+ 
+ static const struct dev_pm_ops msdc_dev_pm_ops = {
+-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+-				pm_runtime_force_resume)
++	SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
+ 	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
+index f1ab6a08599c9..358b135a84640 100644
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -1186,16 +1186,19 @@ static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
+ static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_emmc_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static struct sdhci_arasan_of_data intel_keembay_sd_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_sd_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static struct sdhci_arasan_of_data intel_keembay_sdio_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_sdio_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static const struct of_device_id sdhci_arasan_of_match[] = {
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 8e9f5620c9a21..095505fa09de3 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -1304,12 +1304,22 @@ int c_can_power_up(struct net_device *dev)
+ 				time_after(time_out, jiffies))
+ 		cpu_relax();
+ 
+-	if (time_after(jiffies, time_out))
+-		return -ETIMEDOUT;
++	if (time_after(jiffies, time_out)) {
++		ret = -ETIMEDOUT;
++		goto err_out;
++	}
+ 
+ 	ret = c_can_start(dev);
+-	if (!ret)
+-		c_can_irq_control(priv, true);
++	if (ret)
++		goto err_out;
++
++	c_can_irq_control(priv, true);
++
++	return 0;
++
++err_out:
++	c_can_reset_ram(priv, false);
++	c_can_pm_runtime_put_sync(priv);
+ 
+ 	return ret;
+ }
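
c_can_power_up() now funnels every failure through err_out so the RAM enable and runtime-PM reference taken earlier are undone in reverse order. The canonical goto-unwind shape, as a standalone sketch:

#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s: %s\n", name, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int power_up(int start_ok)
{
	int ret;

	ret = step("enable ram", 1);
	if (ret)
		return ret;
	ret = step("runtime get", 1);
	if (ret)
		goto err_ram;
	ret = step("start controller", start_ok);
	if (ret)
		goto err_pm;            /* unwind in reverse order */

	return 0;

err_pm:
	step("runtime put", 1);
err_ram:
	step("disable ram", 1);
	return ret;
}

int main(void)
{
	power_up(0); /* the failure path exercises both unwind steps */
	return 0;
}
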
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 72acd1ba162d2..43151dd6cb1c3 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -692,8 +692,10 @@ static int kvaser_pciefd_open(struct net_device *netdev)
+ 		return err;
+ 
+ 	err = kvaser_pciefd_bus_on(can);
+-	if (err)
++	if (err) {
++		close_candev(netdev);
+ 		return err;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index d4030abad935d..61a93b1920379 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1385,6 +1385,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
+ 						&m_can_data_bittiming_const_31X;
+ 		break;
+ 	case 32:
++	case 33:
++		/* Support both MCAN version v3.2.x and v3.3.0 */
+ 		m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ 			m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+ 
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index e5d7d85e0b6d1..7347ab39c5b65 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -489,18 +489,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 	spi->bits_per_word = 32;
+ 	ret = spi_setup(spi);
+ 	if (ret)
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 
+ 	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
+ 					&spi->dev, &tcan4x5x_regmap);
+ 	if (IS_ERR(priv->regmap)) {
+ 		ret = PTR_ERR(priv->regmap);
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 	}
+ 
+ 	ret = tcan4x5x_power_enable(priv->power, 1);
+ 	if (ret)
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 
+ 	ret = tcan4x5x_parse_config(mcan_class);
+ 	if (ret)
+@@ -519,11 +519,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 
+ out_power:
+ 	tcan4x5x_power_enable(priv->power, 0);
+-out_clk:
+-	if (!IS_ERR(mcan_class->cclk)) {
+-		clk_disable_unprepare(mcan_class->cclk);
+-		clk_disable_unprepare(mcan_class->hclk);
+-	}
+  out_m_can_class_free_dev:
+ 	m_can_class_free_dev(mcan_class->net);
+ 	dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 9f107798f904b..25a4d7d0b3498 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -474,7 +474,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 		netdev_dbg(dev, "arbitration lost interrupt\n");
+ 		alc = priv->read_reg(priv, SJA1000_ALC);
+ 		priv->can.can_stats.arbitration_lost++;
+-		stats->tx_errors++;
+ 		cf->can_id |= CAN_ERR_LOSTARB;
+ 		cf->data[0] = alc & 0x1f;
+ 	}
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index e2c6cf4b2228f..b3f2f4fe5ee04 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -604,7 +604,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 		netdev_dbg(dev, "arbitration lost interrupt\n");
+ 		alc = readl(priv->base + SUN4I_REG_STA_ADDR);
+ 		priv->can.can_stats.arbitration_lost++;
+-		stats->tx_errors++;
+ 		if (likely(skb)) {
+ 			cf->can_id |= CAN_ERR_LOSTARB;
+ 			cf->data[0] = (alc >> 8) & 0x1f;
+diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
+index 7fb42f388d591..7b79528d6eed2 100644
+--- a/drivers/net/ethernet/broadcom/Kconfig
++++ b/drivers/net/ethernet/broadcom/Kconfig
+@@ -88,6 +88,7 @@ config BNX2
+ config CNIC
+ 	tristate "QLogic CNIC support"
+ 	depends on PCI && (IPV6 || IPV6=n)
++	depends on MMU
+ 	select BNX2
+ 	select UIO
+ 	help
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3654be5772c85..68aa9930d8187 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -846,7 +846,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
+ static int ibmvnic_login(struct net_device *netdev)
+ {
+ 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	int retry_count = 0;
+ 	int retries = 10;
+ 	bool retry;
+@@ -862,10 +862,8 @@ static int ibmvnic_login(struct net_device *netdev)
+ 		adapter->init_done_rc = 0;
+ 		reinit_completion(&adapter->init_done);
+ 		rc = send_login(adapter);
+-		if (rc) {
+-			netdev_warn(netdev, "Unable to login\n");
++		if (rc)
+ 			return rc;
+-		}
+ 
+ 		if (!wait_for_completion_timeout(&adapter->init_done,
+ 						 timeout)) {
+@@ -952,7 +950,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
+ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
+ {
+ 	struct net_device *netdev = adapter->netdev;
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	union ibmvnic_crq crq;
+ 	bool resend;
+ 	int rc;
+@@ -2186,17 +2184,6 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
+ 	return rwi;
+ }
+ 
+-static void free_all_rwi(struct ibmvnic_adapter *adapter)
+-{
+-	struct ibmvnic_rwi *rwi;
+-
+-	rwi = get_next_rwi(adapter);
+-	while (rwi) {
+-		kfree(rwi);
+-		rwi = get_next_rwi(adapter);
+-	}
+-}
+-
+ static void __ibmvnic_reset(struct work_struct *work)
+ {
+ 	struct ibmvnic_rwi *rwi;
+@@ -2254,6 +2241,14 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 				rc = do_hard_reset(adapter, rwi, reset_state);
+ 				rtnl_unlock();
+ 			}
++			if (rc) {
++				/* give backing device time to settle down */
++				netdev_dbg(adapter->netdev,
++					   "[S:%d] Hard reset failed, waiting 60 secs\n",
++					   adapter->state);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++				schedule_timeout(60 * HZ);
++			}
+ 		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+ 				adapter->from_passive_init)) {
+ 			rc = do_reset(adapter, rwi, reset_state);
+@@ -2265,9 +2260,9 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 			else
+ 				adapter->state = reset_state;
+ 			rc = 0;
+-		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
+-		    !adapter->force_reset_recovery)
+-			break;
++		}
++		if (rc)
++			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
+ 
+ 		rwi = get_next_rwi(adapter);
+ 
+@@ -2281,11 +2276,6 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 		complete(&adapter->reset_done);
+ 	}
+ 
+-	if (rc) {
+-		netdev_dbg(adapter->netdev, "Reset failed\n");
+-		free_all_rwi(adapter);
+-	}
+-
+ 	clear_bit_unlock(0, &adapter->resetting);
+ }
+ 
+@@ -2368,6 +2358,12 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ 
++	if (test_bit(0, &adapter->resetting)) {
++		netdev_err(adapter->netdev,
++			   "Adapter is resetting, skip timeout reset\n");
++		return;
++	}
++
+ 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
+ }
+ 
+@@ -2873,15 +2869,26 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ {
+ 	int rc;
+ 
++	/* scrq is NULL here: don't dereference it in the debug message */
++	if (!scrq) {
++		netdev_dbg(adapter->netdev,
++			   "Invalid scrq reset: scrq is NULL\n");
++		return -EINVAL;
++	}
++
+ 	if (scrq->irq) {
+ 		free_irq(scrq->irq, scrq);
+ 		irq_dispose_mapping(scrq->irq);
+ 		scrq->irq = 0;
+ 	}
+-
+-	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+-	atomic_set(&scrq->used, 0);
+-	scrq->cur = 0;
++	if (scrq->msgs) {
++		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
++		atomic_set(&scrq->used, 0);
++		scrq->cur = 0;
++	} else {
++		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
++		return -EINVAL;
++	}
+ 
+ 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+@@ -3728,15 +3735,16 @@ static int send_login(struct ibmvnic_adapter *adapter)
+ 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
+ 	struct ibmvnic_login_buffer *login_buffer;
+ 	struct device *dev = &adapter->vdev->dev;
++	struct vnic_login_client_data *vlcd;
+ 	dma_addr_t rsp_buffer_token;
+ 	dma_addr_t buffer_token;
+ 	size_t rsp_buffer_size;
+ 	union ibmvnic_crq crq;
++	int client_data_len;
+ 	size_t buffer_size;
+ 	__be64 *tx_list_p;
+ 	__be64 *rx_list_p;
+-	int client_data_len;
+-	struct vnic_login_client_data *vlcd;
++	int rc;
+ 	int i;
+ 
+ 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
+@@ -3840,16 +3848,25 @@ static int send_login(struct ibmvnic_adapter *adapter)
+ 	crq.login.cmd = LOGIN;
+ 	crq.login.ioba = cpu_to_be32(buffer_token);
+ 	crq.login.len = cpu_to_be32(buffer_size);
+-	ibmvnic_send_crq(adapter, &crq);
++
++	adapter->login_pending = true;
++	rc = ibmvnic_send_crq(adapter, &crq);
++	if (rc) {
++		adapter->login_pending = false;
++		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
++		goto buf_rsp_map_failed;
++	}
+ 
+ 	return 0;
+ 
+ buf_rsp_map_failed:
+ 	kfree(login_rsp_buffer);
++	adapter->login_rsp_buf = NULL;
+ buf_rsp_alloc_failed:
+ 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
+ buf_map_failed:
+ 	kfree(login_buffer);
++	adapter->login_buf = NULL;
+ buf_alloc_failed:
+ 	return -1;
+ }
+@@ -4371,6 +4388,15 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	struct ibmvnic_login_buffer *login = adapter->login_buf;
+ 	int i;
+ 
++	/* CHECK: Test/set of login_pending does not need to be atomic
++	 * because only ibmvnic_tasklet tests/clears this.
++	 */
++	if (!adapter->login_pending) {
++		netdev_warn(netdev, "Ignoring unexpected login response\n");
++		return 0;
++	}
++	adapter->login_pending = false;
++
+ 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
+ 			 DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
+@@ -4400,7 +4426,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	     adapter->req_rx_add_queues !=
+ 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
+ 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
+-		ibmvnic_remove(adapter->vdev);
++		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ 		return -EIO;
+ 	}
+ 	release_login_buffer(adapter);
+@@ -4718,6 +4744,11 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ 		case IBMVNIC_CRQ_INIT:
+ 			dev_info(dev, "Partner initialized\n");
+ 			adapter->from_passive_init = true;
++			/* Discard any stale login responses from prev reset.
++			 * CHECK: should we clear even on INIT_COMPLETE?
++			 */
++			adapter->login_pending = false;
++
+ 			if (!completion_done(&adapter->init_done)) {
+ 				complete(&adapter->init_done);
+ 				adapter->init_done_rc = -EIO;
+@@ -5056,7 +5087,7 @@ map_failed:
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+ {
+ 	struct device *dev = &adapter->vdev->dev;
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	u64 old_num_rx_queues, old_num_tx_queues;
+ 	int rc;
+ 
+@@ -5185,6 +5216,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ 	dev_set_drvdata(&dev->dev, netdev);
+ 	adapter->vdev = dev;
+ 	adapter->netdev = netdev;
++	adapter->login_pending = false;
+ 
+ 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
+ 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
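
The ibmvnic series adds a login_pending flag that brackets the LOGIN exchange: armed just before the CRQ is sent, cleared when the response is consumed, and any response arriving while it is clear is dropped as stale. A minimal model of that guard (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

static bool login_pending;

static int send_login(int send_ok)
{
	login_pending = true;          /* arm before the request leaves */
	if (!send_ok) {
		login_pending = false; /* disarm on send failure */
		return -1;
	}
	return 0;
}

static int handle_login_rsp(void)
{
	if (!login_pending) {
		printf("ignoring unexpected login response\n");
		return 0;              /* stale response from an old reset */
	}
	login_pending = false;
	printf("login response consumed\n");
	return 0;
}

int main(void)
{
	handle_login_rsp();            /* stale: nothing pending yet */
	send_login(1);
	handle_login_rsp();            /* consumed */
	handle_login_rsp();            /* a duplicate is ignored */
	return 0;
}
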
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index 31d604fc7bde7..77f43cbdb6dc4 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -1084,6 +1084,7 @@ struct ibmvnic_adapter {
+ 	struct delayed_work ibmvnic_delayed_reset;
+ 	unsigned long resetting;
+ 	bool napi_enabled, from_passive_init;
++	bool login_pending;
+ 
+ 	bool failover_pending;
+ 	bool force_reset_recovery;
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+index c010e6febbf47..6a071b3c8118c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+@@ -5,10 +5,9 @@
+  *
+  * GPL LICENSE SUMMARY
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -28,10 +27,9 @@
+  *
+  * BSD LICENSE
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -128,7 +126,9 @@ enum iwl_sta_flags {
+ 	STA_FLG_MAX_AGG_SIZE_256K	= (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 	STA_FLG_MAX_AGG_SIZE_512K	= (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 	STA_FLG_MAX_AGG_SIZE_1024K	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+-	STA_FLG_MAX_AGG_SIZE_MSK	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_2M		= (8 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_4M		= (9 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_MSK	= (0xf << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 
+ 	STA_FLG_AGG_MPDU_DENS_SHIFT	= 23,
+ 	STA_FLG_AGG_MPDU_DENS_2US	= (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index e27c13263a232..44abe44c04632 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -488,8 +488,8 @@ struct iwl_cfg {
+ #define IWL_CFG_RF_ID_HR		0x7
+ #define IWL_CFG_RF_ID_HR1		0x4
+ 
+-#define IWL_CFG_NO_160			0x0
+-#define IWL_CFG_160			0x1
++#define IWL_CFG_NO_160			0x1
++#define IWL_CFG_160			0x0
+ 
+ #define IWL_CFG_CORES_BT		0x0
+ #define IWL_CFG_CORES_BT_GNSS		0x5
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+index cb9e8e189a1a4..1d48c7d7fffd4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+@@ -147,6 +147,16 @@
+ #define CSR_MAC_SHADOW_REG_CTL2		(CSR_BASE + 0x0AC)
+ #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE	0xFFFF
+ 
++/* LTR control (since IWL_DEVICE_FAMILY_22000) */
++#define CSR_LTR_LONG_VAL_AD			(CSR_BASE + 0x0D4)
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ	0x80000000
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE	0x1c000000
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL	0x03ff0000
++#define CSR_LTR_LONG_VAL_AD_SNOOP_REQ		0x00008000
++#define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE		0x00001c00
++#define CSR_LTR_LONG_VAL_AD_SNOOP_VAL		0x000003ff
++#define CSR_LTR_LONG_VAL_AD_SCALE_USEC		2
++
+ /* GIO Chicken Bits (PCI Express bus link power management) */
+ #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 34362dc0d4612..f2d65e8384105 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3057,7 +3057,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ 
+ 	/* this would be a mac80211 bug ... but don't crash */
+ 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+-		return -EINVAL;
++		return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL;
+ 
+ 	/*
+ 	 * If we are in a STA removal flow and in DQA mode:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 9e124755a3cee..2158fd2eff736 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -196,6 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		mpdu_dens = sta->ht_cap.ampdu_density;
+ 	}
+ 
++
+ 	if (sta->vht_cap.vht_supported) {
+ 		agg_size = sta->vht_cap.cap &
+ 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+@@ -205,6 +206,23 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		agg_size = sta->ht_cap.ampdu_factor;
+ 	}
+ 
++	/* D6.0 10.12.2 A-MPDU length limit rules
++	 * A STA indicates the maximum length of the A-MPDU preEOF padding
++	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
++	 * Exponent field in its HT Capabilities, VHT Capabilities,
++	 * and HE 6 GHz Band Capabilities elements (if present) and the
++	 * Maximum AMPDU Length Exponent Extension field in its HE
++	 * Capabilities element
++	 */
++	if (sta->he_cap.has_he)
++		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
++					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
++
++	/* Limit to max A-MPDU supported by FW */
++	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
++		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
++			    STA_FLG_MAX_AGG_SIZE_SHIFT);
++
+ 	add_sta_cmd.station_flags |=
+ 		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ 	add_sta_cmd.station_flags |=
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 1ab1366004159..0fc2a6e49f9ee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -252,6 +252,26 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 
+ 	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ 		    CSR_AUTO_FUNC_BOOT_ENA);
++
++	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
++		/*
++		 * The firmware initializes this again later (to a smaller
++		 * value), but for the boot process initialize the LTR to
++		 * ~250 usec.
++		 */
++		u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
++			  u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++					  CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
++			  u32_encode_bits(250,
++					  CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
++			  CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
++			  u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++					  CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
++			  u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
++
++		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
++	}
++
+ 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ 		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ 	else
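
The LTR programming above builds one register value from several fields with u32_encode_bits(), which shifts a value into the position described by a mask. A standalone re-implementation of the helper (the kernel's version lives in <linux/bitfield.h>) applied to the masks defined earlier in this patch:

#include <stdint.h>
#include <stdio.h>

#define NO_SNOOP_REQ   0x80000000u
#define NO_SNOOP_SCALE 0x1c000000u
#define NO_SNOOP_VAL   0x03ff0000u
#define SNOOP_REQ      0x00008000u
#define SNOOP_SCALE    0x00001c00u
#define SNOOP_VAL      0x000003ffu
#define SCALE_USEC     2u

/* Shift v into the field described by mask (lowest set bit = field LSB). */
static uint32_t u32_encode_bits(uint32_t v, uint32_t mask)
{
	return (v << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t val = NO_SNOOP_REQ |
		       u32_encode_bits(SCALE_USEC, NO_SNOOP_SCALE) |
		       u32_encode_bits(250, NO_SNOOP_VAL) |
		       SNOOP_REQ |
		       u32_encode_bits(SCALE_USEC, SNOOP_SCALE) |
		       u32_encode_bits(250, SNOOP_VAL);

	printf("CSR_LTR_LONG_VAL_AD = 0x%08x\n", val);
	return 0;
}

With the masks above this prints 0x88fa88fa: 250 in both VAL fields and scale code 2 in both SCALE fields, matching the ~250 usec boot-time LTR the comment describes.
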
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index e5160d6208688..6393e895f95c6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2155,18 +2155,36 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ 				   void *buf, int dwords)
+ {
+ 	unsigned long flags;
+-	int offs, ret = 0;
++	int offs = 0;
+ 	u32 *vals = buf;
+ 
+-	if (iwl_trans_grab_nic_access(trans, &flags)) {
+-		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+-		for (offs = 0; offs < dwords; offs++)
+-			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+-		iwl_trans_release_nic_access(trans, &flags);
+-	} else {
+-		ret = -EBUSY;
++	while (offs < dwords) {
++		/* limit the time we spin here under lock to 1/2s */
++		ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
++
++		if (iwl_trans_grab_nic_access(trans, &flags)) {
++			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
++				    addr + 4 * offs);
++
++			while (offs < dwords) {
++				vals[offs] = iwl_read32(trans,
++							HBUS_TARG_MEM_RDAT);
++				offs++;
++
++				/* calling ktime_get is expensive so
++				 * do it once in 128 reads
++				 */
++				if (offs % 128 == 0 && ktime_after(ktime_get(),
++								   timeout))
++					break;
++			}
++			iwl_trans_release_nic_access(trans, &flags);
++		} else {
++			return -EBUSY;
++		}
+ 	}
+-	return ret;
++
++	return 0;
+ }
+ 
+ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
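
The read_mem rewrite bounds how long NIC access is held: it checks a deadline only every 128 reads (ktime_get() is expensive) and releases and re-grabs access between chunks. The same "work in bounded slices" skeleton in standalone C, using a 500 microsecond slice instead of the driver's 500 ms:

#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

int main(void)
{
	const long total = 1000000;
	long done = 0;
	int slices = 0;

	while (done < total) {
		/* limit the time spent inside one "locked" slice */
		long long deadline = now_us() + 500;

		/* lock(); -- grab exclusive access here */
		while (done < total) {
			done++;            /* one unit of real work */
			/* checking the clock is costly: do it sparsely */
			if (done % 128 == 0 && now_us() > deadline)
				break;
		}
		/* unlock(); -- let contenders run between slices */
		slices++;
	}
	printf("%ld units in %d slices\n", done, slices);
	return 0;
}
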
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+index 456dc4a100c20..e63457e145c71 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+@@ -270,11 +270,6 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+ 	reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+ 	brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+ 
+-	/* Fix the incorrect default */
+-	reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+-	reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+-	brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+-
+ 	usb_init_common(params);
+ 
+ 	/*
+diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
+index 9bd0e8e6310c3..283698cf0dc7d 100644
+--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
++++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
+@@ -16,7 +16,7 @@
+ 
+ #define JSL_PAD_OWN	0x020
+ #define JSL_PADCFGLOCK	0x080
+-#define JSL_HOSTSW_OWN	0x0b0
++#define JSL_HOSTSW_OWN	0x0c0
+ #define JSL_GPI_IS	0x100
+ #define JSL_GPI_IE	0x120
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 4aea3e05e8c65..899c16c17b6da 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-		pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_level_irq);
+ 		break;
+ 
+@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-		pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_level_irq);
+ 		break;
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 49f4b73be513f..5592a929b5935 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -111,6 +111,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
+ 	{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} },	/* Display Switch */
+ 	{KE_IGNORE, 0x81, {KEY_SLEEP} },
+ 	{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} },	/* Touch Pad Toggle */
++	{KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
+ 	{KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+ 	{KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
+ 	{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index f5901b0b07cd8..0419c8001fe33 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -206,6 +206,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
++		},
++	},
+ 	{} /* Array terminator */
+ };
+ 
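
The intel-vbtn hunk extends a NULL-terminated DMI allow list; a machine is accepted when every field of some entry matches. A reduced model of that first-match table scan (strings shortened, layout hypothetical):

#include <stdio.h>
#include <string.h>

struct dmi_match {
	const char *vendor;   /* NULL fields mark the terminator entry */
	const char *product;
};

static const struct dmi_match allow_list[] = {
	{ "HP", "HP Stream x360 Convertible PC 11" },
	{ "Hewlett-Packard", "HP Pavilion 13 x360 PC" },
	{ NULL, NULL }        /* array terminator */
};

static int dmi_allowed(const char *vendor, const char *product)
{
	for (const struct dmi_match *m = allow_list; m->vendor; m++)
		if (!strcmp(m->vendor, vendor) && !strcmp(m->product, product))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", dmi_allowed("Hewlett-Packard",
				   "HP Pavilion 13 x360 PC")); /* 1 */
	printf("%d\n", dmi_allowed("Acme", "Tablet 9000"));     /* 0 */
	return 0;
}
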
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 017f090a90f68..55a94a2dc562e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3214,7 +3214,14 @@ static int hotkey_init_tablet_mode(void)
+ 
+ 		in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+ 							     &has_tablet_mode);
+-		if (has_tablet_mode)
++		/*
++		 * The Yoga 11e series has 2 accelerometers described by a
++		 * BOSC0200 ACPI node. This setup relies on a Windows service
++		 * which calls special ACPI methods on this node to report
++		 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
++		 * does not support this, so skip the hotkey on these models.
++		 */
++		if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+ 			tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+ 		type = "GMMS";
+ 	} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+@@ -8769,6 +8776,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL),	/* P52 / P72 */
+ 	TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (1st gen) */
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
++	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
++	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+@@ -9696,6 +9705,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+ 	TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
+ 	TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
++	TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
+ };
+ 
+ static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index dda60f89c9512..5783139d0a119 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -295,6 +295,21 @@ static const struct ts_dmi_data irbis_tw90_data = {
+ 	.properties	= irbis_tw90_props,
+ };
+ 
++static const struct property_entry irbis_tw118_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 30),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	{ }
++};
++
++static const struct ts_dmi_data irbis_tw118_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= irbis_tw118_props,
++};
++
+ static const struct property_entry itworks_tw891_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
+@@ -623,6 +638,23 @@ static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
+ 	.properties	= pov_mobii_wintab_p1006w_v10_props,
+ };
+ 
++static const struct property_entry predia_basic_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data predia_basic_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= predia_basic_props,
++};
++
+ static const struct property_entry schneider_sct101ctm_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+@@ -936,6 +968,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "TW90"),
+ 		},
+ 	},
++	{
++		/* Irbis TW118 */
++		.driver_data = (void *)&irbis_tw118_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TW118"),
++		},
++	},
+ 	{
+ 		/* I.T.Works TW891 */
+ 		.driver_data = (void *)&itworks_tw891_data,
+@@ -1109,6 +1149,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
+ 		},
+ 	},
++	{
++		/* Predia Basic tablet */
++		.driver_data = (void *)&predia_basic_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
++			/* Above matches are too generic, add bios-version match */
++			DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
++		},
++	},
+ 	{
+ 		/* Point of View mobii wintab p800w (v2.1) */
+ 		.driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 202ba925c4940..5c3513a4b450e 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -3020,7 +3020,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		mem->va = eq_vaddress;
+ 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ 				    sizeof(struct be_eq_entry), eq_vaddress);
+@@ -3030,6 +3029,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ 					    BEISCSI_EQ_DELAY_DEF);
+ 		if (ret) {
+@@ -3086,7 +3086,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
+ 				    sizeof(struct sol_cqe), cq_vaddress);
+ 		if (ret) {
+@@ -3096,6 +3095,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+ 					    false, 0);
+ 		if (ret) {
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 8f5f5dc863a4a..719f9ae6c97ae 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1952,8 +1952,10 @@ static int storvsc_probe(struct hv_device *device,
+ 			alloc_ordered_workqueue("storvsc_error_wq_%d",
+ 						WQ_MEM_RECLAIM,
+ 						host->host_no);
+-	if (!host_dev->handle_error_wq)
++	if (!host_dev->handle_error_wq) {
++		ret = -ENOMEM;
+ 		goto err_out2;
++	}
+ 	INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
+ 	/* Register the HBA and start the scsi bus scan */
+ 	ret = scsi_add_host(host, &device->device);
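
The storvsc fix is a classic one-liner: on the alloc_ordered_workqueue() failure path, ret still held the value of an earlier successful call, so the probe error path could report success. Distilled:

#include <stdio.h>
#include <stdlib.h>

static int probe(int wq_ok)
{
	int ret;
	void *wq;

	ret = 0;                       /* earlier steps succeeded */

	wq = wq_ok ? malloc(1) : NULL; /* stands in for the workqueue */
	if (!wq) {
		ret = -12;             /* -ENOMEM: without this line the */
		goto err_out;          /* caller would see "success" (0) */
	}

	free(wq);
	return 0;

err_out:
	/* ... undo earlier steps ... */
	return ret;
}

int main(void)
{
	printf("probe failure returns %d\n", probe(0)); /* -12, not 0 */
	return 0;
}
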
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 9dd32bb0ff2be..18326eb772aeb 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1279,8 +1279,15 @@ static int ufshcd_devfreq_target(struct device *dev,
+ 	}
+ 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ 
++	pm_runtime_get_noresume(hba->dev);
++	if (!pm_runtime_active(hba->dev)) {
++		pm_runtime_put_noidle(hba->dev);
++		ret = -EAGAIN;
++		goto out;
++	}
+ 	start = ktime_get();
+ 	ret = ufshcd_devfreq_scale(hba, scale_up);
++	pm_runtime_put(hba->dev);
+ 
+ 	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ 		(scale_up ? "up" : "down"),
+@@ -3163,13 +3170,19 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 	/* Get the length of descriptor */
+ 	ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+ 	if (!buff_len) {
+-		dev_err(hba->dev, "%s: Failed to get desc length", __func__);
++		dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
++		return -EINVAL;
++	}
++
++	if (param_offset >= buff_len) {
++		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
++			__func__, param_offset, desc_id, buff_len);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Check whether we need temp memory */
+ 	if (param_offset != 0 || param_size < buff_len) {
+-		desc_buf = kmalloc(buff_len, GFP_KERNEL);
++		desc_buf = kzalloc(buff_len, GFP_KERNEL);
+ 		if (!desc_buf)
+ 			return -ENOMEM;
+ 	} else {
+@@ -3183,14 +3196,14 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 					desc_buf, &buff_len);
+ 
+ 	if (ret) {
+-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
++		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
+ 			__func__, desc_id, desc_index, param_offset, ret);
+ 		goto out;
+ 	}
+ 
+ 	/* Sanity check */
+ 	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+-		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
++		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
+ 			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -3200,12 +3213,12 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+ 	ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
+ 
+-	/* Check wherher we will not copy more data, than available */
+-	if (is_kmalloc && (param_offset + param_size) > buff_len)
+-		param_size = buff_len - param_offset;
+-
+-	if (is_kmalloc)
++	if (is_kmalloc) {
++		/* Make sure we don't copy more data than available */
++		if (param_offset + param_size > buff_len)
++			param_size = buff_len - param_offset;
+ 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
++	}
+ out:
+ 	if (is_kmalloc)
+ 		kfree(desc_buf);
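
The ufshcd changes validate param_offset against the descriptor length before any copy, allocate the staging buffer with kzalloc() so a short read cannot expose stale heap bytes, and clamp param_size to what is actually available. The validate-then-clamp sequence as a standalone sketch:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy param_size bytes at param_offset out of a buff_len descriptor. */
static int read_desc_param(const unsigned char *desc, size_t buff_len,
			   size_t param_offset, unsigned char *out,
			   size_t param_size)
{
	if (param_offset >= buff_len)      /* reject offsets past the end */
		return -EINVAL;

	/* clamp so we never copy more than is actually available */
	if (param_offset + param_size > buff_len)
		param_size = buff_len - param_offset;

	memcpy(out, desc + param_offset, param_size);
	return (int)param_size;
}

int main(void)
{
	unsigned char desc[8] = "ABCDEFG", out[16] = { 0 };

	printf("%d\n", read_desc_param(desc, 8, 4, out, 16)); /* clamped: 4 */
	printf("%d\n", read_desc_param(desc, 8, 9, out, 1));  /* -EINVAL    */
	return 0;
}
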
+diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
+index 7b642c330977f..7f397b4ad878d 100644
+--- a/drivers/soc/fsl/dpio/dpio-driver.c
++++ b/drivers/soc/fsl/dpio/dpio-driver.c
+@@ -95,7 +95,6 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+ {
+ 	int error;
+ 	struct fsl_mc_device_irq *irq;
+-	cpumask_t mask;
+ 
+ 	irq = dpio_dev->irqs[0];
+ 	error = devm_request_irq(&dpio_dev->dev,
+@@ -112,9 +111,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+ 	}
+ 
+ 	/* set the affinity hint */
+-	cpumask_clear(&mask);
+-	cpumask_set_cpu(cpu, &mask);
+-	if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
++	if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
+ 		dev_err(&dpio_dev->dev,
+ 			"irq_set_affinity failed irq %d cpu %d\n",
+ 			irq->msi_desc->irq, cpu);
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index 1ccda82da2063..158e09470898b 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -991,6 +991,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	struct nxp_fspi *f;
+ 	int ret;
++	u32 reg;
+ 
+ 	ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
+ 	if (!ctlr)
+@@ -1017,6 +1018,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 		goto err_put_ctrl;
+ 	}
+ 
++	/* Clear potential interrupts */
++	reg = fspi_readl(f, f->iobase + FSPI_INTR);
++	if (reg)
++		fspi_writel(f, reg, f->iobase + FSPI_INTR);
++
++
+ 	/* find the resources - controller memory mapped space */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
+ 	if (!res) {
+diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
+index 9ccdf2c216b51..6374501ba1390 100644
+--- a/drivers/usb/host/ohci-omap.c
++++ b/drivers/usb/host/ohci-omap.c
+@@ -91,14 +91,14 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
+ 				| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+ 			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+ 		else if (priv->power)
+-			gpiod_set_value(priv->power, 0);
++			gpiod_set_value_cansleep(priv->power, 0);
+ 	} else {
+ 		if (machine_is_omap_innovator() && cpu_is_omap1510())
+ 			__raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
+ 				& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+ 			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+ 		else if (priv->power)
+-			gpiod_set_value(priv->power, 1);
++			gpiod_set_value_cansleep(priv->power, 1);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
+index 358f6048dd3ce..6caf539091e55 100644
+--- a/drivers/vdpa/Kconfig
++++ b/drivers/vdpa/Kconfig
+@@ -32,6 +32,7 @@ config IFCVF
+ 
+ config MLX5_VDPA
+ 	bool
++	select VHOST_IOTLB
+ 	help
+ 	  Support library for Mellanox VDPA drivers. Provides code that is
+ 	  common for all types of VDPA drivers. The following drivers are planned:
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 676175bd9a679..eed604fe4d215 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -567,6 +567,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
+ 
+ 	if (r)
+ 		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
++	else
++		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ 
+ 	return r;
+ }
+@@ -598,14 +600,16 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ 	unsigned int gup_flags = FOLL_LONGTERM;
+ 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+-	unsigned long locked, lock_limit, pinned, i;
++	unsigned long lock_limit, sz2pin, nchunks, i;
+ 	u64 iova = msg->iova;
++	long pinned;
+ 	int ret = 0;
+ 
+ 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ 				    msg->iova + msg->size - 1))
+ 		return -EEXIST;
+ 
++	/* Limit the use of memory for bookkeeping */
+ 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ 	if (!page_list)
+ 		return -ENOMEM;
+@@ -614,52 +618,75 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 		gup_flags |= FOLL_WRITE;
+ 
+ 	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+-	if (!npages)
+-		return -EINVAL;
++	if (!npages) {
++		ret = -EINVAL;
++		goto free;
++	}
+ 
+ 	mmap_read_lock(dev->mm);
+ 
+-	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+-
+-	if (locked > lock_limit) {
++	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+ 		ret = -ENOMEM;
+-		goto out;
++		goto unlock;
+ 	}
+ 
+ 	cur_base = msg->uaddr & PAGE_MASK;
+ 	iova &= PAGE_MASK;
++	nchunks = 0;
+ 
+ 	while (npages) {
+-		pinned = min_t(unsigned long, npages, list_size);
+-		ret = pin_user_pages(cur_base, pinned,
+-				     gup_flags, page_list, NULL);
+-		if (ret != pinned)
++		sz2pin = min_t(unsigned long, npages, list_size);
++		pinned = pin_user_pages(cur_base, sz2pin,
++					gup_flags, page_list, NULL);
++		if (sz2pin != pinned) {
++			if (pinned < 0) {
++				ret = pinned;
++			} else {
++				unpin_user_pages(page_list, pinned);
++				ret = -ENOMEM;
++			}
+ 			goto out;
++		}
++		nchunks++;
+ 
+ 		if (!last_pfn)
+ 			map_pfn = page_to_pfn(page_list[0]);
+ 
+-		for (i = 0; i < ret; i++) {
++		for (i = 0; i < pinned; i++) {
+ 			unsigned long this_pfn = page_to_pfn(page_list[i]);
+ 			u64 csize;
+ 
+ 			if (last_pfn && (this_pfn != last_pfn + 1)) {
+ 				/* Pin a contiguous chunk of memory */
+ 				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+-				if (vhost_vdpa_map(v, iova, csize,
+-						   map_pfn << PAGE_SHIFT,
+-						   msg->perm))
++				ret = vhost_vdpa_map(v, iova, csize,
++						     map_pfn << PAGE_SHIFT,
++						     msg->perm);
++				if (ret) {
++					/*
++					 * Unpin the pages that are left unmapped
++					 * from this point on in the current
++					 * page_list. The remaining outstanding
++					 * ones which may stride across several
++					 * chunks will be covered in the common
++					 * error path subsequently.
++					 */
++					unpin_user_pages(&page_list[i],
++							 pinned - i);
+ 					goto out;
++				}
++
+ 				map_pfn = this_pfn;
+ 				iova += csize;
++				nchunks = 0;
+ 			}
+ 
+ 			last_pfn = this_pfn;
+ 		}
+ 
+-		cur_base += ret << PAGE_SHIFT;
+-		npages -= ret;
++		cur_base += pinned << PAGE_SHIFT;
++		npages -= pinned;
+ 	}
+ 
+ 	/* Pin the rest chunk */
+@@ -667,10 +694,27 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 			     map_pfn << PAGE_SHIFT, msg->perm);
+ out:
+ 	if (ret) {
++		if (nchunks) {
++			unsigned long pfn;
++
++			/*
++			 * Unpin the outstanding pages which are yet to be
++			 * mapped but haven't due to vdpa_map() or
++			 * pin_user_pages() failure.
++			 *
++			 * Mapped pages are accounted in vdpa_map(), hence
++			 * the corresponding unpinning will be handled by
++			 * vdpa_unmap().
++			 */
++			WARN_ON(!last_pfn);
++			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
++				unpin_user_page(pfn_to_page(pfn));
++		}
+ 		vhost_vdpa_unmap(v, msg->iova, msg->size);
+-		atomic64_sub(npages, &dev->mm->pinned_vm);
+ 	}
++unlock:
+ 	mmap_read_unlock(dev->mm);
++free:
+ 	free_page((unsigned long)page_list);
+ 	return ret;
+ }
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index 523dcdf39cc94..3729bea0c9895 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -813,6 +813,129 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
+ }
+ EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+ 
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++	cache->pages = NULL;
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++	return !cache->pages;
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++	struct page *page;
++
++	page = cache->pages;
++	cache->pages = page->zone_device_data;
++
++	return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++	page->zone_device_data = cache->pages;
++	cache->pages = page;
++}
++#else
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++	INIT_LIST_HEAD(&cache->pages);
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++	return list_empty(&cache->pages);
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++	struct page *page;
++
++	page = list_first_entry(&cache->pages, struct page, lru);
++	list_del(&page->lru);
++
++	return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++	list_add(&page->lru, &cache->pages);
++}
++#endif
++
++void gnttab_page_cache_init(struct gnttab_page_cache *cache)
++{
++	spin_lock_init(&cache->lock);
++	cache_init(cache);
++	cache->num_pages = 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
++
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	if (cache_empty(cache)) {
++		spin_unlock_irqrestore(&cache->lock, flags);
++		return gnttab_alloc_pages(1, page);
++	}
++
++	page[0] = cache_deq(cache);
++	cache->num_pages--;
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
++
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++			   unsigned int num)
++{
++	unsigned long flags;
++	unsigned int i;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	for (i = 0; i < num; i++)
++		cache_enq(cache, page[i]);
++	cache->num_pages += num;
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
++
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
++{
++	struct page *page[10];
++	unsigned int i = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	while (cache->num_pages > num) {
++		page[i] = cache_deq(cache);
++		cache->num_pages--;
++		if (++i == ARRAY_SIZE(page)) {
++			spin_unlock_irqrestore(&cache->lock, flags);
++			gnttab_free_pages(i, page);
++			i = 0;
++			spin_lock_irqsave(&cache->lock, flags);
++		}
++	}
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++
++	if (i != 0)
++		gnttab_free_pages(i, page);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
++
+ void gnttab_pages_clear_private(int nr_pages, struct page **pages)
+ {
+ 	int i;
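
gnttab_page_cache threads its free list through the cached pages themselves (page->zone_device_data under CONFIG_XEN_UNPOPULATED_ALLOC, page->lru otherwise), so caching costs no allocations of its own. The same intrusive LIFO in standalone C (a hypothetical struct stands in for struct page):

#include <stdio.h>

struct page {
	int id;
	struct page *next;  /* plays the role of zone_device_data */
};

struct page_cache {
	struct page *pages; /* LIFO of cached pages, no extra storage */
	unsigned int num;
};

static void cache_enq(struct page_cache *c, struct page *pg)
{
	pg->next = c->pages;
	c->pages = pg;
	c->num++;
}

static struct page *cache_deq(struct page_cache *c)
{
	struct page *pg = c->pages;

	if (!pg)
		return NULL;    /* caller falls back to the allocator */
	c->pages = pg->next;
	c->num--;
	return pg;
}

int main(void)
{
	struct page_cache cache = { 0 };
	struct page a = { 1 }, b = { 2 };
	struct page *p1, *p2;

	cache_enq(&cache, &a);
	cache_enq(&cache, &b);
	p1 = cache_deq(&cache);
	p2 = cache_deq(&cache);
	printf("%d %d (%u left)\n", p1->id, p2->id, cache.num); /* 2 1 (0 left) */
	return 0;
}
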
+diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
+index 3b98dc9214268..5d67a90d3fa2b 100644
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -12,7 +12,7 @@
+ #include <xen/xen.h>
+ 
+ static DEFINE_MUTEX(list_lock);
+-static LIST_HEAD(page_list);
++static struct page *page_list;
+ static unsigned int list_count;
+ 
+ static int fill_list(unsigned int nr_pages)
+@@ -75,7 +75,8 @@ static int fill_list(unsigned int nr_pages)
+ 		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+ 
+ 		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+-		list_add(&pg->lru, &page_list);
++		pg->zone_device_data = page_list;
++		page_list = pg;
+ 		list_count++;
+ 	}
+ 
+@@ -101,12 +102,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 	}
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+-		struct page *pg = list_first_entry_or_null(&page_list,
+-							   struct page,
+-							   lru);
++		struct page *pg = page_list;
+ 
+ 		BUG_ON(!pg);
+-		list_del(&pg->lru);
++		page_list = pg->zone_device_data;
+ 		list_count--;
+ 		pages[i] = pg;
+ 
+@@ -117,7 +116,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 				unsigned int j;
+ 
+ 				for (j = 0; j <= i; j++) {
+-					list_add(&pages[j]->lru, &page_list);
++					pages[j]->zone_device_data = page_list;
++					page_list = pages[j];
+ 					list_count++;
+ 				}
+ 				goto out;
+@@ -143,7 +143,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 
+ 	mutex_lock(&list_lock);
+ 	for (i = 0; i < nr_pages; i++) {
+-		list_add(&pages[i]->lru, &page_list);
++		pages[i]->zone_device_data = page_list;
++		page_list = pages[i];
+ 		list_count++;
+ 	}
+ 	mutex_unlock(&list_lock);
+@@ -172,7 +173,8 @@ static int __init init(void)
+ 			struct page *pg =
+ 				pfn_to_page(xen_extra_mem[i].start_pfn + j);
+ 
+-			list_add(&pg->lru, &page_list);
++			pg->zone_device_data = page_list;
++			page_list = pg;
+ 			list_count++;
+ 		}
+ 	}
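
The switch from a list_head to a bare struct page pointer is not a style change: these unpopulated pages are ZONE_DEVICE pages, and my reading of the change (not stated in the hunk itself) is that page->lru overlaps state owned by the ZONE_DEVICE/pgmap code, while page->zone_device_data is free for the owner's use. The free list therefore becomes an intrusive LIFO threaded through that field; sketch with locking elided (the real code holds list_lock):

	static struct page *page_list;		/* LIFO head */

	static void page_push(struct page *pg)
	{
		pg->zone_device_data = page_list;	/* "next" pointer */
		page_list = pg;
	}

	static struct page *page_pop(void)
	{
		struct page *pg = page_list;

		if (pg)
			page_list = pg->zone_device_data;
		return pg;
	}
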
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 4acc4e899600c..862162dca33cf 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -99,6 +99,8 @@ struct vscsibk_info {
+ 	struct list_head v2p_entry_lists;
+ 
+ 	wait_queue_head_t waiting_to_free;
++
++	struct gnttab_page_cache free_pages;
+ };
+ 
+ /* theoretical maximum of grants for one request */
+@@ -188,10 +190,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
+ MODULE_PARM_DESC(max_buffer_pages,
+ "Maximum number of free pages to keep in backend buffer");
+ 
+-static DEFINE_SPINLOCK(free_pages_lock);
+-static int free_pages_num;
+-static LIST_HEAD(scsiback_free_pages);
+-
+ /* Global spinlock to protect scsiback TPG list */
+ static DEFINE_MUTEX(scsiback_mutex);
+ static LIST_HEAD(scsiback_list);
+@@ -207,41 +205,6 @@ static void scsiback_put(struct vscsibk_info *info)
+ 		wake_up(&info->waiting_to_free);
+ }
+ 
+-static void put_free_pages(struct page **page, int num)
+-{
+-	unsigned long flags;
+-	int i = free_pages_num + num, n = num;
+-
+-	if (num == 0)
+-		return;
+-	if (i > scsiback_max_buffer_pages) {
+-		n = min(num, i - scsiback_max_buffer_pages);
+-		gnttab_free_pages(n, page + num - n);
+-		n = num - n;
+-	}
+-	spin_lock_irqsave(&free_pages_lock, flags);
+-	for (i = 0; i < n; i++)
+-		list_add(&page[i]->lru, &scsiback_free_pages);
+-	free_pages_num += n;
+-	spin_unlock_irqrestore(&free_pages_lock, flags);
+-}
+-
+-static int get_free_page(struct page **page)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&free_pages_lock, flags);
+-	if (list_empty(&scsiback_free_pages)) {
+-		spin_unlock_irqrestore(&free_pages_lock, flags);
+-		return gnttab_alloc_pages(1, page);
+-	}
+-	page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
+-	list_del(&page[0]->lru);
+-	free_pages_num--;
+-	spin_unlock_irqrestore(&free_pages_lock, flags);
+-	return 0;
+-}
+-
+ static unsigned long vaddr_page(struct page *page)
+ {
+ 	unsigned long pfn = page_to_pfn(page);
+@@ -302,7 +265,8 @@ static void scsiback_fast_flush_area(struct vscsibk_pend *req)
+ 		BUG_ON(err);
+ 	}
+ 
+-	put_free_pages(req->pages, req->n_grants);
++	gnttab_page_cache_put(&req->info->free_pages, req->pages,
++			      req->n_grants);
+ 	req->n_grants = 0;
+ }
+ 
+@@ -445,8 +409,8 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
+ 	struct vscsibk_info *info = pending_req->info;
+ 
+ 	for (i = 0; i < cnt; i++) {
+-		if (get_free_page(pg + mapcount)) {
+-			put_free_pages(pg, mapcount);
++		if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
++			gnttab_page_cache_put(&info->free_pages, pg, mapcount);
+ 			pr_err("no grant page\n");
+ 			return -ENOMEM;
+ 		}
+@@ -796,6 +760,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+ 		cond_resched();
+ 	}
+ 
++	gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);
++
+ 	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+ 	return more_to_do;
+ }
+@@ -1233,6 +1199,8 @@ static int scsiback_remove(struct xenbus_device *dev)
+ 
+ 	scsiback_release_translation_entry(info);
+ 
++	gnttab_page_cache_shrink(&info->free_pages, 0);
++
+ 	dev_set_drvdata(&dev->dev, NULL);
+ 
+ 	return 0;
+@@ -1263,6 +1231,7 @@ static int scsiback_probe(struct xenbus_device *dev,
+ 	info->irq = 0;
+ 	INIT_LIST_HEAD(&info->v2p_entry_lists);
+ 	spin_lock_init(&info->v2p_lock);
++	gnttab_page_cache_init(&info->free_pages);
+ 
+ 	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
+ 			    SG_ALL);
+@@ -1879,13 +1848,6 @@ out:
+ 
+ static void __exit scsiback_exit(void)
+ {
+-	struct page *page;
+-
+-	while (free_pages_num) {
+-		if (get_free_page(&page))
+-			BUG();
+-		gnttab_free_pages(1, &page);
+-	}
+ 	target_unregister_template(&scsiback_ops);
+ 	xenbus_unregister_driver(&scsiback_driver);
+ }
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 2f5ab8c47f506..c2e38516a931d 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -869,7 +869,10 @@ struct btrfs_fs_info {
+ 	 */
+ 	struct ulist *qgroup_ulist;
+ 
+-	/* protect user change for quota operations */
++	/*
++	 * Protect user change for quota operations. If a transaction is needed,
++	 * it must be started before locking this lock.
++	 */
+ 	struct mutex qgroup_ioctl_lock;
+ 
+ 	/* list of dirty qgroups to be written at next commit */
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 9205a88f2a881..e6786f5d8457f 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/workqueue.h>
+ #include <linux/btrfs.h>
++#include <linux/sched/mm.h>
+ 
+ #include "ctree.h"
+ #include "transaction.h"
+@@ -936,6 +937,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	struct btrfs_key found_key;
+ 	struct btrfs_qgroup *qgroup = NULL;
+ 	struct btrfs_trans_handle *trans = NULL;
++	struct ulist *ulist = NULL;
+ 	int ret = 0;
+ 	int slot;
+ 
+@@ -943,8 +945,8 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	if (fs_info->quota_root)
+ 		goto out;
+ 
+-	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
+-	if (!fs_info->qgroup_ulist) {
++	ulist = ulist_alloc(GFP_KERNEL);
++	if (!ulist) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -952,6 +954,22 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	ret = btrfs_sysfs_add_qgroups(fs_info);
+ 	if (ret < 0)
+ 		goto out;
++
++	/*
++	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
++	 * avoid lock acquisition inversion problems (reported by lockdep) between
++	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
++	 * start a transaction.
++	 * After we started the transaction lock qgroup_ioctl_lock again and
++	 * check if someone else created the quota root in the meanwhile. If so,
++	 * just return success and release the transaction handle.
++	 *
++	 * Also we don't need to worry about someone else calling
++	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
++	 * that function returns 0 (success) when the sysfs entries already exist.
++	 */
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++
+ 	/*
+ 	 * 1 for quota root item
+ 	 * 1 for BTRFS_QGROUP_STATUS item
+@@ -961,12 +979,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	 * would be a lot of overkill.
+ 	 */
+ 	trans = btrfs_start_transaction(tree_root, 2);
++
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
+ 		trans = NULL;
+ 		goto out;
+ 	}
+ 
++	if (fs_info->quota_root)
++		goto out;
++
++	fs_info->qgroup_ulist = ulist;
++	ulist = NULL;
++
+ 	/*
+ 	 * initially create the quota tree
+ 	 */
+@@ -1124,11 +1150,14 @@ out:
+ 	if (ret) {
+ 		ulist_free(fs_info->qgroup_ulist);
+ 		fs_info->qgroup_ulist = NULL;
+-		if (trans)
+-			btrfs_end_transaction(trans);
+ 		btrfs_sysfs_del_qgroups(fs_info);
+ 	}
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++	if (ret && trans)
++		btrfs_end_transaction(trans);
++	else if (trans)
++		ret = btrfs_end_transaction(trans);
++	ulist_free(ulist);
+ 	return ret;
+ }
+ 
+@@ -1141,19 +1170,29 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (!fs_info->quota_root)
+ 		goto out;
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 
+ 	/*
+ 	 * 1 For the root item
+ 	 *
+ 	 * We should also reserve enough items for the quota tree deletion in
+ 	 * btrfs_clean_quota_tree but this is not done.
++	 *
++	 * Also, we must always start a transaction without holding the mutex
++	 * qgroup_ioctl_lock, see btrfs_quota_enable().
+ 	 */
+ 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
++
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
++		trans = NULL;
+ 		goto out;
+ 	}
+ 
++	if (!fs_info->quota_root)
++		goto out;
++
+ 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ 	btrfs_qgroup_wait_for_completion(fs_info, false);
+ 	spin_lock(&fs_info->qgroup_lock);
+@@ -1167,13 +1206,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	ret = btrfs_clean_quota_tree(trans, quota_root);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+-		goto end_trans;
++		goto out;
+ 	}
+ 
+ 	ret = btrfs_del_root(trans, &quota_root->root_key);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+-		goto end_trans;
++		goto out;
+ 	}
+ 
+ 	list_del(&quota_root->dirty_list);
+@@ -1185,10 +1224,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 
+ 	btrfs_put_root(quota_root);
+ 
+-end_trans:
+-	ret = btrfs_end_transaction(trans);
+ out:
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++	if (ret && trans)
++		btrfs_end_transaction(trans);
++	else if (trans)
++		ret = btrfs_end_transaction(trans);
++
+ 	return ret;
+ }
+ 
+@@ -1324,13 +1366,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ 	struct btrfs_qgroup *member;
+ 	struct btrfs_qgroup_list *list;
+ 	struct ulist *tmp;
++	unsigned int nofs_flag;
+ 	int ret = 0;
+ 
+ 	/* Check the level of src and dst first */
+ 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ 		return -EINVAL;
+ 
++	/* We hold a transaction handle open, must do a NOFS allocation. */
++	nofs_flag = memalloc_nofs_save();
+ 	tmp = ulist_alloc(GFP_KERNEL);
++	memalloc_nofs_restore(nofs_flag);
+ 	if (!tmp)
+ 		return -ENOMEM;
+ 
+@@ -1387,10 +1433,14 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ 	struct btrfs_qgroup_list *list;
+ 	struct ulist *tmp;
+ 	bool found = false;
++	unsigned int nofs_flag;
+ 	int ret = 0;
+ 	int ret2;
+ 
++	/* We hold a transaction handle open, must do a NOFS allocation. */
++	nofs_flag = memalloc_nofs_save();
+ 	tmp = ulist_alloc(GFP_KERNEL);
++	memalloc_nofs_restore(nofs_flag);
+ 	if (!tmp)
+ 		return -ENOMEM;
+ 
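
Every qgroup hunk above enforces the rule now documented in ctree.h: btrfs_start_transaction() must not be called under qgroup_ioctl_lock (it can block on the vfs freeze semaphores, inverting lock order), so the mutex is dropped around it and the quota-root state is rechecked afterwards. Condensed shape of btrfs_quota_enable() after the patch, with error paths trimmed:

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;				/* already enabled */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	trans = btrfs_start_transaction(tree_root, 2);	/* mutex not held */

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!IS_ERR(trans) && fs_info->quota_root)
		goto out;	/* lost the race: report success, end the transaction */
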
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 5066b0251ed83..b741d84d38755 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1541,11 +1541,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 
+ 	src = *ppos;
+ 	svpfn = src / PM_ENTRY_BYTES;
+-	start_vaddr = svpfn << PAGE_SHIFT;
+ 	end_vaddr = mm->task_size;
+ 
+ 	/* watch out for wraparound */
+-	if (svpfn > mm->task_size >> PAGE_SHIFT)
++	start_vaddr = end_vaddr;
++	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
++		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
++
++	/* Ensure the address is inside the task */
++	if (start_vaddr > mm->task_size)
+ 		start_vaddr = end_vaddr;
+ 
+ 	/*
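
The pagemap fix is an overflow guard: svpfn derives from the user-controlled file offset, and shifting it by PAGE_SHIFT before bounds-checking can wrap, yielding a small start_vaddr that passes the old test. Illustration on 64-bit with PAGE_SHIFT == 12 (the offset value is arbitrary):

	unsigned long svpfn = 0x00ffffffffffffffUL;	/* huge pread() offset / 8 */
	unsigned long bogus = svpfn << PAGE_SHIFT;	/* wraps modulo 2^64 */

	/* Patched order: range-check the pfn before ever shifting it. */
	start_vaddr = end_vaddr;			/* default: read nothing */
	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
	if (start_vaddr > mm->task_size)		/* still out of range */
		start_vaddr = end_vaddr;
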
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 8ec7c8f109d7d..430ab9e4c94f9 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -628,21 +628,23 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+ 		bio->bi_opf |= REQ_FUA;
+ 
+ 	ret = bio_iov_iter_get_pages(bio, from);
+-	if (unlikely(ret)) {
+-		bio_io_error(bio);
+-		return ret;
+-	}
++	if (unlikely(ret))
++		goto out_release;
++
+ 	size = bio->bi_iter.bi_size;
+-	task_io_account_write(ret);
++	task_io_account_write(size);
+ 
+ 	if (iocb->ki_flags & IOCB_HIPRI)
+ 		bio_set_polled(bio, iocb);
+ 
+ 	ret = submit_bio_wait(bio);
+ 
++	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
++
++out_release:
++	bio_release_pages(bio, false);
+ 	bio_put(bio);
+ 
+-	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+ 	if (ret >= 0) {
+ 		iocb->ki_pos += size;
+ 		return size;
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 798027bb89be8..640f09479bdf7 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -13,6 +13,7 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++#include <linux/compiler.h>
+ #include <asm/rwonce.h>
+ 
+ #ifndef nop
+diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
+index e3a0be2c90ad9..7bb66e15b481b 100644
+--- a/include/linux/build_bug.h
++++ b/include/linux/build_bug.h
+@@ -77,4 +77,9 @@
+ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+ #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+ 
++#ifdef __GENKSYMS__
++/* genksyms gets confused by _Static_assert */
++#define _Static_assert(expr, ...)
++#endif
++
+ #endif	/* _LINUX_BUILD_BUG_H */
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index cee0c728d39aa..04c0a5a717f7e 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -52,12 +52,6 @@
+ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+ #endif
+ 
+-/* The following are for compatibility with GCC, from compiler-gcc.h,
+- * and may be redefined here because they should not be shared with other
+- * compilers, like ICC.
+- */
+-#define barrier() __asm__ __volatile__("" : : : "memory")
+-
+ #if __has_feature(shadow_call_stack)
+ # define __noscs	__attribute__((__no_sanitize__("shadow-call-stack")))
+ #endif
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 3017ebd400546..4a4019776368e 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -15,25 +15,6 @@
+ # error Sorry, your compiler is too old - please upgrade it.
+ #endif
+ 
+-/* Optimization barrier */
+-
+-/* The "volatile" is due to gcc bugs */
+-#define barrier() __asm__ __volatile__("": : :"memory")
+-/*
+- * This version is i.e. to prevent dead stores elimination on @ptr
+- * where gcc and llvm may behave differently when otherwise using
+- * normal barrier(): while gcc behavior gets along with a normal
+- * barrier(), llvm needs an explicit input variable to be assumed
+- * clobbered. The issue is as follows: while the inline asm might
+- * access any memory it wants, the compiler could have fit all of
+- * @ptr into memory registers instead, and since @ptr never escaped
+- * from that, it proved that the inline asm wasn't touching any of
+- * it. This version works well with both compilers, i.e. we're telling
+- * the compiler that the inline asm absolutely may see the contents
+- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+- */
+-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+-
+ /*
+  * This macro obfuscates arithmetic on a variable address so that gcc
+  * shouldn't recognize the original var, and make assumptions about it.
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 6810d80acb0b9..a7b6d72d51167 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 
+ /* Optimization barrier */
+ #ifndef barrier
+-# define barrier() __memory_barrier()
++/* The "volatile" is due to gcc bugs */
++# define barrier() __asm__ __volatile__("": : :"memory")
+ #endif
+ 
+ #ifndef barrier_data
+-# define barrier_data(ptr) barrier()
++/*
++ * This version is i.e. to prevent dead stores elimination on @ptr
++ * where gcc and llvm may behave differently when otherwise using
++ * normal barrier(): while gcc behavior gets along with a normal
++ * barrier(), llvm needs an explicit input variable to be assumed
++ * clobbered. The issue is as follows: while the inline asm might
++ * access any memory it wants, the compiler could have fit all of
++ * @ptr into memory registers instead, and since @ptr never escaped
++ * from that, it proved that the inline asm wasn't touching any of
++ * it. This version works well with both compilers, i.e. we're telling
++ * the compiler that the inline asm absolutely may see the contents
++ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
++ */
++# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+ #endif
+ 
+ /* workaround for GCC PR82365 if needed */
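
With these three hunks, barrier() and barrier_data() live in compiler.h as one shared definition instead of per-compiler copies (the old __memory_barrier() fallback is gone). Independent of this patch, the classic barrier_data() user is secret scrubbing, where a final memset() is otherwise a dead store the optimizer may delete; a sketch in the style of the kernel's memzero_explicit():

	static inline void scrub(void *s, size_t count)
	{
		memset(s, 0, count);
		/* The asm names s as an input, so the compiler must assume
		 * the asm reads *s and cannot drop the stores above.
		 */
		barrier_data(s);
	}
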
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8fbdfae2c8c02..edc5fbd07c1ca 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2778,9 +2778,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ 		     struct net_device *sb_dev);
+ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ 		       struct net_device *sb_dev);
++
+ int dev_queue_xmit(struct sk_buff *skb);
+ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
++int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
++
++static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
++{
++	int ret;
++
++	ret = __dev_direct_xmit(skb, queue_id);
++	if (!dev_xmit_complete(ret))
++		kfree_skb(skb);
++	return ret;
++}
++
+ int register_netdevice(struct net_device *dev);
+ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+ void unregister_netdevice_many(struct list_head *head);
+diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
+index 0fdbf653b173f..4807ca4d52e03 100644
+--- a/include/linux/zsmalloc.h
++++ b/include/linux/zsmalloc.h
+@@ -20,7 +20,6 @@
+  * zsmalloc mapping modes
+  *
+  * NOTE: These only make a difference when a mapped object spans pages.
+- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
+  */
+ enum zs_mapmode {
+ 	ZS_MM_RW, /* normal read-write mapping */
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index 9bc5bc07d4d3f..b9c937b3a1499 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -198,6 +198,23 @@ void gnttab_free_auto_xlat_frames(void);
+ int gnttab_alloc_pages(int nr_pages, struct page **pages);
+ void gnttab_free_pages(int nr_pages, struct page **pages);
+ 
++struct gnttab_page_cache {
++	spinlock_t		lock;
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++	struct page		*pages;
++#else
++	struct list_head	pages;
++#endif
++	unsigned int		num_pages;
++};
++
++void gnttab_page_cache_init(struct gnttab_page_cache *cache);
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++			   unsigned int num);
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
++			      unsigned int num);
++
+ #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ struct gnttab_dma_alloc_args {
+ 	/* Device for which DMA memory will be/was allocated. */
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index e995541d277d4..1b7fd1ab8ddcd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1250,7 +1250,13 @@ __acquires(hlist_lock)
+ 
+ 	*head = &kretprobe_inst_table[hash];
+ 	hlist_lock = kretprobe_table_lock_ptr(hash);
+-	raw_spin_lock_irqsave(hlist_lock, *flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There's other protections that make sure the same lock
++	 * is not taken on the same CPU that lockdep is unaware of.
++	 * Differentiate when it is taken in NMI context.
++	 */
++	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
+ }
+ NOKPROBE_SYMBOL(kretprobe_hash_lock);
+ 
+@@ -1259,7 +1265,13 @@ static void kretprobe_table_lock(unsigned long hash,
+ __acquires(hlist_lock)
+ {
+ 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+-	raw_spin_lock_irqsave(hlist_lock, *flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There's other protections that make sure the same lock
++	 * is not taken on the same CPU that lockdep is unaware of.
++	 * Differentiate when it is taken in NMI context.
++	 */
++	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
+ }
+ NOKPROBE_SYMBOL(kretprobe_table_lock);
+ 
+@@ -1359,7 +1371,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
+ 	struct hlist_node *next;
+ 	struct hlist_head *head;
+ 
+-	/* No race here */
++	/* To avoid recursive kretprobe by NMI, set kprobe busy here */
++	kprobe_busy_begin();
+ 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
+ 		kretprobe_table_lock(hash, &flags);
+ 		head = &kretprobe_inst_table[hash];
+@@ -1369,6 +1382,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
+ 		}
+ 		kretprobe_table_unlock(hash, &flags);
+ 	}
++	kprobe_busy_end();
++
+ 	free_rp_inst(rp);
+ }
+ NOKPROBE_SYMBOL(cleanup_rp_inst);
+@@ -1937,20 +1952,14 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+ 	unsigned long hash, flags = 0;
+ 	struct kretprobe_instance *ri;
+ 
+-	/*
+-	 * To avoid deadlocks, prohibit return probing in NMI contexts,
+-	 * just skip the probe and increase the (inexact) 'nmissed'
+-	 * statistical counter, so that the user is informed that
+-	 * something happened:
+-	 */
+-	if (unlikely(in_nmi())) {
+-		rp->nmissed++;
+-		return 0;
+-	}
+-
+ 	/* TODO: consider to only swap the RA after the last pre_handler fired */
+ 	hash = hash_ptr(current, KPROBE_HASH_BITS);
+-	raw_spin_lock_irqsave(&rp->lock, flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There's other protections that make sure the same lock
++	 * is not taken on the same CPU that lockdep is unaware of.
++	 */
++	raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
+ 	if (!hlist_empty(&rp->free_instances)) {
+ 		ri = hlist_entry(rp->free_instances.first,
+ 				struct kretprobe_instance, hlist);
+@@ -1961,7 +1970,7 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+ 		ri->task = current;
+ 
+ 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+-			raw_spin_lock_irqsave(&rp->lock, flags);
++			raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
+ 			hlist_add_head(&ri->hlist, &rp->free_instances);
+ 			raw_spin_unlock_irqrestore(&rp->lock, flags);
+ 			return 0;
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f324dc36fc43d..dee807ffad11b 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -78,7 +78,7 @@ void __weak arch_cpu_idle_dead(void) { }
+ void __weak arch_cpu_idle(void)
+ {
+ 	cpu_idle_force_poll = 1;
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /**
+@@ -94,9 +94,35 @@ void __cpuidle default_idle_call(void)
+ 
+ 		trace_cpu_idle(1, smp_processor_id());
+ 		stop_critical_timings();
++
++		/*
++		 * arch_cpu_idle() is supposed to enable IRQs, however
++		 * we can't do that because of RCU and tracing.
++		 *
++		 * Trace IRQs enable here, then switch off RCU, and have
++		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
++		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
++		 * last -- this is very similar to the entry code.
++		 */
++		trace_hardirqs_on_prepare();
++		lockdep_hardirqs_on_prepare(_THIS_IP_);
+ 		rcu_idle_enter();
++		lockdep_hardirqs_on(_THIS_IP_);
++
+ 		arch_cpu_idle();
++
++		/*
++		 * OK, so IRQs are enabled here, but RCU needs them disabled to
++		 * turn itself back on.. funny thing is that disabling IRQs
++		 * will cause tracing, which needs RCU. Jump through hoops to
++		 * make it 'work'.
++		 */
++		raw_local_irq_disable();
++		lockdep_hardirqs_off(_THIS_IP_);
+ 		rcu_idle_exit();
++		lockdep_hardirqs_on(_THIS_IP_);
++		raw_local_irq_enable();
++
+ 		start_critical_timings();
+ 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+ 	}
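
The weak arch_cpu_idle() above switches to raw_local_irq_enable() for the reason the new comments spell out: default_idle_call() now enters the idle routine with RCU already off, and anything traced (including the instrumented local_irq_* helpers) may use RCU. The window, reduced to a comment:

	/*
	 * RCU-off window in default_idle_call() (sketch of the hunk above):
	 *
	 *   rcu_idle_enter();
	 *   arch_cpu_idle();		// raw IRQ ops only, no tracepoints
	 *   raw_local_irq_disable();
	 *   rcu_idle_exit();
	 *
	 * All lockdep/tracing bookkeeping sits outside this window.
	 */
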
+diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
+index aa9ef23474df0..db107016d29b3 100644
+--- a/lib/zlib_dfltcc/dfltcc_inflate.c
++++ b/lib/zlib_dfltcc/dfltcc_inflate.c
+@@ -4,6 +4,7 @@
+ #include "dfltcc_util.h"
+ #include "dfltcc.h"
+ #include <asm/setup.h>
++#include <linux/export.h>
+ #include <linux/zutil.h>
+ 
+ /*
+@@ -29,6 +30,7 @@ int dfltcc_can_inflate(
+     return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
+                is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
+ }
++EXPORT_SYMBOL(dfltcc_can_inflate);
+ 
+ static int dfltcc_was_inflate_used(
+     z_streamp strm
+@@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate(
+     return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
+         DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
+ }
++EXPORT_SYMBOL(dfltcc_inflate);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 6c974888f86f9..92501712ea261 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -706,19 +706,6 @@ config ZSMALLOC
+ 	  returned by an alloc().  This handle must be mapped in order to
+ 	  access the allocated space.
+ 
+-config ZSMALLOC_PGTABLE_MAPPING
+-	bool "Use page table mapping to access object in zsmalloc"
+-	depends on ZSMALLOC=y
+-	help
+-	  By default, zsmalloc uses a copy-based object mapping method to
+-	  access allocations that span two pages. However, if a particular
+-	  architecture (ex, ARM) performs VM mapping faster than copying,
+-	  then you should select this. This causes zsmalloc to use page table
+-	  mapping rather than copying for object mapping.
+-
+-	  You can check speed with zsmalloc benchmark:
+-	  https://github.com/spartacus06/zsmapbench
+-
+ config ZSMALLOC_STAT
+ 	bool "Export zsmalloc statistics"
+ 	depends on ZSMALLOC
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4a579b8903290..74dc22dc537bf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1227,6 +1227,7 @@ static void destroy_compound_gigantic_page(struct page *page,
+ 	}
+ 
+ 	set_compound_order(page, 0);
++	page[1].compound_nr = 0;
+ 	__ClearPageHead(page);
+ }
+ 
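
Context for the one-line hugetlb change: in this kernel series set_compound_order() also derives compound_nr from the order, so set_compound_order(page, 0) leaves compound_nr at 1U << 0 == 1 rather than 0, and the destroy path must clear it by hand. Paraphrase of the helper as I understand it (check include/linux/mm.h in your tree):

	static inline void set_compound_order(struct page *page, unsigned int order)
	{
		page[1].compound_order = order;
		page[1].compound_nr = 1U << order;	/* order 0 still yields 1 */
	}
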
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7a8987aa69962..c85a2875a9625 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1774,6 +1774,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ 		if (error)
+ 			goto unmap_and_free_vma;
+ 
++		/* Can addr have changed??
++		 *
++		 * Answer: Yes, several device drivers can do it in their
++		 *         f_op->mmap method. -DaveM
++		 * Bug: If addr is changed, prev, rb_link, rb_parent should
++		 *      be updated for vma_link()
++		 */
++		WARN_ON_ONCE(addr != vma->vm_start);
++
++		addr = vma->vm_start;
++
+ 		/* If vm_flags changed after call_mmap(), we should try merge vma again
+ 		 * as we may succeed this time.
+ 		 */
+@@ -1788,25 +1799,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ 				fput(vma->vm_file);
+ 				vm_area_free(vma);
+ 				vma = merge;
+-				/* Update vm_flags and possible addr to pick up the change. We don't
+-				 * warn here if addr changed as the vma is not linked by vma_link().
+-				 */
+-				addr = vma->vm_start;
++				/* Update vm_flags to pick up the change. */
+ 				vm_flags = vma->vm_flags;
+ 				goto unmap_writable;
+ 			}
+ 		}
+ 
+-		/* Can addr have changed??
+-		 *
+-		 * Answer: Yes, several device drivers can do it in their
+-		 *         f_op->mmap method. -DaveM
+-		 * Bug: If addr is changed, prev, rb_link, rb_parent should
+-		 *      be updated for vma_link()
+-		 */
+-		WARN_ON_ONCE(addr != vma->vm_start);
+-
+-		addr = vma->vm_start;
+ 		vm_flags = vma->vm_flags;
+ 	} else if (vm_flags & VM_SHARED) {
+ 		error = shmem_zero_setup(vma);
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index c36fdff9a3713..cdfaaadea8ff7 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -293,11 +293,7 @@ struct zspage {
+ };
+ 
+ struct mapping_area {
+-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
+-	struct vm_struct *vm; /* vm area for mapping object that span pages */
+-#else
+ 	char *vm_buf; /* copy buffer for objects that span pages */
+-#endif
+ 	char *vm_addr; /* address of kmap_atomic()'ed pages */
+ 	enum zs_mapmode vm_mm; /* mapping mode */
+ };
+@@ -1113,48 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
+ 	return zspage;
+ }
+ 
+-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
+-static inline int __zs_cpu_up(struct mapping_area *area)
+-{
+-	/*
+-	 * Make sure we don't leak memory if a cpu UP notification
+-	 * and zs_init() race and both call zs_cpu_up() on the same cpu
+-	 */
+-	if (area->vm)
+-		return 0;
+-	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+-	if (!area->vm)
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-static inline void __zs_cpu_down(struct mapping_area *area)
+-{
+-	if (area->vm)
+-		free_vm_area(area->vm);
+-	area->vm = NULL;
+-}
+-
+-static inline void *__zs_map_object(struct mapping_area *area,
+-				struct page *pages[2], int off, int size)
+-{
+-	unsigned long addr = (unsigned long)area->vm->addr;
+-
+-	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
+-	area->vm_addr = area->vm->addr;
+-	return area->vm_addr + off;
+-}
+-
+-static inline void __zs_unmap_object(struct mapping_area *area,
+-				struct page *pages[2], int off, int size)
+-{
+-	unsigned long addr = (unsigned long)area->vm_addr;
+-
+-	unmap_kernel_range(addr, PAGE_SIZE * 2);
+-}
+-
+-#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
+-
+ static inline int __zs_cpu_up(struct mapping_area *area)
+ {
+ 	/*
+@@ -1235,8 +1189,6 @@ out:
+ 	pagefault_enable();
+ }
+ 
+-#endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
+-
+ static int zs_cpu_prepare(unsigned int cpu)
+ {
+ 	struct mapping_area *area;
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index 9fdbe30681537..b7169c4147f55 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -391,6 +391,7 @@ out:
+ 
+ /**
+  * batadv_frag_create() - create a fragment from skb
++ * @net_dev: outgoing device for fragment
+  * @skb: skb to create fragment from
+  * @frag_head: header to use in new fragment
+  * @fragment_size: size of new fragment
+@@ -401,22 +402,25 @@ out:
+  *
+  * Return: the new fragment, NULL on error.
+  */
+-static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
++static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
++					  struct sk_buff *skb,
+ 					  struct batadv_frag_packet *frag_head,
+ 					  unsigned int fragment_size)
+ {
++	unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
++	unsigned int tailroom = net_dev->needed_tailroom;
+ 	struct sk_buff *skb_fragment;
+ 	unsigned int header_size = sizeof(*frag_head);
+ 	unsigned int mtu = fragment_size + header_size;
+ 
+-	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
++	skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
+ 	if (!skb_fragment)
+ 		goto err;
+ 
+ 	skb_fragment->priority = skb->priority;
+ 
+ 	/* Eat the last mtu-bytes of the skb */
+-	skb_reserve(skb_fragment, header_size + ETH_HLEN);
++	skb_reserve(skb_fragment, ll_reserved + header_size);
+ 	skb_split(skb, skb_fragment, skb->len - fragment_size);
+ 
+ 	/* Add the header */
+@@ -439,11 +443,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 			    struct batadv_orig_node *orig_node,
+ 			    struct batadv_neigh_node *neigh_node)
+ {
++	struct net_device *net_dev = neigh_node->if_incoming->net_dev;
+ 	struct batadv_priv *bat_priv;
+ 	struct batadv_hard_iface *primary_if = NULL;
+ 	struct batadv_frag_packet frag_header;
+ 	struct sk_buff *skb_fragment;
+-	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
++	unsigned int mtu = net_dev->mtu;
+ 	unsigned int header_size = sizeof(frag_header);
+ 	unsigned int max_fragment_size, num_fragments;
+ 	int ret;
+@@ -503,7 +508,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 			goto put_primary_if;
+ 		}
+ 
+-		skb_fragment = batadv_frag_create(skb, &frag_header,
++		skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
+ 						  max_fragment_size);
+ 		if (!skb_fragment) {
+ 			ret = -ENOMEM;
+@@ -522,13 +527,14 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 		frag_header.no++;
+ 	}
+ 
+-	/* Make room for the fragment header. */
+-	if (batadv_skb_head_push(skb, header_size) < 0 ||
+-	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
+-		ret = -ENOMEM;
++	/* make sure that there is at least enough head for the fragmentation
++	 * and ethernet headers
++	 */
++	ret = skb_cow_head(skb, ETH_HLEN + header_size);
++	if (ret < 0)
+ 		goto put_primary_if;
+-	}
+ 
++	skb_push(skb, header_size);
+ 	memcpy(skb->data, &frag_header, header_size);
+ 
+ 	/* Send the last fragment */
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index fa06b51c0144d..d72c183919b44 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -554,6 +554,9 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+ 	needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+ 	needed_headroom += batadv_max_header_len();
+ 
++	/* fragmentation headers don't strip the unicast/... header */
++	needed_headroom += sizeof(struct batadv_frag_packet);
++
+ 	soft_iface->needed_headroom = needed_headroom;
+ 	soft_iface->needed_tailroom = lower_tailroom;
+ }
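
Net effect of the two batman-adv hunks: fragment skbs are sized for the actual outgoing device (LL_RESERVED_SPACE() headroom plus needed_tailroom instead of a bare ETH_HLEN), and the soft interface reserves room for the fragment header up front. That matters because skb_cow_head() only reallocates when the requested headroom is missing or the header is shared, so the send-path pattern below is normally a no-op:

	/* cheap when the reserved headroom is already sufficient */
	ret = skb_cow_head(skb, ETH_HLEN + header_size);
	if (ret < 0)
		goto put_primary_if;

	skb_push(skb, header_size);	/* guaranteed to fit now */
	memcpy(skb->data, &frag_header, header_size);
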
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 010de57488ce7..4a6241c0534d2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4176,7 +4176,7 @@ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
+ }
+ EXPORT_SYMBOL(dev_queue_xmit_accel);
+ 
+-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
++int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+ {
+ 	struct net_device *dev = skb->dev;
+ 	struct sk_buff *orig_skb = skb;
+@@ -4205,17 +4205,13 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+ 	dev_xmit_recursion_dec();
+ 
+ 	local_bh_enable();
+-
+-	if (!dev_xmit_complete(ret))
+-		kfree_skb(skb);
+-
+ 	return ret;
+ drop:
+ 	atomic_long_inc(&dev->tx_dropped);
+ 	kfree_skb_list(skb);
+ 	return NET_XMIT_DROP;
+ }
+-EXPORT_SYMBOL(dev_direct_xmit);
++EXPORT_SYMBOL(__dev_direct_xmit);
+ 
+ /*************************************************************************
+  *			Receiver routines
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 8dbfd84322a88..f6b284a9c480e 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -4167,12 +4167,18 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
+ 
+ 	spin_lock_init(&ipvs->tot_stats.lock);
+ 
+-	proc_create_net("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_seq_ops,
+-			sizeof(struct ip_vs_iter));
+-	proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
+-			ip_vs_stats_show, NULL);
+-	proc_create_net_single("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
+-			ip_vs_stats_percpu_show, NULL);
++#ifdef CONFIG_PROC_FS
++	if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net,
++			     &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)))
++		goto err_vs;
++	if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
++				    ip_vs_stats_show, NULL))
++		goto err_stats;
++	if (!proc_create_net_single("ip_vs_stats_percpu", 0,
++				    ipvs->net->proc_net,
++				    ip_vs_stats_percpu_show, NULL))
++		goto err_percpu;
++#endif
+ 
+ 	if (ip_vs_control_net_init_sysctl(ipvs))
+ 		goto err;
+@@ -4180,6 +4186,17 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
+ 	return 0;
+ 
+ err:
++#ifdef CONFIG_PROC_FS
++	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
++
++err_percpu:
++	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
++
++err_stats:
++	remove_proc_entry("ip_vs", ipvs->net->proc_net);
++
++err_vs:
++#endif
+ 	free_percpu(ipvs->tot_stats.cpustats);
+ 	return -ENOMEM;
+ }
+@@ -4188,9 +4205,11 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
+ {
+ 	ip_vs_trash_cleanup(ipvs);
+ 	ip_vs_control_net_cleanup_sysctl(ipvs);
++#ifdef CONFIG_PROC_FS
+ 	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
+ 	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
+ 	remove_proc_entry("ip_vs", ipvs->net->proc_net);
++#endif
+ 	free_percpu(ipvs->tot_stats.cpustats);
+ }
+ 
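
The ipvs hunks check the three proc_create calls (previously ignored) and unwind with the usual kernel error ladder, where each label removes exactly what was created before the failing step. Generic shape, with illustrative names:

	if (!create_a())
		goto err_a;
	if (!create_b())
		goto err_b;
	if (!create_c())
		goto err_c;
	return 0;

err_c:
	destroy_b();
err_b:
	destroy_a();
err_a:
	return -ENOMEM;
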
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 6c5e09e7440a9..a1ec2c8fa70a9 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -377,11 +377,7 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
+ 		skb->destructor = xsk_destruct_skb;
+ 
+-		/* Hinder dev_direct_xmit from freeing the packet and
+-		 * therefore completing it in the destructor
+-		 */
+-		refcount_inc(&skb->users);
+-		err = dev_direct_xmit(skb, xs->queue_id);
++		err = __dev_direct_xmit(skb, xs->queue_id);
+ 		if  (err == NETDEV_TX_BUSY) {
+ 			/* Tell user-space to retry the send */
+ 			skb->destructor = sock_wfree;
+@@ -395,12 +391,10 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		/* Ignore NET_XMIT_CN as packet might have been sent */
+ 		if (err == NET_XMIT_DROP) {
+ 			/* SKB completed but not sent */
+-			kfree_skb(skb);
+ 			err = -EBUSY;
+ 			goto out;
+ 		}
+ 
+-		consume_skb(skb);
+ 		sent_frame = true;
+ 	}
+ 
+diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
+index c13a5bc5095be..5b9a09957c6e0 100644
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -21,6 +21,7 @@ static unsigned long my_ip = (unsigned long)schedule;
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp1, @function\n"
++"	.globl		my_tramp1\n"
+ "   my_tramp1:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+@@ -29,6 +30,7 @@ asm (
+ "	.size		my_tramp1, .-my_tramp1\n"
+ "	ret\n"
+ "	.type		my_tramp2, @function\n"
++"	.globl		my_tramp2\n"
+ "   my_tramp2:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
+index d5c5022be6642..3f0079c9bd6fa 100644
+--- a/samples/ftrace/ftrace-direct-too.c
++++ b/samples/ftrace/ftrace-direct-too.c
+@@ -16,6 +16,7 @@ extern void my_tramp(void *);
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp, @function\n"
++"	.globl		my_tramp\n"
+ "   my_tramp:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
+index 63ca06d42c803..a2729d1ef17f5 100644
+--- a/samples/ftrace/ftrace-direct.c
++++ b/samples/ftrace/ftrace-direct.c
+@@ -14,6 +14,7 @@ extern void my_tramp(void *);
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp, @function\n"
++"	.globl		my_tramp\n"
+ "   my_tramp:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index fc202747ba837..b956e1675132a 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -521,10 +521,10 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+-	{	/* HP Pavilion x2 10-n000nd */
++	{	/* HP Pavilion x2 10-k0XX, 10-n0XX */
+ 		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ 					BYT_RT5640_JD_SRC_JD2_IN4N |
+@@ -533,6 +533,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* HP Pavilion x2 10-p0XX */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
++		},
++		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++					BYT_RT5640_JD_SRC_JD1_IN4P |
++					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* HP Stream 7 */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
+index e0878f5f74b1b..ffd6a358925da 100644
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -274,6 +274,7 @@ static void show_xbc_error(const char *data, const char *msg, int pos)
+ 
+ int apply_xbc(const char *path, const char *xbc_path)
+ {
++	struct stat stat;
+ 	u32 size, csum;
+ 	char *buf, *data;
+ 	int ret, fd;
+@@ -330,16 +331,26 @@ int apply_xbc(const char *path, const char *xbc_path)
+ 		return fd;
+ 	}
+ 	/* TODO: Ensure the @path is initramfs/initrd image */
++	if (fstat(fd, &stat) < 0) {
++		pr_err("Failed to get the size of %s\n", path);
++		goto out;
++	}
+ 	ret = write(fd, data, size + 8);
+-	if (ret < 0) {
++	if (ret < size + 8) {
++		if (ret < 0)
++			ret = -errno;
+ 		pr_err("Failed to apply a boot config: %d\n", ret);
+-		goto out;
++		if (ret < 0)
++			goto out;
++		goto out_rollback;
+ 	}
+ 	/* Write a magic word of the bootconfig */
+ 	ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+-	if (ret < 0) {
++	if (ret < BOOTCONFIG_MAGIC_LEN) {
++		if (ret < 0)
++			ret = -errno;
+ 		pr_err("Failed to apply a boot config magic: %d\n", ret);
+-		goto out;
++		goto out_rollback;
+ 	}
+ 	ret = 0;
+ out:
+@@ -347,6 +358,17 @@ out:
+ 	free(data);
+ 
+ 	return ret;
++
++out_rollback:
++	/* Map the partial write to -ENOSPC */
++	if (ret >= 0)
++		ret = -ENOSPC;
++	if (ftruncate(fd, stat.st_size) < 0) {
++		ret = -errno;
++		pr_err("Failed to rollback the write error: %d\n", ret);
++		pr_err("The initrd %s may be corrupted. Recommend to rebuild.\n", path);
++	}
++	goto out;
+ }
+ 
+ int usage(void)
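
The bootconfig fix handles two classic write(2) pitfalls at once: a short write used to be treated as success, and a failed append left a half-written initrd behind. The hunk snapshots the file size first and truncates back on any partial append. Essentials as standalone userspace C (names illustrative):

	struct stat st;
	ssize_t ret;

	if (fstat(fd, &st) < 0)
		return -errno;

	ret = write(fd, data, len);
	if (ret < (ssize_t)len) {
		int err = (ret < 0) ? -errno : -ENOSPC;	/* short write -> ENOSPC */

		if (ftruncate(fd, st.st_size) < 0)	/* drop the partial tail */
			err = -errno;
		return err;
	}
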
+diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
+index 8ab142ff5eac5..2afb7d5b1aca2 100644
+--- a/tools/bpf/bpftool/btf.c
++++ b/tools/bpf/bpftool/btf.c
+@@ -693,6 +693,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
+ 		obj_node = calloc(1, sizeof(*obj_node));
+ 		if (!obj_node) {
+ 			p_err("failed to allocate memory: %s", strerror(errno));
++			err = -ENOMEM;
+ 			goto err_free;
+ 		}
+ 
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index cb16d2aac51c3..54188ee16c486 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2040,7 +2040,7 @@ sub reboot_to {
+ 
+     if ($reboot_type eq "grub") {
+ 	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+-    } elsif ($reboot_type eq "grub2") {
++    } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
+ 	run_ssh "$grub_reboot $grub_number";
+     } elsif ($reboot_type eq "syslinux") {
+ 	run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";

